
Merge branch 'master' of github.com:provectus/kafka-ui into ISSUE-3504_messagesApiV2

 Conflicts:
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilters.java
	kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
	kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java
iliax 1 year ago
parent commit 3e02e3f6ea
100 changed files with 1176 additions and 1512 deletions
  1. + 2 - 2      .github/CODEOWNERS
  2. + 92 - 0     .github/ISSUE_TEMPLATE/bug.yml
  3. + 0 - 64     .github/ISSUE_TEMPLATE/bug_report.md
  4. + 3 - 0      .github/ISSUE_TEMPLATE/config.yml
  5. + 66 - 0     .github/ISSUE_TEMPLATE/feature.yml
  6. + 0 - 46     .github/ISSUE_TEMPLATE/feature_request.md
  7. + 0 - 52     .github/ISSUE_TEMPLATE/k8s.md
  8. + 0 - 4      .github/dependabot.yml
  9. + 8 - 0      .github/release_drafter.yaml
  10. + 2 - 2     .github/workflows/aws_publisher.yaml
  11. + 5 - 2     .github/workflows/backend.yml
  12. + 2 - 2     .github/workflows/block_merge.yml
  13. + 18 - 14   .github/workflows/branch-deploy.yml
  14. + 4 - 10    .github/workflows/branch-remove.yml
  15. + 1 - 2     .github/workflows/build-public-image.yml
  16. + 0 - 28    .github/workflows/create-branch-for-helm.yaml
  17. + 1 - 1     .github/workflows/cve.yaml
  18. + 1 - 7     .github/workflows/delete-public-image.yml
  19. + 1 - 1     .github/workflows/documentation.yaml
  20. + 1 - 1     .github/workflows/e2e-automation.yml
  21. + 6 - 4     .github/workflows/e2e-checks.yaml
  22. + 1 - 1     .github/workflows/e2e-manual.yml
  23. + 1 - 1     .github/workflows/e2e-weekly.yml
  24. + 5 - 2     .github/workflows/frontend.yaml
  25. + 0 - 38    .github/workflows/helm.yaml
  26. + 5 - 4     .github/workflows/master.yaml
  27. + 4 - 3     .github/workflows/pr-checks.yaml
  28. + 0 - 39    .github/workflows/release-helm.yaml
  29. + 1 - 1     .github/workflows/release-serde-api.yaml
  30. + 5 - 6     .github/workflows/release.yaml
  31. + 17 - 2    .github/workflows/release_drafter.yml
  32. + 4 - 4     .github/workflows/separate_env_public_create.yml
  33. + 4 - 4     .github/workflows/separate_env_public_remove.yml
  34. + 1 - 1     .github/workflows/stale.yaml
  35. + 1 - 1     .github/workflows/terraform-deploy.yml
  36. + 1 - 1     .github/workflows/triage_issues.yml
  37. + 1 - 1     .github/workflows/triage_prs.yml
  38. + 3 - 1     .github/workflows/welcome-first-time-contributors.yml
  39. + 1 - 1     .github/workflows/workflow_linter.yaml
  40. + 3 - 0     .gitignore
  41. + 1 - 1     README.md
  42. + 2 - 1     SECURITY.md
  43. + 0 - 25    charts/kafka-ui/.helmignore
  44. + 0 - 7     charts/kafka-ui/Chart.yaml
  45. + 0 - 1     charts/kafka-ui/README.md
  46. + 0 - 3     charts/kafka-ui/index.yaml
  47. + 0 - 21    charts/kafka-ui/templates/NOTES.txt
  48. + 0 - 84    charts/kafka-ui/templates/_helpers.tpl
  49. + 0 - 10    charts/kafka-ui/templates/configmap.yaml
  50. + 0 - 11    charts/kafka-ui/templates/configmap_fromValues.yaml
  51. + 0 - 150   charts/kafka-ui/templates/deployment.yaml
  52. + 0 - 46    charts/kafka-ui/templates/hpa.yaml
  53. + 0 - 89    charts/kafka-ui/templates/ingress.yaml
  54. + 0 - 18    charts/kafka-ui/templates/networkpolicy-egress.yaml
  55. + 0 - 18    charts/kafka-ui/templates/networkpolicy-ingress.yaml
  56. + 0 - 13    charts/kafka-ui/templates/secret.yaml
  57. + 0 - 22    charts/kafka-ui/templates/service.yaml
  58. + 0 - 12    charts/kafka-ui/templates/serviceaccount.yaml
  59. + 0 - 161   charts/kafka-ui/values.yaml
  60. + 2 - 2     documentation/compose/DOCKER_COMPOSE.md
  61. + 0 - 0     documentation/compose/data/message.json
  62. + 0 - 0     documentation/compose/data/proxy.conf
  63. + 2 - 2     documentation/compose/e2e-tests.yaml
  64. + 5 - 1     documentation/compose/jaas/kafka_server.conf
  65. + 4 - 0     documentation/compose/jaas/zookeeper_jaas.conf
  66. + 1 - 1     documentation/compose/jmx-exporter/kafka-broker.yml
  67. + 2 - 2     documentation/compose/kafka-cluster-sr-auth.yaml
  68. + 0 - 84    documentation/compose/kafka-clusters-only.yaml
  69. + 59 - 0    documentation/compose/kafka-ui-acl-with-zk.yaml
  70. + 3 - 1     documentation/compose/kafka-ui-arm64.yaml
  71. + 2 - 2     documentation/compose/kafka-ui-connectors-auth.yaml
  72. + 2 - 2     documentation/compose/kafka-ui.yaml
  73. + 1 - 1     documentation/compose/kafka-with-zookeeper.yaml
  74. + 12 - 15   documentation/compose/ldap.yaml
  75. + 1 - 1     documentation/compose/nginx-proxy.yaml
  76. + 0 - 22    documentation/compose/oauth-cognito.yaml
  77. + 0 - 0     documentation/compose/traefik-proxy.yaml
  78. + 9 - 4     kafka-ui-api/pom.xml
  79. + 15 - 2    kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java
  80. + 31 - 8    kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CorsGlobalConfiguration.java
  81. + 11 - 11   kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/BasicAuthSecurityConfig.java
  82. + 6 - 4     kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/DisabledAuthSecurityConfig.java
  83. + 26 - 0    kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/LdapProperties.java
  84. + 86 - 41   kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/LdapSecurityConfig.java
  85. + 11 - 1    kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthProperties.java
  86. + 1 - 2     kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthPropertiesConverter.java
  87. + 24 - 29   kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthSecurityConfig.java
  88. + 60 - 0    kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/RbacLdapUser.java
  89. + 21 - 0    kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/condition/ActiveDirectoryCondition.java
  90. + 2 - 4     kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/logout/CognitoLogoutSuccessHandler.java
  91. + 19 - 10   kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AccessController.java
  92. + 126 - 0   kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AclsController.java
  93. + 39 - 27   kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ApplicationConfigController.java
  94. + 53 - 29   kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java
  95. + 11 - 5    kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ClustersController.java
  96. + 33 - 22   kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
  97. + 95 - 59   kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java
  98. + 37 - 25   kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KsqlController.java
  99. + 26 - 12   kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
  100. + 64 - 35  kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java

+ 2 - 2
.github/CODEOWNERS

@@ -14,5 +14,5 @@
 # TESTS
 /kafka-ui-e2e-checks/       @provectus/kafka-qa
 
-# HELM CHARTS
-/charts/                    @provectus/kafka-devops
+# INFRA
+/.github/workflows/         @provectus/kafka-devops

+ 92 - 0
.github/ISSUE_TEMPLATE/bug.yml

@@ -0,0 +1,92 @@
+name: "\U0001F41E  Bug report"
+description: File a bug report
+labels: ["status/triage", "type/bug"]
+assignees: []
+
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Hi, thanks for raising the issue(-s), all contributions really matter!
+        Please, note that we'll close the issue without further explanation if you don't follow
+        this template and don't provide the information requested within this template.
+
+  - type: checkboxes
+    id: terms
+    attributes:
+      label: Issue submitter TODO list
+      description: By you checking these checkboxes we can be sure you've done the essential things.
+      options:
+        - label: I've looked up my issue in [FAQ](https://docs.kafka-ui.provectus.io/faq/common-problems)
+          required: true
+        - label: I've searched for an already existing issues [here](https://github.com/provectus/kafka-ui/issues)
+          required: true
+        - label: I've tried running `master`-labeled docker image and the issue still persists there
+          required: true
+        - label: I'm running a supported version of the application which is listed [here](https://github.com/provectus/kafka-ui/blob/master/SECURITY.md)
+          required: true
+
+  - type: textarea
+    attributes:
+      label: Describe the bug (actual behavior)
+      description: A clear and concise description of what the bug is. Use a list, if there is more than one problem
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Expected behavior
+      description: A clear and concise description of what you expected to happen
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: Your installation details
+      description: |
+        How do you run the app? Please provide as much info as possible:
+        1. App version (commit hash in the top left corner of the UI)
+        2. Helm chart version, if you use one
+        3. Your application config. Please remove the sensitive info like passwords or API keys.
+        4. Any IAAC configs
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Steps to reproduce
+      description: |
+        Please write down the order of the actions required to reproduce the issue.
+        For the advanced setups/complicated issue, we might need you to provide
+        a minimal [reproducible example](https://stackoverflow.com/help/minimal-reproducible-example).
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Screenshots
+      description: |
+        If applicable, add screenshots to help explain your problem
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: Logs
+      description: |
+        If applicable, *upload* screenshots to help explain your problem
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: Additional context
+      description: |
+        Add any other context about the problem here. E.G.:
+        1. Are there any alternative scenarios (different data/methods/configuration/setup) you have tried?
+          Were they successful or the same issue occurred? Please provide steps as well.
+        2. Related issues (if there are any).
+        3. Logs (if available)
+        4. Is there any serious impact or behaviour on the end-user because of this issue, that can be overlooked?
+    validations:
+      required: false

+ 0 - 64
.github/ISSUE_TEMPLATE/bug_report.md

@@ -1,64 +0,0 @@
----
-name: "\U0001F41E  Bug report"
-about: Create a bug report
-title: ''
-labels: status/triage, type/bug
-assignees: ''
-
----
-
-<!--
-
-We will close the issue without further explanation if you don't follow this template and don't provide the information requested within this template.
-
-Don't forget to check for existing issues/discussions regarding your proposal. We might already have it.
-https://github.com/provectus/kafka-ui/issues
-https://github.com/provectus/kafka-ui/discussions
-
--->
-
-<!--
-Please follow the naming conventions for bugs:
-<Feature/Area/Scope> :  <Compact, but specific problem summary> 
-Avoid generic titles, like “Topics: incorrect layout of message sorting drop-down list”. Better use something like: “Topics: Message sorting drop-down list overlaps the "Submit" button”.
-
--->
-
-**Describe the bug** (Actual behavior)
-<!--(A clear and concise description of what the bug is.Use a list, if there is more than one problem)-->
-
-**Expected behavior**
-<!--(A clear and concise description of what you expected to happen.)-->
-
-**Set up**
-<!--
-WE MIGHT CLOSE THE ISSUE without further explanation IF YOU DON'T PROVIDE THIS INFORMATION.
-
-How do you run the app? Please provide as much info as possible:
-1. App version (docker image version or check commit hash in the top left corner in UI)
-2. Helm chart version, if you use one
-3. Any IAAC configs
--->
-
-
-**Steps to Reproduce**
-<!-- We'd like you to provide an example setup (via docker-compose, helm, etc.) 
-to reproduce the problem, especially with a complex setups. -->
-
-1. 
-
-**Screenshots**
-<!--
-(If applicable, add screenshots to help explain your problem)
--->
-
-
-**Additional context**
-<!--
-Add any other context about the problem here. E.g.: 
-1. Are there any alternative scenarios (different data/methods/configuration/setup) you have tried? 
-   Were they successfull or same issue occured? Please provide steps as well.
-2. Related issues (if there are any).
-3. Logs (if available)
-4. Is there any serious impact or behaviour on the end-user because of this issue, that can be overlooked?
--->

+ 3 - 0
.github/ISSUE_TEMPLATE/config.yml

@@ -1,5 +1,8 @@
 blank_issues_enabled: false
 contact_links:
+  - name: Report helm issue
+    url: https://github.com/provectus/kafka-ui-charts
+    about: Our helm charts are located in another repo. Please raise issues/PRs regarding charts in that repo.
   - name: Official documentation
     url: https://docs.kafka-ui.provectus.io/
     about: Before reaching out for support, please refer to our documentation. Read "FAQ" and "Common problems", also try using search there.

+ 66 - 0
.github/ISSUE_TEMPLATE/feature.yml

@@ -0,0 +1,66 @@
+name: "\U0001F680 Feature request"
+description: Propose a new feature
+labels: ["status/triage", "type/feature"]
+assignees: []
+
+body:
+  - type: markdown
+    attributes:
+      value: |
+        Hi, thanks for raising the issue(-s), all contributions really matter!
+        Please, note that we'll close the issue without further explanation if you don't follow
+        this template and don't provide the information requested within this template.
+
+  - type: checkboxes
+    id: terms
+    attributes:
+      label: Issue submitter TODO list
+      description: By you checking these checkboxes we can be sure you've done the essential things.
+      options:
+        - label: I've searched for an already existing issues [here](https://github.com/provectus/kafka-ui/issues)
+          required: true
+        - label: I'm running a supported version of the application which is listed [here](https://github.com/provectus/kafka-ui/blob/master/SECURITY.md) and the feature is not present there
+          required: true
+
+  - type: textarea
+    attributes:
+      label: Is your proposal related to a problem?
+      description: |
+        Provide a clear and concise description of what the problem is.
+        For example, "I'm always frustrated when..."
+    validations:
+      required: false
+
+  - type: textarea
+    attributes:
+      label: Describe the feature you're interested in
+      description: |
+        Provide a clear and concise description of what you want to happen.
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Describe alternatives you've considered
+      description: |
+        Let us know about other solutions you've tried or researched.
+    validations:
+      required: false
+
+  - type: input
+    attributes:
+      label: Version you're running
+      description: |
+        Please provide the app version you're currently running:
+        1. App version (commit hash in the top left corner of the UI)
+    validations:
+      required: true
+
+  - type: textarea
+    attributes:
+      label: Additional context
+      description: |
+        Is there anything else you can add about the proposal?
+        You might want to link to related issues here, if you haven't already.
+    validations:
+      required: false

+ 0 - 46
.github/ISSUE_TEMPLATE/feature_request.md

@@ -1,46 +0,0 @@
----
-name: "\U0001F680 Feature request"
-about: Propose a new feature
-title: ''
-labels: status/triage, type/feature
-assignees: ''
-
----
-
-<!--
-
-Don't forget to check for existing issues/discussions regarding your proposal. We might already have it.
-https://github.com/provectus/kafka-ui/issues
-https://github.com/provectus/kafka-ui/discussions
-
--->
-
-### Which version of the app are you running?
-<!-- Please provide docker image version or check commit hash in the top left corner in UI) -->
-
-### Is your proposal related to a problem?
-
-<!--
-  Provide a clear and concise description of what the problem is.
-  For example, "I'm always frustrated when..."
--->
-
-### Describe the solution you'd like
-
-<!--
-  Provide a clear and concise description of what you want to happen.
--->
-
-### Describe alternatives you've considered
-
-<!--
-  Let us know about other solutions you've tried or researched.
--->
-
-### Additional context
-
-<!--
-  Is there anything else you can add about the proposal?
-  You might want to link to related issues here, if you haven't already.
--->
-

+ 0 - 52
.github/ISSUE_TEMPLATE/k8s.md

@@ -1,52 +0,0 @@
----
-name: "⎈ K8s/Helm problem report"
-about: Report a problem with k8s/helm charts/etc
-title: ''
-labels: scope/k8s, status/triage
-assignees: azatsafin
-
----
-
-<!--
-
-Don't forget to check for existing issues/discussions regarding your proposal. We might already have it.
-https://github.com/provectus/kafka-ui/issues
-https://github.com/provectus/kafka-ui/discussions
-
--->
-
-**Describe the bug**
-<!--(A clear and concise description of what the bug is.)-->
-
-
-**Set up**
-<!--
-How do you run the app? Please provide as much info as possible:
-1. App version (docker image version or check commit hash in the top left corner in UI)
-2. Helm chart version, if you use one
-3. Any IAAC configs
-
-We might close the issue without further explanation if you don't provide such information.
--->
-
-
-**Steps to Reproduce**
-Steps to reproduce the behavior:
-
-1. 
-
-**Expected behavior**
-<!--
-(A clear and concise description of what you expected to happen)
--->
-
-**Screenshots**
-<!--
-(If applicable, add screenshots to help explain your problem)
--->
-
-
-**Additional context**
-<!--
-(Add any other context about the problem here)
--->

+ 0 - 4
.github/dependabot.yml

@@ -8,8 +8,6 @@ updates:
     timezone: Europe/Moscow
   reviewers:
     - "Haarolean"
-  assignees:
-    - "Haarolean"
   labels:
     - "scope/backend"
     - "type/dependencies"
@@ -99,8 +97,6 @@ updates:
     timezone: Europe/Moscow
   reviewers:
     - "Haarolean"
-  assignees:
-    - "Haarolean"
   labels:
     - "scope/infrastructure"
     - "type/dependencies"

+ 8 - 0
.github/release_drafter.yaml

@@ -16,18 +16,26 @@ exclude-labels:
   - 'type/refactoring'
 
 categories:
+  - title: '🚩 Breaking Changes'
+    labels:
+      - 'impact/changelog'
+
   - title: '⚙️Features'
     labels:
       - 'type/feature'
+
   - title: '🪛Enhancements'
     labels:
       - 'type/enhancement'
+
   - title: '🔨Bug Fixes'
     labels:
       - 'type/bug'
+
   - title: 'Security'
     labels:
       - 'type/security'
+
   - title: '⎈ Helm/K8S Changes'
     labels:
       - 'scope/k8s'

+ 2 - 2
.github/workflows/aws_publisher.yaml

@@ -1,4 +1,4 @@
-name: AWS Marketplace Publisher
+name: "Infra: Release: AWS Marketplace Publisher"
 on:
   workflow_dispatch:
     inputs:
@@ -24,7 +24,7 @@ jobs:
       - name: Clone infra repo
         run: |
           echo "Cloning repo..."
-          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git --branch ${{ github.event.inputs.KafkaUIInfraBranch }}
+          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch ${{ github.event.inputs.KafkaUIInfraBranch }}
           echo "Cd to packer DIR..."
           cd kafka-ui-infra/ami
           echo "WORK_DIR=$(pwd)" >> $GITHUB_ENV

+ 5 - 2
.github/workflows/backend.yml

@@ -1,4 +1,4 @@
-name: Backend build and test
+name: "Backend: PR/master build & test"
 on:
   push:
     branches:
@@ -8,6 +8,9 @@ on:
     paths:
       - "kafka-ui-api/**"
       - "pom.xml"
+permissions:
+  checks: write
+  pull-requests: write
 jobs:
   build-and-test:
     runs-on: ubuntu-latest
@@ -29,7 +32,7 @@ jobs:
           key: ${{ runner.os }}-sonar
           restore-keys: ${{ runner.os }}-sonar
       - name: Build and analyze pull request target
-        if: ${{ github.event_name == 'pull_request_target' }}
+        if: ${{ github.event_name == 'pull_request' }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           SONAR_TOKEN: ${{ secrets.SONAR_TOKEN_BACKEND }}

+ 2 - 2
.github/workflows/block_merge.yml

@@ -1,4 +1,4 @@
-name: Pull Request Labels
+name: "Infra: PR block merge"
 on:
   pull_request:
     types: [opened, labeled, unlabeled, synchronize]
@@ -6,7 +6,7 @@ jobs:
   block_merge:
     runs-on: ubuntu-latest
     steps:
-      - uses: mheap/github-action-required-labels@v4
+      - uses: mheap/github-action-required-labels@v5
         with:
           mode: exactly
           count: 0

+ 18 - 14
.github/workflows/branch-deploy.yml

@@ -1,4 +1,4 @@
-name: Feature testing init
+name: "Infra: Feature Testing: Init env"
 on:
   workflow_dispatch:
 
@@ -73,29 +73,33 @@ jobs:
     steps:
       - name: clone
         run: |
-          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
+          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
       - name: create deployment
         run: |
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
           echo "Branch:${{ needs.build.outputs.tag }}"
           ./kafka-ui-deployment-from-branch.sh ${{ needs.build.outputs.tag }} ${{ github.event.label.name }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }}
-          git config --global user.email "kafka-ui-infra@provectus.com"
-          git config --global user.name "kafka-ui-infra"
+          git config --global user.email "infra-tech@provectus.com"
+          git config --global user.name "infra-tech"
           git add ../kafka-ui-from-branch/
           git commit -m "added env:${{ needs.build.outputs.deploy }}" && git push || true
 
-      - name: make comment with private deployment link
+      - name: update status check for private deployment
         if: ${{ github.event.label.name == 'status/feature_testing' }}
-        uses: peter-evans/create-or-update-comment@v3
+        uses: Sibz/github-status-action@v1.1.6
         with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            Custom deployment will be available at http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io
+          authToken: ${{secrets.GITHUB_TOKEN}}
+          context: "Click Details button to open custom deployment page"
+          state: "success"
+          sha: ${{ github.event.pull_request.head.sha  || github.sha }}
+          target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"
 
-      - name: make comment with public deployment link
+      - name: update status check for public deployment
         if: ${{ github.event.label.name == 'status/feature_testing_public' }}
-        uses: peter-evans/create-or-update-comment@v3
+        uses: Sibz/github-status-action@v1.1.6
         with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            Custom deployment will be available at http://${{ needs.build.outputs.tag }}.kafka-ui.provectus.io in 5 minutes
+          authToken: ${{secrets.GITHUB_TOKEN}}
+          context: "Click Details button to open custom deployment page"
+          state: "success"
+          sha: ${{ github.event.pull_request.head.sha  || github.sha }}
+          target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"

+ 4 - 10
.github/workflows/branch-remove.yml

@@ -1,4 +1,4 @@
-name: Feature testing destroy
+name: "Infra: Feature Testing: Destroy env"
 on:
   workflow_dispatch:
   pull_request:
@@ -11,18 +11,12 @@ jobs:
       - uses: actions/checkout@v3
       - name: clone
         run: |
-          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
+          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
       - name: remove env
         run: |
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
           ./delete-env.sh pr${{ github.event.pull_request.number }} || true
-          git config --global user.email "kafka-ui-infra@provectus.com"
-          git config --global user.name "kafka-ui-infra"
+          git config --global user.email "infra-tech@provectus.com"
+          git config --global user.name "infra-tech"
           git add ../kafka-ui-from-branch/
           git commit -m "removed env:${{ needs.build.outputs.deploy }}" && git push || true
-      - name: make comment with deployment link
-        uses: peter-evans/create-or-update-comment@v3
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            Custom deployment removed

+ 1 - 2
.github/workflows/build-public-image.yml

@@ -1,4 +1,4 @@
-name: Build Docker image and push
+name: "Infra: Image Testing: Deploy"
 on:
   workflow_dispatch:
   pull_request:
@@ -70,6 +70,5 @@ jobs:
           issue-number: ${{ github.event.pull_request.number }}
           body: |
             Image published at public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }}
-
     outputs:
       tag: ${{ steps.extract_branch.outputs.tag }}

+ 0 - 28
.github/workflows/create-branch-for-helm.yaml

@@ -1,28 +0,0 @@
-name: Prepare helm release
-on:
-  repository_dispatch:
-    types: [prepare-helm-release]
-jobs:
-  change-app-version:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - run: |
-          git config user.name github-actions
-          git config user.email github-actions@github.com
-      - name: Change versions
-        run: |
-          git checkout -b release-${{ github.event.client_payload.appversion}}
-          version=$(cat charts/kafka-ui/Chart.yaml  | grep version | awk '{print $2}')
-          version=${version%.*}.$((${version##*.}+1))
-          sed -i "s/version:.*/version: ${version}/" charts/kafka-ui/Chart.yaml
-          sed -i "s/appVersion:.*/appVersion: ${{ github.event.client_payload.appversion}}/" charts/kafka-ui/Chart.yaml
-          git add  charts/kafka-ui/Chart.yaml
-          git commit -m "release ${version}"
-          git push --set-upstream origin release-${{ github.event.client_payload.appversion}}
-      - name: Slack Notification
-        uses: rtCamp/action-slack-notify@v2
-        env:
-          SLACK_TITLE: "release-${{ github.event.client_payload.appversion}}"
-          SLACK_MESSAGE: "A new release of the helm chart has been prepared. Branch name: release-${{ github.event.client_payload.appversion}}"
-          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

+ 1 - 1
.github/workflows/cve.yaml

@@ -55,7 +55,7 @@ jobs:
           cache-to: type=local,dest=/tmp/.buildx-cache
 
       - name: Run CVE checks
-        uses: aquasecurity/trivy-action@0.10.0
+        uses: aquasecurity/trivy-action@0.11.2
         with:
           image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
           format: "table"

+ 1 - 7
.github/workflows/delete-public-image.yml

@@ -1,4 +1,4 @@
-name: Delete Public ECR Image
+name: "Infra: Image Testing: Delete"
 on:
   workflow_dispatch:
   pull_request:
@@ -32,9 +32,3 @@ jobs:
                 --repository-name kafka-ui-custom-build \
                 --image-ids imageTag=${{ steps.extract_branch.outputs.tag }} \
                 --region us-east-1
-      - name: make comment with private deployment link
-        uses: peter-evans/create-or-update-comment@v3
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            Image tag public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }} has been removed

+ 1 - 1
.github/workflows/documentation.yaml

@@ -1,4 +1,4 @@
-name: Documentation URLs linter
+name: "Infra: Docs: URL linter"
 on:
   pull_request:
     types:

+ 1 - 1
.github/workflows/e2e-automation.yml

@@ -1,4 +1,4 @@
-name: E2E Automation suite
+name: "E2E: Automation suite"
 on:
   workflow_dispatch:
     inputs:

+ 6 - 4
.github/workflows/e2e-checks.yaml

@@ -1,4 +1,4 @@
-name: E2E PR health check
+name: "E2E: PR healthcheck"
 on:
   pull_request_target:
     types: [ "opened", "edited", "reopened", "synchronize" ]
@@ -8,6 +8,8 @@ on:
       - "kafka-ui-react-app/**"
       - "kafka-ui-react-app/**"
       - "kafka-ui-e2e-checks/**"
       - "kafka-ui-e2e-checks/**"
       - "pom.xml"
       - "pom.xml"
+permissions:
+  statuses: write
 jobs:
 jobs:
   build-and-test:
   build-and-test:
     runs-on: ubuntu-latest
     runs-on: ubuntu-latest
@@ -18,8 +20,8 @@ jobs:
       - name: Configure AWS credentials
         uses: aws-actions/configure-aws-credentials@v2
         with:
-          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
-          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-access-key-id: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
           aws-region: eu-central-1
       - name: Set up environment
         id: set_env_values
@@ -45,7 +47,7 @@ jobs:
         # use the following command until #819 will be fixed
         run: |
           docker-compose -f kafka-ui-e2e-checks/docker/selenoid-git.yaml up -d
-          docker-compose -f ./documentation/compose/e2e-tests.yaml up -d
+          docker-compose -f ./documentation/compose/e2e-tests.yaml up -d && until [ "$(docker exec  kafka-ui wget --spider  --server-response  http://localhost:8080/actuator/health 2>&1 |  grep -c 'HTTP/1.1 200 OK')" == "1" ]; do echo "Waiting for kafka-ui ..." && sleep 1; done
       - name: Run test suite
         run: |
           ./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}

+ 1 - 1
.github/workflows/e2e-manual.yml

@@ -1,4 +1,4 @@
-name: E2E Manual suite
+name: "E2E: Manual suite"
 on:
   workflow_dispatch:
     inputs:

+ 1 - 1
.github/workflows/e2e-weekly.yml

@@ -1,4 +1,4 @@
-name: E2E Weekly suite
+name: "E2E: Weekly suite"
 on:
   schedule:
     - cron: '0 1 * * 1'

+ 5 - 2
.github/workflows/frontend.yaml

@@ -1,4 +1,4 @@
-name: Frontend build and test
+name: "Frontend: PR/master build & test"
 on:
   push:
     branches:
@@ -8,6 +8,9 @@ on:
     paths:
       - "kafka-ui-contract/**"
       - "kafka-ui-react-app/**"
+permissions:
+  checks: write
+  pull-requests: write
 jobs:
   build-and-test:
     env:
@@ -24,7 +27,7 @@ jobs:
         with:
           version: 7.4.0
       - name: Install node
-        uses: actions/setup-node@v3.6.0
+        uses: actions/setup-node@v3.7.0
         with:
           node-version: "16.15.0"
           cache: "pnpm"

+ 0 - 38
.github/workflows/helm.yaml

@@ -1,38 +0,0 @@
-name: Helm linter
-on:
- pull_request:
-  types: ["opened", "edited", "reopened", "synchronize"]
-  branches:
-   - 'master'
-  paths:
-   - "charts/**"
-jobs:
-  build-and-test:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Helm tool installer
-        uses: Azure/setup-helm@v3
-      - name: Setup Kubeval
-        uses: lra/setup-kubeval@v1.0.1
-      #check, was helm version increased in Chart.yaml?
-      - name: Check version
-        shell: bash
-        run: |
-          helm_version_new=$(cat charts/kafka-ui/Chart.yaml  | grep version | awk  '{print $2}')
-          helm_version_old=$(curl -s https://raw.githubusercontent.com/provectus/kafka-ui/master/charts/kafka-ui/Chart.yaml |   grep version | awk  '{print $2}' )
-          echo $helm_version_old
-          echo $helm_version_new
-          if [[ "$helm_version_new" > "$helm_version_old" ]]; then exit 0 ; else exit 1 ; fi
-      - name: Run kubeval
-        shell: bash
-        run: |
-          sed -i "s@enabled: false@enabled: true@g" charts/kafka-ui/values.yaml
-          K8S_VERSIONS=$(git ls-remote --refs --tags https://github.com/kubernetes/kubernetes.git | cut -d/ -f3 | grep -e '^v1\.[0-9]\{2\}\.[0]\{1,2\}$' | grep -v -e  '^v1\.1[0-7]\{1\}' | cut -c2-)
-          echo "NEXT K8S VERSIONS ARE GOING TO BE TESTED: $K8S_VERSIONS"
-          echo ""
-          for version in $K8S_VERSIONS
-            do
-              echo $version;
-              helm template --kube-version $version --set ingress.enabled=true charts/kafka-ui -f charts/kafka-ui/values.yaml | kubeval --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master --strict -v $version;
-            done

+ 5 - 4
.github/workflows/master.yaml

@@ -1,4 +1,4 @@
-name: Master branch build & deploy
+name: "Master: Build & deploy"
 on:
   workflow_dispatch:
   push:
@@ -58,6 +58,7 @@ jobs:
           builder: ${{ steps.buildx.outputs.name }}
           context: kafka-ui-api
           platforms: linux/amd64,linux/arm64
+          provenance: false
           push: true
           tags: |
             provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
@@ -73,11 +74,11 @@ jobs:
 #################################
       - name: update-master-deployment
         run: |
-          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git --branch master
+          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch master
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
           echo "Image digest is:${{ steps.docker_build_and_push.outputs.digest }}"
           ./kafka-ui-update-master-digest.sh ${{ steps.docker_build_and_push.outputs.digest }}
-          git config --global user.email "kafka-ui-infra@provectus.com"
-          git config --global user.name "kafka-ui-infra"
+          git config --global user.email "infra-tech@provectus.com"
+          git config --global user.name "infra-tech"
           git add ../kafka-ui/*
           git commit -m "updated master image digest: ${{ steps.docker_build_and_push.outputs.digest }}" && git push

+ 4 - 3
.github/workflows/pr-checks.yaml

@@ -1,13 +1,14 @@
-name: "PR Checklist checked"
+name: "PR: Checklist linter"
 on:
   pull_request_target:
     types: [opened, edited, synchronize, reopened]
-
+permissions:
+  checks: write
 jobs:
   task-check:
     runs-on: ubuntu-latest
     steps:
-      - uses: kentaro-m/task-completed-checker-action@v0.1.1
+      - uses: kentaro-m/task-completed-checker-action@v0.1.2
         with:
           repo-token: "${{ secrets.GITHUB_TOKEN }}"
       - uses: dekinderfiets/pr-description-enforcer@0.0.1

+ 0 - 39
.github/workflows/release-helm.yaml

@@ -1,39 +0,0 @@
-name: Release helm
-on:
- push:
-    branches:
-     - master
-    paths:
-      - "charts/**"
-
-jobs:
- release-helm:
-  runs-on:
-   ubuntu-latest
-  steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 1
-
-      - run: |
-          git config user.name github-actions
-          git config user.email github-actions@github.com
-
-      - uses: azure/setup-helm@v3
-
-      - name: add chart #realse helm with new version
-        run: |
-          VERSION=$(cat charts/kafka-ui/Chart.yaml  | grep version | awk '{print $2}')
-          echo "HELM_VERSION=$(echo ${VERSION})" >> $GITHUB_ENV
-          MSG=$(helm package charts/kafka-ui)
-          git fetch origin
-          git stash
-          git checkout -b gh-pages origin/gh-pages
-          git pull
-          helm repo index .
-          git add -f ${MSG##*/} index.yaml
-          git commit -m "release ${VERSION}"
-          git push
-      - uses: rickstaa/action-create-tag@v1 #create new tag
-        with:
-          tag: "charts/kafka-ui-${{ env.HELM_VERSION }}"

+ 1 - 1
.github/workflows/release-serde-api.yaml

@@ -1,4 +1,4 @@
-name: Release serde api
+name: "Infra: Release: Serde API"
 on: workflow_dispatch
 
 jobs:

+ 5 - 6
.github/workflows/release.yaml

@@ -1,4 +1,4 @@
-name: Release
+name: "Infra: Release"
 on:
   release:
     types: [published]
@@ -34,7 +34,7 @@ jobs:
           echo "version=${VERSION}" >> $GITHUB_OUTPUT
           echo "version=${VERSION}" >> $GITHUB_OUTPUT
 
 
       - name: Upload files to a GitHub release
       - name: Upload files to a GitHub release
-        uses: svenstaro/upload-release-action@2.5.0
+        uses: svenstaro/upload-release-action@2.6.1
         with:
         with:
           repo_token: ${{ secrets.GITHUB_TOKEN }}
           repo_token: ${{ secrets.GITHUB_TOKEN }}
           file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar
           file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar
@@ -77,6 +77,7 @@ jobs:
           builder: ${{ steps.buildx.outputs.name }}
           context: kafka-ui-api
           platforms: linux/amd64,linux/arm64
+          provenance: false
           push: true
           tags: |
             provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
@@ -88,14 +89,12 @@
 
   charts:
     runs-on: ubuntu-latest
-    permissions:
-      contents: write
     needs: release
     steps:
       - name: Repository Dispatch
         uses: peter-evans/repository-dispatch@v2
         with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-          repository: provectus/kafka-ui
+          token: ${{ secrets.CHARTS_ACTIONS_TOKEN }}
+          repository: provectus/kafka-ui-charts
           event-type: prepare-helm-release
           client-payload: '{"appversion": "${{ needs.release.outputs.version }}"}'

+ 17 - 2
.github/workflows/release_drafter.yml

@@ -1,19 +1,34 @@
-name: Release Drafter
+name: "Infra: Release Drafter run"
 
 on:
   push:
-    # branches to consider in the event; optional, defaults to all
     branches:
       - master
   workflow_dispatch:
+    inputs:
+      version:
+        description: 'Release version'
+        required: false
+      branch:
+        description: 'Target branch'
+        required: false
+        default: 'master'
+
+permissions:
+  contents: read
 
 jobs:
   update_release_draft:
     runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      pull-requests: write
     steps:
       - uses: release-drafter/release-drafter@v5
         with:
           config-name: release_drafter.yaml
           disable-autolabeler: true
+          version: ${{ github.event.inputs.version }}
+          commitish: ${{ github.event.inputs.branch }}
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

+ 4 - 4
.github/workflows/separate_env_public_create.yml

@@ -1,4 +1,4 @@
-name: Separate environment create
+name: "Infra: Feature Testing Public: Init env"
 on:
   workflow_dispatch:
     inputs:
@@ -76,14 +76,14 @@ jobs:
     steps:
       - name: clone
         run: |
-          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
+          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
 
       - name: separate env create
         run: |
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
           bash separate_env_create.sh ${{ github.event.inputs.ENV_NAME }} ${{ secrets.FEATURE_TESTING_UI_PASSWORD }} ${{ needs.build.outputs.tag }}
-          git config --global user.email "kafka-ui-infra@provectus.com"
-          git config --global user.name "kafka-ui-infra"
+          git config --global user.email "infra-tech@provectus.com"
+          git config --global user.name "infra-tech"
           git add -A
           git commit -m "separate env added: ${{ github.event.inputs.ENV_NAME }}" && git push || true
 

+ 4 - 4
.github/workflows/separate_env_public_remove.yml

@@ -1,4 +1,4 @@
-name: Separate environment remove
+name: "Infra: Feature Testing Public: Destroy env"
 on:
   workflow_dispatch:
     inputs:
@@ -13,12 +13,12 @@ jobs:
     steps:
       - name: clone
         run: |
-          git clone https://kafka-ui-infra:${{ secrets.KAFKA_UI_INFRA_TOKEN }}@gitlab.provectus.com/provectus-internals/kafka-ui-infra.git
+          git clone https://infra-tech:${{ secrets.INFRA_USER_ACCESS_TOKEN }}@github.com/provectus/kafka-ui-infra.git --branch envs
       - name: separate environment remove
         run: |
           cd kafka-ui-infra/aws-infrastructure4eks/argocd/scripts
           bash separate_env_remove.sh ${{ github.event.inputs.ENV_NAME }}
-          git config --global user.email "kafka-ui-infra@provectus.com"
-          git config --global user.name "kafka-ui-infra"
+          git config --global user.email "infra-tech@provectus.com"
+          git config --global user.name "infra-tech"
           git add -A
           git commit -m "separate env removed: ${{ github.event.inputs.ENV_NAME }}" && git push || true

+ 1 - 1
.github/workflows/stale.yaml

@@ -1,4 +1,4 @@
-name: 'Close stale issues'
+name: 'Infra: Close stale issues'
 on:
   schedule:
     - cron: '30 1 * * *'

+ 1 - 1
.github/workflows/terraform-deploy.yml

@@ -1,4 +1,4 @@
-name: Terraform deploy
+name: "Infra: Terraform deploy"
 on:
   workflow_dispatch:
     inputs:

+ 1 - 1
.github/workflows/triage_issues.yml

@@ -1,4 +1,4 @@
-name: Add triage label to new issues
+name: "Infra: Triage: Apply triage label for issues"
 on:
   issues:
     types:

+ 1 - 1
.github/workflows/triage_prs.yml

@@ -1,4 +1,4 @@
-name: Add triage label to new PRs
+name: "Infra: Triage: Apply triage label for PRs"
 on:
   pull_request:
     types:

+ 3 - 1
.github/workflows/welcome-first-time-contributors.yml

@@ -7,7 +7,9 @@ on:
   issues:
     types:
       - opened
-
+permissions:
+  issues: write
+  pull-requests: write
 jobs:
   welcome:
     runs-on: ubuntu-latest

+ 1 - 1
.github/workflows/workflow_linter.yaml

@@ -1,4 +1,4 @@
-name: "Workflow linter"
+name: "Infra: Workflow linter"
 on:
   pull_request:
     types:

+ 3 - 0
.gitignore

@@ -31,6 +31,9 @@ build/
 .vscode/
 /kafka-ui-api/app/node
 
+### SDKMAN ###
+.sdkmanrc
+
 .DS_Store
 *.code-workspace
 

+ 1 - 1
README.md

@@ -99,7 +99,7 @@ services:
     ports:
       - 8080:8080
     environment:
-      DYNAMIC_CONFIG_ENABLED: true
+      DYNAMIC_CONFIG_ENABLED: 'true'
     volumes:
       - ~/kui/config.yml:/etc/kafkaui/dynamic_config.yaml
 ```

+ 2 - 1
SECURITY.md

@@ -6,7 +6,8 @@ Following versions of the project are currently being supported with security up
 
 | Version | Supported          |
 | ------- | ------------------ |
-| 0.6.x   | :white_check_mark: |
+| 0.7.x   | :white_check_mark: |
+| 0.6.x   | :x:                |
 | 0.5.x   | :x:                |
 | 0.4.x   | :x:                |
 | 0.3.x   | :x:                |

+ 0 - 25
charts/kafka-ui/.helmignore

@@ -1,25 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*.orig
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
-.vscode/
-example/
-README.md

+ 0 - 7
charts/kafka-ui/Chart.yaml

@@ -1,7 +0,0 @@
-apiVersion: v2
-name: kafka-ui
-description: A Helm chart for kafka-UI
-type: application
-version: 0.6.2
-appVersion: v0.6.2
-icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png

+ 0 - 1
charts/kafka-ui/README.md

@@ -1 +0,0 @@
-Please refer to our [documentation](https://docs.kafka-ui.provectus.io/configuration/helm-charts) to get some info on our helm charts.

+ 0 - 3
charts/kafka-ui/index.yaml

@@ -1,3 +0,0 @@
-apiVersion: v1
-entries: {}
-generated: "2021-11-11T12:26:08.479581+03:00"

+ 0 - 21
charts/kafka-ui/templates/NOTES.txt

@@ -1,21 +0,0 @@
-1. Get the application URL by running these commands:
-{{- if .Values.ingress.enabled }}
-{{- range $host := .Values.ingress.hosts }}
-  {{- range .paths }}
-  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
-  {{- end }}
-{{- end }}
-{{- else if contains "NodePort" .Values.service.type }}
-  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "kafka-ui.fullname" . }})
-  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
-  echo http://$NODE_IP:$NODE_PORT
-{{- else if contains "LoadBalancer" .Values.service.type }}
-     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
-           You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "kafka-ui.fullname" . }}'
-  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "kafka-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
-  echo http://$SERVICE_IP:{{ .Values.service.port }}
-{{- else if contains "ClusterIP" .Values.service.type }}
-  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kafka-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
-  echo "Visit http://127.0.0.1:8080 to use your application"
-  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080
-{{- end }}

+ 0 - 84
charts/kafka-ui/templates/_helpers.tpl

@@ -1,84 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "kafka-ui.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "kafka-ui.fullname" -}}
-{{- if .Values.fullnameOverride }}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- $name := default .Chart.Name .Values.nameOverride }}
-{{- if contains $name .Release.Name }}
-{{- .Release.Name | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
-{{- end }}
-{{- end }}
-{{- end }}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "kafka-ui.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Common labels
-*/}}
-{{- define "kafka-ui.labels" -}}
-helm.sh/chart: {{ include "kafka-ui.chart" . }}
-{{ include "kafka-ui.selectorLabels" . }}
-{{- if .Chart.AppVersion }}
-app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
-{{- end }}
-app.kubernetes.io/managed-by: {{ .Release.Service }}
-{{- end }}
-
-{{/*
-Selector labels
-*/}}
-{{- define "kafka-ui.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "kafka-ui.name" . }}
-app.kubernetes.io/instance: {{ .Release.Name }}
-{{- end }}
-
-{{/*
-Create the name of the service account to use
-*/}}
-{{- define "kafka-ui.serviceAccountName" -}}
-{{- if .Values.serviceAccount.create }}
-{{- default (include "kafka-ui.fullname" .) .Values.serviceAccount.name }}
-{{- else }}
-{{- default "default" .Values.serviceAccount.name }}
-{{- end }}
-{{- end }}
-
-
-{{/*
-This allows us to check if the registry of the image is specified or not.
-*/}}
-{{- define "kafka-ui.imageName" -}}
-{{- $registryName := .Values.image.registry -}}
-{{- if .Values.global }}
-    {{- if .Values.global.imageRegistry }}
-     {{- $registryName = .Values.global.imageRegistry -}}
-    {{- end -}}
-{{- end -}}
-{{- $repository := .Values.image.repository -}}
-{{- $tag := .Values.image.tag | default .Chart.AppVersion -}}
-{{- if $registryName }}
-{{- printf "%s/%s:%s" $registryName $repository $tag -}}
-{{- else }}
-{{- printf "%s:%s" $repository $tag -}}
-{{- end }}
-{{- end -}}
-

+ 0 - 10
charts/kafka-ui/templates/configmap.yaml

@@ -1,10 +0,0 @@
-{{- if .Values.envs.config -}}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-data:
-  {{- toYaml .Values.envs.config | nindent 2 }}
-{{- end -}}

+ 0 - 11
charts/kafka-ui/templates/configmap_fromValues.yaml

@@ -1,11 +0,0 @@
-{{- if .Values.yamlApplicationConfig -}}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}-fromvalues
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-data:
-  config.yml: |-
-    {{- toYaml .Values.yamlApplicationConfig | nindent 4}}
-{{ end }}

+ 0 - 150
charts/kafka-ui/templates/deployment.yaml

@@ -1,150 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-  {{- with .Values.annotations }}
-  annotations:
-    {{- toYaml . | nindent 4 }}
-  {{- end }}
-spec:
-{{- if not .Values.autoscaling.enabled }}
-  replicas: {{ .Values.replicaCount }}
-{{- end }}
-  selector:
-    matchLabels:
-      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
-  template:
-    metadata:
-      annotations:
-      {{- with .Values.podAnnotations }}
-          {{- toYaml . | nindent 8 }}
-      {{- end }}
-        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
-        checksum/configFromValues: {{ include (print $.Template.BasePath "/configmap_fromValues.yaml") . | sha256sum }}
-        checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
-      labels:
-        {{- include "kafka-ui.selectorLabels" . | nindent 8 }}
-        {{- if .Values.podLabels }}
-        {{- toYaml .Values.podLabels | nindent 8 }}
-        {{- end }}
-    spec:
-      {{- with .Values.imagePullSecrets }}
-      imagePullSecrets:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .Values.initContainers }}
-      initContainers:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      serviceAccountName: {{ include "kafka-ui.serviceAccountName" . }}
-      securityContext:
-        {{- toYaml .Values.podSecurityContext | nindent 8 }}
-      containers:
-        - name: {{ .Chart.Name }}
-          securityContext:
-            {{- toYaml .Values.securityContext | nindent 12 }}
-          image: {{ include "kafka-ui.imageName" . }}
-          imagePullPolicy: {{ .Values.image.pullPolicy }}
-          {{- if or .Values.env  .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
-          env:
-            {{- with .Values.env }}
-              {{- toYaml . | nindent 12 }}
-            {{- end }}
-            {{- if or .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
-            - name: SPRING_CONFIG_ADDITIONAL-LOCATION
-              {{- if .Values.yamlApplicationConfig }}
-              value: /kafka-ui/config.yml
-              {{- else if .Values.yamlApplicationConfigConfigMap }}
-              value: /kafka-ui/{{ .Values.yamlApplicationConfigConfigMap.keyName | default "config.yml" }}
-              {{- end }}
-            {{- end }}
-          {{- end }}
-          envFrom:
-            {{- if .Values.existingConfigMap }}
-            - configMapRef:
-                name: {{ .Values.existingConfigMap }}
-            {{- end }}
-            {{- if .Values.envs.config }}
-            - configMapRef:
-                name: {{ include "kafka-ui.fullname" . }}
-            {{- end }}
-            {{- if .Values.existingSecret }}
-            - secretRef:
-                name: {{ .Values.existingSecret }}
-            {{- end }}
-            {{- if .Values.envs.secret}}
-            - secretRef:
-                name: {{ include "kafka-ui.fullname" . }}
-            {{- end}}    
-          ports:
-            - name: http
-              containerPort: 8080
-              protocol: TCP
-          livenessProbe:
-            httpGet:
-              {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
-              path: {{ get $contextPath "path" }}
-              port: http
-              {{- if .Values.probes.useHttpsScheme }}
-              scheme: HTTPS
-              {{- end }}
-            initialDelaySeconds: 60
-            periodSeconds: 30
-            timeoutSeconds: 10
-          readinessProbe:
-            httpGet:
-              {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
-              path: {{ get $contextPath "path" }}
-              port: http
-              {{- if .Values.probes.useHttpsScheme }}
-              scheme: HTTPS
-              {{- end }}
-            initialDelaySeconds: 60
-            periodSeconds: 30
-            timeoutSeconds: 10
-          resources:
-            {{- toYaml .Values.resources | nindent 12 }}
-          {{- if or .Values.yamlApplicationConfig .Values.volumeMounts .Values.yamlApplicationConfigConfigMap}}
-          volumeMounts:
-            {{- with .Values.volumeMounts }} 
-              {{- toYaml . | nindent 12 }}
-            {{- end }}
-            {{- if .Values.yamlApplicationConfig }}
-            - name: kafka-ui-yaml-conf
-              mountPath: /kafka-ui/
-            {{- end }}
-            {{- if .Values.yamlApplicationConfigConfigMap}}
-            - name: kafka-ui-yaml-conf-configmap
-              mountPath: /kafka-ui/
-            {{- end }}
-          {{- end }}
-      {{- if or .Values.yamlApplicationConfig .Values.volumes .Values.yamlApplicationConfigConfigMap}}
-      volumes:
-        {{- with .Values.volumes }}
-          {{- toYaml . | nindent 8 }}
-        {{- end }}
-        {{- if .Values.yamlApplicationConfig }}
-        - name: kafka-ui-yaml-conf
-          configMap: 
-            name: {{ include "kafka-ui.fullname" . }}-fromvalues
-        {{- end }}
-        {{- if .Values.yamlApplicationConfigConfigMap}}
-        - name: kafka-ui-yaml-conf-configmap
-          configMap: 
-            name: {{ .Values.yamlApplicationConfigConfigMap.name }}
-        {{- end }}
-      {{- end }}
-      {{- with .Values.nodeSelector }}
-      nodeSelector:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .Values.affinity }}
-      affinity:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .Values.tolerations }}
-      tolerations:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}

+ 0 - 46
charts/kafka-ui/templates/hpa.yaml

@@ -1,46 +0,0 @@
-{{- if .Values.autoscaling.enabled }}
-{{- $kubeCapabilityVersion := semver .Capabilities.KubeVersion.Version -}}
-{{- $isHigher1p25 := ge (semver "1.25" | $kubeCapabilityVersion.Compare) 0 -}}
-{{- if and ($.Capabilities.APIVersions.Has "autoscaling/v2") $isHigher1p25 -}}
-apiVersion: autoscaling/v2
-{{- else  }}
-apiVersion: autoscaling/v2beta1
-{{- end }}
-kind: HorizontalPodAutoscaler
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-spec:
-  scaleTargetRef:
-    apiVersion: apps/v1
-    kind: Deployment
-    name: {{ include "kafka-ui.fullname" . }}
-  minReplicas: {{ .Values.autoscaling.minReplicas }}
-  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
-  metrics:
-  {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
-    - type: Resource
-      resource:
-        name: cpu
-  {{- if  $isHigher1p25 }}
-        target:
-         type: Utilization
-         averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
-  {{- else  }}        
-        targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
-  {{- end }}      
-  {{- end }}
-  {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
-    - type: Resource
-      resource:
-        name: memory
-  {{- if  $isHigher1p25 }}     
-        target:
-          type: Utilization
-          averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
-  {{- else  }}   
-        targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
-  {{- end }}
-  {{- end }}
-{{- end }}

+ 0 - 89
charts/kafka-ui/templates/ingress.yaml

@@ -1,89 +0,0 @@
-{{- if .Values.ingress.enabled -}}
-{{- $fullName := include "kafka-ui.fullname" . -}}
-{{- $svcPort := .Values.service.port -}}
-{{- $kubeCapabilityVersion := semver .Capabilities.KubeVersion.Version -}}
-{{- $isHigher1p19 := ge (semver "1.19" | $kubeCapabilityVersion.Compare) 0 -}}
-{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
-apiVersion: networking.k8s.io/v1
-{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
-apiVersion: networking.k8s.io/v1beta1
-{{- else }}
-apiVersion: extensions/v1beta1
-{{- end }}
-kind: Ingress
-metadata:
-  name: {{ $fullName }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-  {{- with .Values.ingress.annotations }}
-  annotations:
-    {{- toYaml . | nindent 4 }}
-  {{- end }}
-spec:
-  {{- if .Values.ingress.tls.enabled }}
-  tls:
-    - hosts:
-        - {{ tpl .Values.ingress.host . }}
-      secretName: {{ .Values.ingress.tls.secretName }}
-  {{- end }}
-  {{- if .Values.ingress.ingressClassName }}
-  ingressClassName: {{ .Values.ingress.ingressClassName }}
-  {{- end }}
-  rules:
-    - http:
-        paths:
-{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
-          {{- range .Values.ingress.precedingPaths }}
-          - path: {{ .path }}
-            pathType: {{ .Values.ingress.pathType }}
-            backend:
-              service:
-                name: {{ .serviceName }}
-                port:
-                  number: {{ .servicePort }}
-          {{- end }}
-          - backend:
-              service:
-                name: {{ $fullName }}
-                port:
-                  number: {{ $svcPort }}
-            pathType: {{ .Values.ingress.pathType }}
-{{- if .Values.ingress.path }}
-            path: {{ .Values.ingress.path }}
-{{- end }}
-          {{- range .Values.ingress.succeedingPaths }}
-          - path: {{ .path }}
-            pathType: {{ .Values.ingress.pathType }}
-            backend:
-              service:
-                name: {{ .serviceName }}
-                port:
-                  number: {{ .servicePort }}
-          {{- end }}
-{{- if tpl .Values.ingress.host . }}
-      host: {{tpl .Values.ingress.host . }}
-{{- end }}
-{{- else -}}
-          {{- range .Values.ingress.precedingPaths }}
-          - path: {{ .path }}
-            backend:
-              serviceName: {{ .serviceName }}
-              servicePort: {{ .servicePort }}
-          {{- end }}
-          - backend:
-              serviceName: {{ $fullName }}
-              servicePort: {{ $svcPort }}
-{{- if .Values.ingress.path }}
-            path: {{ .Values.ingress.path }}
-{{- end }}
-          {{- range .Values.ingress.succeedingPaths }}
-          - path: {{ .path }}
-            backend:
-              serviceName: {{ .serviceName }}
-              servicePort: {{ .servicePort }}
-          {{- end }}
-{{- if tpl .Values.ingress.host . }}
-      host: {{ tpl .Values.ingress.host . }}
-{{- end }}
-{{- end }}
-{{- end }}

+ 0 - 18
charts/kafka-ui/templates/networkpolicy-egress.yaml

@@ -1,18 +0,0 @@
-{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }}
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
-  name: {{ printf "%s-egress" (include "kafka-ui.fullname" .) }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-spec:
-  podSelector:
-    matchLabels:
-      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
-  policyTypes:
-    - Egress
-  egress:
-    {{- if .Values.networkPolicy.egressRules.customRules }}
-    {{- toYaml .Values.networkPolicy.egressRules.customRules | nindent 4 }}
-    {{- end }}
-{{- end }}

+ 0 - 18
charts/kafka-ui/templates/networkpolicy-ingress.yaml

@@ -1,18 +0,0 @@
-{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.ingressRules.customRules }}
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
-  name: {{ printf "%s-ingress" (include "kafka-ui.fullname" .) }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-spec:
-  podSelector:
-    matchLabels:
-      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
-  policyTypes:
-    - Ingress
-  ingress:
-    {{- if .Values.networkPolicy.ingressRules.customRules }}
-    {{- toYaml .Values.networkPolicy.ingressRules.customRules | nindent 4 }}
-    {{- end }}
-{{- end }}

+ 0 - 13
charts/kafka-ui/templates/secret.yaml

@@ -1,13 +0,0 @@
-{{- if .Values.envs.secret -}}
-apiVersion: v1
-kind: Secret
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-type: Opaque
-data:
-  {{- range $key, $val := .Values.envs.secret }}
-  {{ $key }}: {{ $val | b64enc | quote }}
-  {{- end -}}
-{{- end}}

+ 0 - 22
charts/kafka-ui/templates/service.yaml

@@ -1,22 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-{{- if .Values.service.annotations }}
-  annotations:
-{{ toYaml .Values.service.annotations | nindent 4 }}
-{{- end }}
-spec:
-  type: {{ .Values.service.type }}
-  ports:
-    - port: {{ .Values.service.port }}
-      targetPort: http
-      protocol: TCP
-      name: http
-      {{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }}
-      nodePort: {{ .Values.service.nodePort }}
-      {{- end }}
-  selector:
-    {{- include "kafka-ui.selectorLabels" . | nindent 4 }}

+ 0 - 12
charts/kafka-ui/templates/serviceaccount.yaml

@@ -1,12 +0,0 @@
-{{- if .Values.serviceAccount.create -}}
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: {{ include "kafka-ui.serviceAccountName" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-  {{- with .Values.serviceAccount.annotations }}
-  annotations:
-    {{- toYaml . | nindent 4 }}
-  {{- end }}
-{{- end }}

+ 0 - 161
charts/kafka-ui/values.yaml

@@ -1,161 +0,0 @@
-replicaCount: 1
-
-image:
-  registry: docker.io
-  repository: provectuslabs/kafka-ui
-  pullPolicy: IfNotPresent
-  # Overrides the image tag whose default is the chart appVersion.
-  tag: ""
-
-imagePullSecrets: []
-nameOverride: ""
-fullnameOverride: ""
-
-serviceAccount:
-  # Specifies whether a service account should be created
-  create: true
-  # Annotations to add to the service account
-  annotations: {}
-  # The name of the service account to use.
-  # If not set and create is true, a name is generated using the fullname template
-  name: ""
-
-existingConfigMap: ""
-yamlApplicationConfig:
-  {}
-  # kafka:
-  #   clusters:
-  #     - name: yaml
-  #       bootstrapServers: kafka-service:9092
-  # spring:
-  #   security:
-  #     oauth2:
-  # auth:
-  #   type: disabled
-  # management:
-  #   health:
-  #     ldap:
-  #       enabled: false
-yamlApplicationConfigConfigMap:
-  {}
-  # keyName: config.yml
-  # name: configMapName
-existingSecret: ""
-envs:
-  secret: {}
-  config: {}
-
-networkPolicy:
-  enabled: false
-  egressRules:
-    ## Additional custom egress rules
-    ## e.g:
-    ## customRules:
-    ##   - to:
-    ##       - namespaceSelector:
-    ##           matchLabels:
-    ##             label: example
-    customRules: []
-  ingressRules:
-    ## Additional custom ingress rules
-    ## e.g:
-    ## customRules:
-    ##   - from:
-    ##       - namespaceSelector:
-    ##           matchLabels:
-    ##             label: example
-    customRules: []
-
-podAnnotations: {}
-podLabels: {}
-
-## Annotations to be added to kafka-ui Deployment
-##
-annotations: {}
-
-## Set field schema as HTTPS for readines and liveness probe
-##
-probes:
-  useHttpsScheme: false
-
-podSecurityContext:
-  {}
-  # fsGroup: 2000
-
-securityContext:
-  {}
-  # capabilities:
-  #   drop:
-  #   - ALL
-  # readOnlyRootFilesystem: true
-  # runAsNonRoot: true
-  # runAsUser: 1000
-
-service:
-  type: ClusterIP
-  port: 80
-  # if you want to force a specific nodePort. Must be use with service.type=NodePort
-  # nodePort:
-
-# Ingress configuration
-ingress:
-  # Enable ingress resource
-  enabled: false
-
-  # Annotations for the Ingress
-  annotations: {}
-
-  # ingressClassName for the Ingress
-  ingressClassName: ""
-
-  # The path for the Ingress
-  path: "/"
-
-  # The path type for the Ingress
-  pathType: "Prefix"  
-
-  # The hostname for the Ingress
-  host: ""
-
-  # configs for Ingress TLS
-  tls:
-    # Enable TLS termination for the Ingress
-    enabled: false
-    # the name of a pre-created Secret containing a TLS private key and certificate
-    secretName: ""
-
-  # HTTP paths to add to the Ingress before the default path
-  precedingPaths: []
-
-  # Http paths to add to the Ingress after the default path
-  succeedingPaths: []
-
-resources:
-  {}
-  # limits:
-  #   cpu: 200m
-  #   memory: 512Mi
-  # requests:
-  #   cpu: 200m
-  #   memory: 256Mi
-
-autoscaling:
-  enabled: false
-  minReplicas: 1
-  maxReplicas: 100
-  targetCPUUtilizationPercentage: 80
-  # targetMemoryUtilizationPercentage: 80
-
-nodeSelector: {}
-
-tolerations: []
-
-affinity: {}
-
-env: {}
-
-initContainers: {}
-
-volumeMounts: {}
-
-volumes: {}

+ 2 - 2
documentation/compose/DOCKER_COMPOSE.md

@@ -8,9 +8,9 @@
 6. [kafka-ui-auth-context.yaml](./kafka-ui-auth-context.yaml) - Basic (username/password) authentication with custom path (URL) (issue 861).
 7. [e2e-tests.yaml](./e2e-tests.yaml) - Configuration with different connectors (github-source, s3, sink-activities, source-activities) and Ksql functionality.
 8. [kafka-ui-jmx-secured.yml](./kafka-ui-jmx-secured.yml) - Kafka’s JMX with SSL and authentication.
-9. [kafka-ui-reverse-proxy.yaml](./kafka-ui-reverse-proxy.yaml) - An example for using the app behind a proxy (like nginx).
+9. [kafka-ui-reverse-proxy.yaml](./nginx-proxy.yaml) - An example for using the app behind a proxy (like nginx).
 10. [kafka-ui-sasl.yaml](./kafka-ui-sasl.yaml) - SASL auth for Kafka.
-11. [kafka-ui-traefik-proxy.yaml](./kafka-ui-traefik-proxy.yaml) - Traefik specific proxy configuration.
+11. [kafka-ui-traefik-proxy.yaml](./traefik-proxy.yaml) - Traefik specific proxy configuration.
 12. [oauth-cognito.yaml](./oauth-cognito.yaml) - OAuth2 with Cognito
 13. [kafka-ui-with-jmx-exporter.yaml](./kafka-ui-with-jmx-exporter.yaml) - A configuration with 2 kafka clusters with enabled prometheus jmx exporters instead of jmx.
 14. [kafka-with-zookeeper.yaml](./kafka-with-zookeeper.yaml) - An example for using kafka with zookeeper

+ 0 - 0
documentation/compose/message.json → documentation/compose/data/message.json


+ 0 - 0
documentation/compose/proxy.conf → documentation/compose/data/proxy.conf


+ 2 - 2
documentation/compose/e2e-tests.yaml

@@ -124,7 +124,7 @@ services:
   kafka-init-topics:
     image: confluentinc/cp-kafka:7.2.1
     volumes:
-      - ./message.json:/data/message.json
+      - ./data/message.json:/data/message.json
     depends_on:
       kafka0:
         condition: service_healthy
@@ -187,4 +187,4 @@ services:
       KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
       KSQL_KSQL_SERVICE_ID: my_ksql_1
       KSQL_KSQL_HIDDEN_TOPICS: '^_.*'
-      KSQL_CACHE_MAX_BYTES_BUFFERING: 0
+      KSQL_CACHE_MAX_BYTES_BUFFERING: 0

+ 5 - 1
documentation/compose/jaas/kafka_server.conf

@@ -11,4 +11,8 @@ KafkaClient {
     user_admin="admin-secret";
 };
 
-Client {};
+Client {
+       org.apache.zookeeper.server.auth.DigestLoginModule required
+       username="zkuser"
+       password="zkuserpassword";
+};

+ 4 - 0
documentation/compose/jaas/zookeeper_jaas.conf

@@ -0,0 +1,4 @@
+Server {
+       org.apache.zookeeper.server.auth.DigestLoginModule required
+       user_zkuser="zkuserpassword";
+};

+ 1 - 1
documentation/compose/jmx-exporter/kafka-broker.yml

@@ -1,2 +1,2 @@
 rules:
-  - pattern: ".*"
+  - pattern: ".*"

+ 2 - 2
documentation/compose/kafka-cluster-sr-auth.yaml

@@ -57,7 +57,7 @@ services:
   kafka-init-topics:
     image: confluentinc/cp-kafka:7.2.1
     volumes:
-       - ./message.json:/data/message.json
+       - ./data/message.json:/data/message.json
     depends_on:
       - kafka1
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
@@ -80,4 +80,4 @@ services:
       KAFKA_CLUSTERS_0_METRICS_PORT: 9997
       KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry1:8085
       KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME: admin
-      KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD: letmein
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD: letmein
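Note: the `SCHEMAREGISTRYAUTH` variables above reach the application config through Spring's relaxed binding. A minimal YAML sketch of the equivalent cluster entry — key casing is inferred from the environment variable names here, so treat it as an assumption:

```yaml
kafka:
  clusters:
    - name: local
      schemaRegistry: http://schemaregistry1:8085
      schemaRegistryAuth:
        username: admin   # KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME
        password: letmein # KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD
```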

+ 0 - 84
documentation/compose/kafka-clusters-only.yaml

@@ -1,84 +0,0 @@
----
-version: "2"
-services:
-  kafka0:
-    image: confluentinc/cp-kafka:7.2.1
-    hostname: kafka0
-    container_name: kafka0
-    ports:
-      - "9092:9092"
-      - "9997:9997"
-    environment:
-      KAFKA_BROKER_ID: 1
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
-      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092"
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
-      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
-      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
-      KAFKA_JMX_PORT: 9997
-      KAFKA_JMX_HOSTNAME: localhost
-      KAFKA_PROCESS_ROLES: "broker,controller"
-      KAFKA_NODE_ID: 1
-      KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka0:29093"
-      KAFKA_LISTENERS: "PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092"
-      KAFKA_INTER_BROKER_LISTENER_NAME: "PLAINTEXT"
-      KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
-      KAFKA_LOG_DIRS: "/tmp/kraft-combined-logs"
-    volumes:
-      - ./scripts/update_run_cluster.sh:/tmp/update_run.sh
-      - ./scripts/clusterID:/tmp/clusterID
-    command: 'bash -c ''if [ ! -f /tmp/update_run.sh ]; then echo "ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'''
-
-  schemaregistry0:
-    image: confluentinc/cp-schema-registry:7.2.1
-    depends_on:
-      - kafka0
-    environment:
-      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
-      SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
-      SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
-      SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
-
-      SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
-      SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
-      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
-    ports:
-      - 8085:8085
-
-  kafka-connect0:
-    image: confluentinc/cp-kafka-connect:7.2.1
-    ports:
-      - 8083:8083
-    depends_on:
-      - kafka0
-      - schemaregistry0
-    environment:
-      CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
-      CONNECT_GROUP_ID: compose-connect-group
-      CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
-      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
-      CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
-      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
-      CONNECT_STATUS_STORAGE_TOPIC: _connect_status
-      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
-      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
-      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
-      CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
-      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
-      CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
-      CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
-      CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
-      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
-
-  kafka-init-topics:
-    image: confluentinc/cp-kafka:7.2.1
-    volumes:
-      - ./message.json:/data/message.json
-    depends_on:
-      - kafka0
-    command: "bash -c 'echo Waiting for Kafka to be ready... && \
-      cub kafka-ready -b kafka0:29092 1 30 && \
-      kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
-      kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
-      kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"

+ 59 - 0
documentation/compose/kafka-ui-acl-with-zk.yaml

@@ -0,0 +1,59 @@
+---
+version: '2'
+services:
+
+  kafka-ui:
+    container_name: kafka-ui
+    image: provectuslabs/kafka-ui:latest
+    ports:
+      - 8080:8080
+    depends_on:
+      - zookeeper
+      - kafka
+    environment:
+      KAFKA_CLUSTERS_0_NAME: local
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
+      KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_PLAINTEXT
+      KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM: PLAIN
+      KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";'
+
+  zookeeper:
+    image: wurstmeister/zookeeper:3.4.6
+    environment:
+      JVMFLAGS: "-Djava.security.auth.login.config=/etc/zookeeper/zookeeper_jaas.conf"
+    volumes:
+      - ./jaas/zookeeper_jaas.conf:/etc/zookeeper/zookeeper_jaas.conf
+    ports:
+      - 2181:2181
+
+  kafka:
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka
+    container_name: kafka
+    ports:
+      - "9092:9092"
+      - "9997:9997"
+    environment:
+      KAFKA_BROKER_ID: 1
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'SASL_PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092'
+      KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas/kafka_server.conf"
+      KAFKA_AUTHORIZER_CLASS_NAME: "kafka.security.authorizer.AclAuthorizer"
+      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
+      KAFKA_LISTENERS: 'SASL_PLAINTEXT://kafka:29092,CONTROLLER://kafka:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'SASL_PLAINTEXT'
+      KAFKA_SASL_ENABLED_MECHANISMS: 'PLAIN'
+      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: 'PLAIN'
+      KAFKA_SECURITY_PROTOCOL: 'SASL_PLAINTEXT'
+      KAFKA_SUPER_USERS: 'User:admin'
+    volumes:
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+      - ./jaas:/etc/kafka/jaas

+ 3 - 1
documentation/compose/kafka-ui-arm64.yaml

@@ -20,6 +20,8 @@ services:
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
       DYNAMIC_CONFIG_ENABLED: 'true'  # not necessary, added for tests
+      KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED: 'true'
+      KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: 'true'
 
   kafka0:
     image: confluentinc/cp-kafka:7.2.1.arm64
@@ -93,7 +95,7 @@ services:
   kafka-init-topics:
     image: confluentinc/cp-kafka:7.2.1.arm64
     volumes:
-       - ./message.json:/data/message.json
+       - ./data/message.json:/data/message.json
     depends_on:
       - kafka0
     command: "bash -c 'echo Waiting for Kafka to be ready... && \

+ 2 - 2
documentation/compose/kafka-ui-connectors-auth.yaml

@@ -69,7 +69,7 @@ services:
     build:
       context: ./kafka-connect
       args:
-        image: confluentinc/cp-kafka-connect:6.0.1
+        image: confluentinc/cp-kafka-connect:7.2.1
     ports:
       - 8083:8083
     depends_on:
@@ -104,7 +104,7 @@ services:
   kafka-init-topics:
     image: confluentinc/cp-kafka:7.2.1
     volumes:
-      - ./message.json:/data/message.json
+      - ./data/message.json:/data/message.json
     depends_on:
       - kafka0
     command: "bash -c 'echo Waiting for Kafka to be ready... && \

+ 2 - 2
documentation/compose/kafka-ui.yaml

@@ -115,7 +115,7 @@ services:
       SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
 
   kafka-connect0:
-    image: confluentinc/cp-kafka-connect:6.0.1
+    image: confluentinc/cp-kafka-connect:7.2.1
     ports:
       - 8083:8083
     depends_on:
@@ -142,7 +142,7 @@ services:
   kafka-init-topics:
     image: confluentinc/cp-kafka:7.2.1
     volumes:
-       - ./message.json:/data/message.json
+       - ./data/message.json:/data/message.json
     depends_on:
       - kafka1
     command: "bash -c 'echo Waiting for Kafka to be ready... && \

+ 1 - 1
documentation/compose/kafka-with-zookeeper.yaml

@@ -38,7 +38,7 @@ services:
   kafka-init-topics:
     image: confluentinc/cp-kafka:7.2.1
     volumes:
-       - ./message.json:/data/message.json
+       - ./data/message.json:/data/message.json
     depends_on:
       - kafka
     command: "bash -c 'echo Waiting for Kafka to be ready... && \

+ 12 - 15
documentation/compose/auth-ldap.yaml → documentation/compose/ldap.yaml

@@ -15,26 +15,23 @@ services:
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
       KAFKA_CLUSTERS_0_METRICS_PORT: 9997
       KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
+
       AUTH_TYPE: "LDAP"
       SPRING_LDAP_URLS: "ldap://ldap:10389"
-      SPRING_LDAP_DN_PATTERN: "cn={0},ou=people,dc=planetexpress,dc=com"
-
-#     ===== USER SEARCH FILTER INSTEAD OF DN =====
-
-#     SPRING_LDAP_USERFILTER_SEARCHBASE: "dc=planetexpress,dc=com"
-#     SPRING_LDAP_USERFILTER_SEARCHFILTER: "(&(uid={0})(objectClass=inetOrgPerson))"
-#     LDAP ADMIN USER
-#     SPRING_LDAP_ADMINUSER: "cn=admin,dc=planetexpress,dc=com"
-#     SPRING_LDAP_ADMINPASSWORD: "GoodNewsEveryone"
-
-#     ===== ACTIVE DIRECTORY =====
-
-#      OAUTH2.LDAP.ACTIVEDIRECTORY: true
-#      OAUTH2.LDAP.AСTIVEDIRECTORY.DOMAIN: "memelord.lol"
+      SPRING_LDAP_BASE: "cn={0},ou=people,dc=planetexpress,dc=com"
+      SPRING_LDAP_ADMIN_USER: "cn=admin,dc=planetexpress,dc=com"
+      SPRING_LDAP_ADMIN_PASSWORD: "GoodNewsEveryone"
+      SPRING_LDAP_USER_FILTER_SEARCH_BASE: "dc=planetexpress,dc=com"
+      SPRING_LDAP_USER_FILTER_SEARCH_FILTER: "(&(uid={0})(objectClass=inetOrgPerson))"
+      SPRING_LDAP_GROUP_FILTER_SEARCH_BASE: "ou=people,dc=planetexpress,dc=com"
+#     OAUTH2.LDAP.ACTIVEDIRECTORY: true
+#     OAUTH2.LDAP.AСTIVEDIRECTORY.DOMAIN: "memelord.lol"
 
   ldap:
     image: rroemhild/test-openldap:latest
     hostname: "ldap"
+    ports:
+      - 10389:10389
 
   kafka0:
     image: confluentinc/cp-kafka:7.2.1
@@ -79,4 +76,4 @@ services:
 
       SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
       SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
-      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
+      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

+ 1 - 1
documentation/compose/kafka-ui-reverse-proxy.yaml → documentation/compose/nginx-proxy.yaml

@@ -4,7 +4,7 @@ services:
   nginx:
     image: nginx:latest
     volumes:
-      - ./proxy.conf:/etc/nginx/conf.d/default.conf
+      - ./data/proxy.conf:/etc/nginx/conf.d/default.conf
     ports:
       - 8080:80
 

+ 0 - 22
documentation/compose/oauth-cognito.yaml

@@ -1,22 +0,0 @@
----
-version: '3.4'
-services:
-
-  kafka-ui:
-    container_name: kafka-ui
-    image: provectuslabs/kafka-ui:local
-    ports:
-      - 8080:8080
-    depends_on:
-      - kafka0 # OMITTED, TAKE UP AN EXAMPLE FROM OTHER COMPOSE FILES
-    environment:
-      KAFKA_CLUSTERS_0_NAME: local
-      KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
-      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
-      AUTH_TYPE: OAUTH2_COGNITO
-      AUTH_COGNITO_ISSUER_URI: "https://cognito-idp.eu-central-1.amazonaws.com/eu-central-xxxxxx"
-      AUTH_COGNITO_CLIENT_ID: ""
-      AUTH_COGNITO_CLIENT_SECRET: ""
-      AUTH_COGNITO_SCOPE: "openid"
-      AUTH_COGNITO_USER_NAME_ATTRIBUTE: "username"
-      AUTH_COGNITO_LOGOUT_URI: "https://<domain>.auth.eu-central-1.amazoncognito.com/logout"

+ 0 - 0
documentation/compose/kafka-ui-traefik-proxy.yaml → documentation/compose/traefik-proxy.yaml


+ 9 - 4
kafka-ui-api/pom.xml

@@ -12,7 +12,7 @@
     <artifactId>kafka-ui-api</artifactId>
 
     <properties>
-        <jacoco.version>0.8.8</jacoco.version>
+        <jacoco.version>0.8.10</jacoco.version>
         <sonar.java.coveragePlugin>jacoco</sonar.java.coveragePlugin>
         <sonar.dynamicAnalysis>reuseReports</sonar.dynamicAnalysis>
         <sonar.jacoco.reportPath>${project.basedir}/target/jacoco.exec</sonar.jacoco.reportPath>
@@ -55,7 +55,7 @@
         <dependency>
             <groupId>org.apache.commons</groupId>
             <artifactId>commons-lang3</artifactId>
-            <version>3.9</version>
+            <version>3.12.0</version>
         </dependency>
         <dependency>
             <groupId>org.projectlombok</groupId>
@@ -91,7 +91,7 @@
         <dependency>
             <groupId>software.amazon.msk</groupId>
             <artifactId>aws-msk-iam-auth</artifactId>
-            <version>1.1.5</version>
+            <version>1.1.7</version>
         </dependency>
 
         <dependency>
@@ -109,6 +109,11 @@
             <groupId>io.projectreactor.addons</groupId>
             <artifactId>reactor-extra</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.json</groupId>
+            <artifactId>json</artifactId>
+            <version>${org.json.version}</version>
+        </dependency>
 
         <dependency>
             <groupId>org.springframework.boot</groupId>
@@ -306,7 +311,7 @@
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
-                <version>3.1.2</version>
+                <version>3.3.0</version>
                 <dependencies>
                     <dependency>
                         <groupId>com.puppycrawl.tools</groupId>

+ 15 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java

@@ -51,6 +51,7 @@ public class ClustersProperties {
     List<Masking> masking;
     Long pollingThrottleRate;
     TruststoreConfig ssl;
+    AuditProperties audit;
   }
 
   @Data
@@ -131,8 +132,9 @@ public class ClustersProperties {
   @Data
   public static class Masking {
     Type type;
-    List<String> fields; //if null or empty list - policy will be applied to all fields
-    List<String> pattern; //used when type=MASK
+    List<String> fields;
+    String fieldsNamePattern;
+    List<String> maskingCharsReplacement; //used when type=MASK
     String replacement; //used when type=REPLACE
     String topicKeysPattern;
     String topicValuesPattern;
@@ -142,6 +144,17 @@ public class ClustersProperties {
     }
   }
 
+  @Data
+  @NoArgsConstructor
+  @AllArgsConstructor
+  public static class AuditProperties {
+    String topic;
+    Integer auditTopicsPartitions;
+    Boolean topicAuditEnabled;
+    Boolean consoleAuditEnabled;
+    Map<String, String> auditTopicProperties;
+  }
+
   @PostConstruct
   public void validateAndSetDefaults() {
     if (clusters != null) {
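Note: the renamed `Masking` fields and the new `AuditProperties` bind from the per-cluster YAML config. A minimal sketch, assuming Spring's relaxed binding maps the keys below onto the fields above; the regex values are purely illustrative:

```yaml
kafka:
  clusters:
    - name: local
      masking:
        - type: MASK
          fieldsNamePattern: "card.*"      # regex over field names; complements the explicit `fields` list
          maskingCharsReplacement: ["X"]   # renamed from `pattern`; used when type=MASK
          topicValuesPattern: "payments-.*"
      audit:
        topicAuditEnabled: true            # KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED in the compose files above
        consoleAuditEnabled: true
        auditTopicsPartitions: 1
```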

+ 31 - 8
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CorsGlobalConfiguration.java

@@ -1,18 +1,41 @@
 package com.provectus.kafka.ui.config;
 
+import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
+import org.springframework.http.HttpHeaders;
+import org.springframework.http.HttpMethod;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.server.reactive.ServerHttpRequest;
+import org.springframework.http.server.reactive.ServerHttpResponse;
 import org.springframework.web.reactive.config.CorsRegistry;
 import org.springframework.web.reactive.config.WebFluxConfigurer;
+import org.springframework.web.server.ServerWebExchange;
+import org.springframework.web.server.WebFilter;
+import org.springframework.web.server.WebFilterChain;
+import reactor.core.publisher.Mono;
 
 @Configuration
-public class CorsGlobalConfiguration implements WebFluxConfigurer {
+public class CorsGlobalConfiguration {
 
-  @Override
-  public void addCorsMappings(CorsRegistry registry) {
-    registry.addMapping("/**")
-        .allowedOrigins("*")
-        .allowedMethods("*")
-        .allowedHeaders("*")
-        .allowCredentials(false);
+  @Bean
+  public WebFilter corsFilter() {
+    return (final ServerWebExchange ctx, final WebFilterChain chain) -> {
+      final ServerHttpRequest request = ctx.getRequest();
+
+      final ServerHttpResponse response = ctx.getResponse();
+      final HttpHeaders headers = response.getHeaders();
+      headers.add("Access-Control-Allow-Origin", "*");
+      headers.add("Access-Control-Allow-Methods", "GET, PUT, POST, DELETE, OPTIONS");
+      headers.add("Access-Control-Max-Age", "3600");
+      headers.add("Access-Control-Allow-Headers", "Content-Type");
+
+      if (request.getMethod() == HttpMethod.OPTIONS) {
+        response.setStatusCode(HttpStatus.OK);
+        return Mono.empty();
+      }
+
+      return chain.filter(ctx);
+    };
   }
+
 }

+ 11 - 11
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/BasicAuthSecurityConfig.java

@@ -7,12 +7,10 @@ import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
-import org.springframework.security.config.web.server.SecurityWebFiltersOrder;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
 import org.springframework.security.web.server.SecurityWebFilterChain;
 import org.springframework.security.web.server.authentication.RedirectServerAuthenticationSuccessHandler;
 import org.springframework.security.web.server.authentication.logout.RedirectServerLogoutSuccessHandler;
-import org.springframework.security.web.server.ui.LogoutPageGeneratingWebFilter;
 
 @Configuration
 @EnableWebFluxSecurity
@@ -33,15 +31,17 @@ public class BasicAuthSecurityConfig extends AbstractAuthSecurityConfig {
     final var logoutSuccessHandler = new RedirectServerLogoutSuccessHandler();
     logoutSuccessHandler.setLogoutSuccessUrl(URI.create(LOGOUT_URL));
 
-    return http
-        .addFilterAfter(new LogoutPageGeneratingWebFilter(), SecurityWebFiltersOrder.REACTOR_CONTEXT)
-        .csrf().disable()
-        .authorizeExchange()
-        .pathMatchers(AUTH_WHITELIST).permitAll()
-        .anyExchange().authenticated()
-        .and().formLogin().loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler)
-        .and().logout().logoutSuccessHandler(logoutSuccessHandler)
-        .and().build();
+
+    return http.authorizeExchange(spec -> spec
+            .pathMatchers(AUTH_WHITELIST)
+            .permitAll()
+            .anyExchange()
+            .authenticated()
+        )
+        .formLogin(spec -> spec.loginPage(LOGIN_URL).authenticationSuccessHandler(authHandler))
+        .logout(spec -> spec.logoutSuccessHandler(logoutSuccessHandler))
+        .csrf(ServerHttpSecurity.CsrfSpec::disable)
+        .build();
   }
 
 }

+ 6 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/DisabledAuthSecurityConfig.java

@@ -27,10 +27,12 @@ public class DisabledAuthSecurityConfig extends AbstractAuthSecurityConfig {
       System.exit(1);
     }
     log.warn("Authentication is disabled. Access will be unrestricted.");
-    return http.authorizeExchange()
-        .anyExchange().permitAll()
-        .and()
-        .csrf().disable()
+
+    return http.authorizeExchange(spec -> spec
+            .anyExchange()
+            .permitAll()
+        )
+        .csrf(ServerHttpSecurity.CsrfSpec::disable)
         .build();
   }
 

+ 26 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/LdapProperties.java

@@ -0,0 +1,26 @@
+package com.provectus.kafka.ui.config.auth;
+
+import lombok.Data;
+import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+
+@ConfigurationProperties("spring.ldap")
+@Data
+public class LdapProperties {
+
+  private String urls;
+  private String base;
+  private String adminUser;
+  private String adminPassword;
+  private String userFilterSearchBase;
+  private String userFilterSearchFilter;
+  private String groupFilterSearchBase;
+  private String groupFilterSearchFilter;
+  private String groupRoleAttribute;
+
+  @Value("${oauth2.ldap.activeDirectory:false}")
+  private boolean isActiveDirectory;
+  @Value("${oauth2.ldap.aсtiveDirectory.domain:@null}")
+  private String activeDirectoryDomain;
+
+}
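Note: `LdapProperties` consolidates the `@Value` lookups that `LdapSecurityConfig` previously did by hand. A minimal application-config sketch matching the environment variables in ldap.yaml above — kebab-case keys assume Spring's relaxed binding, and the DN values are the test-openldap defaults from that compose file:

```yaml
auth:
  type: LDAP
spring:
  ldap:
    urls: "ldap://ldap:10389"
    base: "cn={0},ou=people,dc=planetexpress,dc=com"
    admin-user: "cn=admin,dc=planetexpress,dc=com"
    admin-password: "GoodNewsEveryone"
    user-filter-search-base: "dc=planetexpress,dc=com"
    user-filter-search-filter: "(&(uid={0})(objectClass=inetOrgPerson))"
    group-filter-search-base: "ou=people,dc=planetexpress,dc=com"
```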

+ 86 - 41
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/LdapSecurityConfig.java

@@ -1,106 +1,151 @@
 package com.provectus.kafka.ui.config.auth;
 package com.provectus.kafka.ui.config.auth;
 
 
+import static com.provectus.kafka.ui.config.auth.AbstractAuthSecurityConfig.AUTH_WHITELIST;
+
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
+import com.provectus.kafka.ui.service.rbac.extractor.RbacLdapAuthoritiesExtractor;
+import java.util.Collection;
 import java.util.List;
 import java.util.List;
+import java.util.Optional;
+import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import lombok.extern.slf4j.Slf4j;
-import org.springframework.beans.factory.annotation.Value;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.boot.autoconfigure.ldap.LdapAutoConfiguration;
 import org.springframework.boot.autoconfigure.ldap.LdapAutoConfiguration;
+import org.springframework.boot.context.properties.EnableConfigurationProperties;
+import org.springframework.context.ApplicationContext;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
 import org.springframework.context.annotation.Configuration;
 import org.springframework.context.annotation.Import;
 import org.springframework.context.annotation.Import;
+import org.springframework.context.annotation.Primary;
+import org.springframework.ldap.core.DirContextOperations;
 import org.springframework.ldap.core.support.BaseLdapPathContextSource;
 import org.springframework.ldap.core.support.BaseLdapPathContextSource;
 import org.springframework.ldap.core.support.LdapContextSource;
 import org.springframework.ldap.core.support.LdapContextSource;
 import org.springframework.security.authentication.AuthenticationManager;
 import org.springframework.security.authentication.AuthenticationManager;
 import org.springframework.security.authentication.ProviderManager;
 import org.springframework.security.authentication.ProviderManager;
 import org.springframework.security.authentication.ReactiveAuthenticationManager;
 import org.springframework.security.authentication.ReactiveAuthenticationManager;
 import org.springframework.security.authentication.ReactiveAuthenticationManagerAdapter;
 import org.springframework.security.authentication.ReactiveAuthenticationManagerAdapter;
+import org.springframework.security.config.Customizer;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
+import org.springframework.security.core.GrantedAuthority;
+import org.springframework.security.core.userdetails.UserDetails;
 import org.springframework.security.ldap.authentication.AbstractLdapAuthenticationProvider;
 import org.springframework.security.ldap.authentication.AbstractLdapAuthenticationProvider;
 import org.springframework.security.ldap.authentication.BindAuthenticator;
 import org.springframework.security.ldap.authentication.BindAuthenticator;
 import org.springframework.security.ldap.authentication.LdapAuthenticationProvider;
 import org.springframework.security.ldap.authentication.LdapAuthenticationProvider;
 import org.springframework.security.ldap.authentication.ad.ActiveDirectoryLdapAuthenticationProvider;
 import org.springframework.security.ldap.authentication.ad.ActiveDirectoryLdapAuthenticationProvider;
 import org.springframework.security.ldap.search.FilterBasedLdapUserSearch;
 import org.springframework.security.ldap.search.FilterBasedLdapUserSearch;
 import org.springframework.security.ldap.search.LdapUserSearch;
 import org.springframework.security.ldap.search.LdapUserSearch;
+import org.springframework.security.ldap.userdetails.DefaultLdapAuthoritiesPopulator;
+import org.springframework.security.ldap.userdetails.LdapAuthoritiesPopulator;
+import org.springframework.security.ldap.userdetails.LdapUserDetailsMapper;
 import org.springframework.security.web.server.SecurityWebFilterChain;
 import org.springframework.security.web.server.SecurityWebFilterChain;
 
 
 @Configuration
 @Configuration
 @EnableWebFluxSecurity
 @EnableWebFluxSecurity
 @ConditionalOnProperty(value = "auth.type", havingValue = "LDAP")
 @ConditionalOnProperty(value = "auth.type", havingValue = "LDAP")
 @Import(LdapAutoConfiguration.class)
 @Import(LdapAutoConfiguration.class)
+@EnableConfigurationProperties(LdapProperties.class)
+@RequiredArgsConstructor
 @Slf4j
 @Slf4j
-public class LdapSecurityConfig extends AbstractAuthSecurityConfig {
-
-  @Value("${spring.ldap.urls}")
-  private String ldapUrls;
-  @Value("${spring.ldap.dn.pattern:#{null}}")
-  private String ldapUserDnPattern;
-  @Value("${spring.ldap.adminUser:#{null}}")
-  private String adminUser;
-  @Value("${spring.ldap.adminPassword:#{null}}")
-  private String adminPassword;
-  @Value("${spring.ldap.userFilter.searchBase:#{null}}")
-  private String userFilterSearchBase;
-  @Value("${spring.ldap.userFilter.searchFilter:#{null}}")
-  private String userFilterSearchFilter;
-
-  @Value("${oauth2.ldap.activeDirectory:false}")
-  private boolean isActiveDirectory;
-  @Value("${oauth2.ldap.aсtiveDirectory.domain:#{null}}")
-  private String activeDirectoryDomain;
+public class LdapSecurityConfig {
+
+  private final LdapProperties props;
 
 
   @Bean
   @Bean
-  public ReactiveAuthenticationManager authenticationManager(BaseLdapPathContextSource contextSource) {
+  public ReactiveAuthenticationManager authenticationManager(BaseLdapPathContextSource contextSource,
+                                                             LdapAuthoritiesPopulator authoritiesExtractor,
+                                                             AccessControlService acs) {
+    var rbacEnabled = acs.isRbacEnabled();
     BindAuthenticator ba = new BindAuthenticator(contextSource);
-    if (ldapUserDnPattern != null) {
-      ba.setUserDnPatterns(new String[] {ldapUserDnPattern});
+    if (props.getBase() != null) {
+      ba.setUserDnPatterns(new String[] {props.getBase()});
     }
-    if (userFilterSearchFilter != null) {
+    if (props.getUserFilterSearchFilter() != null) {
       LdapUserSearch userSearch =
-          new FilterBasedLdapUserSearch(userFilterSearchBase, userFilterSearchFilter, contextSource);
+          new FilterBasedLdapUserSearch(props.getUserFilterSearchBase(), props.getUserFilterSearchFilter(),
+              contextSource);
       ba.setUserSearch(userSearch);
     }
 
     AbstractLdapAuthenticationProvider authenticationProvider;
-    if (!isActiveDirectory) {
-      authenticationProvider = new LdapAuthenticationProvider(ba);
+    if (!props.isActiveDirectory()) {
+      authenticationProvider = rbacEnabled
+          ? new LdapAuthenticationProvider(ba, authoritiesExtractor)
+          : new LdapAuthenticationProvider(ba);
     } else {
-      authenticationProvider = new ActiveDirectoryLdapAuthenticationProvider(activeDirectoryDomain, ldapUrls);
+      authenticationProvider = new ActiveDirectoryLdapAuthenticationProvider(props.getActiveDirectoryDomain(),
+          props.getUrls()); // TODO Issue #3741
       authenticationProvider.setUseAuthenticationRequestCredentials(true);
     }
 
+    if (rbacEnabled) {
+      authenticationProvider.setUserDetailsContextMapper(new UserDetailsMapper());
+    }
+
     AuthenticationManager am = new ProviderManager(List.of(authenticationProvider));
 
     return new ReactiveAuthenticationManagerAdapter(am);
   }
 
   @Bean
+  @Primary
   public BaseLdapPathContextSource contextSource() {
     LdapContextSource ctx = new LdapContextSource();
-    ctx.setUrl(ldapUrls);
-    ctx.setUserDn(adminUser);
-    ctx.setPassword(adminPassword);
+    ctx.setUrl(props.getUrls());
+    ctx.setUserDn(props.getAdminUser());
+    ctx.setPassword(props.getAdminPassword());
     ctx.afterPropertiesSet();
     return ctx;
   }
 
+  @Bean
+  @Primary
+  public DefaultLdapAuthoritiesPopulator ldapAuthoritiesExtractor(ApplicationContext context,
+                                                                  BaseLdapPathContextSource contextSource,
+                                                                  AccessControlService acs) {
+    var rbacEnabled = acs != null && acs.isRbacEnabled();
+
+    DefaultLdapAuthoritiesPopulator extractor;
+
+    if (rbacEnabled) {
+      extractor = new RbacLdapAuthoritiesExtractor(context, contextSource, props.getGroupFilterSearchBase());
+    } else {
+      extractor = new DefaultLdapAuthoritiesPopulator(contextSource, props.getGroupFilterSearchBase());
+    }
+
+    Optional.ofNullable(props.getGroupFilterSearchFilter()).ifPresent(extractor::setGroupSearchFilter);
+    extractor.setRolePrefix("");
+    extractor.setConvertToUpperCase(false);
+    extractor.setSearchSubtree(true);
+    return extractor;
+  }
+
   @Bean
   public SecurityWebFilterChain configureLdap(ServerHttpSecurity http) {
     log.info("Configuring LDAP authentication.");
-    if (isActiveDirectory) {
+    if (props.isActiveDirectory()) {
       log.info("Active Directory support for LDAP has been enabled.");
     }
 
-    http
-        .authorizeExchange()
-        .pathMatchers(AUTH_WHITELIST)
-        .permitAll()
-        .anyExchange()
-        .authenticated()
-        .and()
-        .httpBasic();
+    return http.authorizeExchange(spec -> spec
+            .pathMatchers(AUTH_WHITELIST)
+            .permitAll()
+            .anyExchange()
+            .authenticated()
+        )
+        .formLogin(Customizer.withDefaults())
+        .logout(Customizer.withDefaults())
+        .csrf(ServerHttpSecurity.CsrfSpec::disable)
+        .build();
+  }
 
-    return http.csrf().disable().build();
+  private static class UserDetailsMapper extends LdapUserDetailsMapper {
+    @Override
+    public UserDetails mapUserFromContext(DirContextOperations ctx, String username,
+                                          Collection<? extends GrantedAuthority> authorities) {
+      UserDetails userDetails = super.mapUserFromContext(ctx, username, authorities);
+      return new RbacLdapUser(userDetails);
+    }
   }
 
 }
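
For reference, a minimal standalone sketch (not part of this changeset) of what the ReactiveAuthenticationManagerAdapter built above does: it wraps the blocking ProviderManager and runs authenticate() off the event loop, exposing the result as a Mono. The stub manager and credentials below are purely illustrative.

    import java.util.List;
    import org.springframework.security.authentication.AuthenticationManager;
    import org.springframework.security.authentication.ReactiveAuthenticationManagerAdapter;
    import org.springframework.security.authentication.UsernamePasswordAuthenticationToken;
    import org.springframework.security.core.authority.SimpleGrantedAuthority;

    public class ReactiveAdapterSketch {
      public static void main(String[] args) {
        // Stand-in for the ProviderManager assembled in the bean above;
        // this stub accepts any credentials and grants a single authority.
        AuthenticationManager blocking = auth ->
            new UsernamePasswordAuthenticationToken(auth.getName(), "n/a",
                List.of(new SimpleGrantedAuthority("ROLE_USER")));
        var reactive = new ReactiveAuthenticationManagerAdapter(blocking);
        reactive.authenticate(new UsernamePasswordAuthenticationToken("jdoe", "secret"))
            .doOnNext(a -> System.out.println("authenticated: " + a.getName()))
            .block();
      }
    }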

+ 11 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthProperties.java

@@ -1,6 +1,7 @@
 package com.provectus.kafka.ui.config.auth;
 
 import jakarta.annotation.PostConstruct;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
@@ -14,7 +15,16 @@ public class OAuthProperties {
   private Map<String, OAuth2Provider> client = new HashMap<>();
 
   @PostConstruct
-  public void validate() {
+  public void init() {
+    getClient().values().forEach((provider) -> {
+      if (provider.getCustomParams() == null) {
+        provider.setCustomParams(Collections.emptyMap());
+      }
+      if (provider.getScope() == null) {
+        provider.setScope(Collections.emptySet());
+      }
+    });
+
     getClient().values().forEach(this::validateProvider);
   }
 

+ 1 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthPropertiesConverter.java

@@ -73,8 +73,7 @@ public final class OAuthPropertiesConverter {
   }
 
   private static boolean isGoogle(OAuth2Provider provider) {
-    return provider.getCustomParams() != null
-        && GOOGLE.equalsIgnoreCase(provider.getCustomParams().get(TYPE));
+    return GOOGLE.equalsIgnoreCase(provider.getCustomParams().get(TYPE));
   }
 }
 
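A standalone sketch (values are illustrative) of the normalization that init() in OAuthProperties performs: once customParams defaults to an empty map, lookups such as the simplified isGoogle() above need no null guard.

    import java.util.Collections;
    import java.util.Map;

    public class CustomParamsDefaultingSketch {
      public static void main(String[] args) {
        Map<String, String> customParams = null; // what the binder leaves when the block is absent
        if (customParams == null) {
          customParams = Collections.emptyMap();
        }
        // equalsIgnoreCase(null) is simply false, so there is no NullPointerException here:
        boolean google = "google".equalsIgnoreCase(customParams.get("type"));
        System.out.println("google provider: " + google); // -> false
      }
    }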

+ 24 - 29
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthSecurityConfig.java

@@ -12,10 +12,11 @@ import lombok.extern.log4j.Log4j2;
 import org.jetbrains.annotations.Nullable;
 import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties;
-import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesRegistrationAdapter;
+import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesMapper;
 import org.springframework.boot.context.properties.EnableConfigurationProperties;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
+import org.springframework.security.config.Customizer;
 import org.springframework.security.config.annotation.method.configuration.EnableReactiveMethodSecurity;
 import org.springframework.security.config.annotation.web.reactive.EnableWebFluxSecurity;
 import org.springframework.security.config.web.server.ServerHttpSecurity;
@@ -49,21 +50,15 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
   public SecurityWebFilterChain configure(ServerHttpSecurity http, OAuthLogoutSuccessHandler logoutHandler) {
     log.info("Configuring OAUTH2 authentication.");
 
-    return http.authorizeExchange()
-        .pathMatchers(AUTH_WHITELIST)
-        .permitAll()
-        .anyExchange()
-        .authenticated()
-
-        .and()
-        .oauth2Login()
-
-        .and()
-        .logout()
-        .logoutSuccessHandler(logoutHandler)
-
-        .and()
-        .csrf().disable()
+    return http.authorizeExchange(spec -> spec
+            .pathMatchers(AUTH_WHITELIST)
+            .permitAll()
+            .anyExchange()
+            .authenticated()
+        )
+        .oauth2Login(Customizer.withDefaults())
+        .logout(spec -> spec.logoutSuccessHandler(logoutHandler))
+        .csrf(ServerHttpSecurity.CsrfSpec::disable)
         .build();
   }
 
@@ -72,13 +67,13 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
     final OidcReactiveOAuth2UserService delegate = new OidcReactiveOAuth2UserService();
     return request -> delegate.loadUser(request)
         .flatMap(user -> {
-          String providerId = request.getClientRegistration().getRegistrationId();
-          final var extractor = getExtractor(providerId, acs);
+          var provider = getProviderByProviderId(request.getClientRegistration().getRegistrationId());
+          final var extractor = getExtractor(provider, acs);
           if (extractor == null) {
             return Mono.just(user);
           }
 
-          return extractor.extract(acs, user, Map.of("request", request))
+          return extractor.extract(acs, user, Map.of("request", request, "provider", provider))
              .map(groups -> new RbacOidcUser(user, groups));
         });
   }
@@ -88,13 +83,13 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
     final DefaultReactiveOAuth2UserService delegate = new DefaultReactiveOAuth2UserService();
     return request -> delegate.loadUser(request)
         .flatMap(user -> {
-          String providerId = request.getClientRegistration().getRegistrationId();
-          final var extractor = getExtractor(providerId, acs);
+          var provider = getProviderByProviderId(request.getClientRegistration().getRegistrationId());
+          final var extractor = getExtractor(provider, acs);
           if (extractor == null) {
             return Mono.just(user);
           }
 
-          return extractor.extract(acs, user, Map.of("request", request))
+          return extractor.extract(acs, user, Map.of("request", request, "provider", provider))
               .map(groups -> new RbacOAuth2User(user, groups));
         });
   }
@@ -103,7 +98,7 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
   public InMemoryReactiveClientRegistrationRepository clientRegistrationRepository() {
     final OAuth2ClientProperties props = OAuthPropertiesConverter.convertProperties(properties);
     final List<ClientRegistration> registrations =
-        new ArrayList<>(OAuth2ClientPropertiesRegistrationAdapter.getClientRegistrations(props).values());
+        new ArrayList<>(new OAuth2ClientPropertiesMapper(props).asClientRegistrations().values());
     return new InMemoryReactiveClientRegistrationRepository(registrations);
   }
 
@@ -113,18 +108,18 @@ public class OAuthSecurityConfig extends AbstractAuthSecurityConfig {
   }
 
   @Nullable
-  private ProviderAuthorityExtractor getExtractor(final String providerId, AccessControlService acs) {
-    final String provider = getProviderByProviderId(providerId);
-    Optional<ProviderAuthorityExtractor> extractor = acs.getExtractors()
+  private ProviderAuthorityExtractor getExtractor(final OAuthProperties.OAuth2Provider provider,
+                                                  AccessControlService acs) {
+    Optional<ProviderAuthorityExtractor> extractor = acs.getOauthExtractors()
         .stream()
-        .filter(e -> e.isApplicable(provider))
+        .filter(e -> e.isApplicable(provider.getProvider(), provider.getCustomParams()))
         .findFirst();
 
     return extractor.orElse(null);
   }
 
-  private String getProviderByProviderId(final String providerId) {
-    return properties.getClient().get(providerId).getProvider();
+  private OAuthProperties.OAuth2Provider getProviderByProviderId(final String providerId) {
+    return properties.getClient().get(providerId);
   }
 
 }
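
For context, a minimal standalone sketch of the Spring Boot 3.1 OAuth2ClientPropertiesMapper adopted above in place of the removed OAuth2ClientPropertiesRegistrationAdapter. The registration values are hypothetical; the "google" provider id resolves through Spring's built-in CommonOAuth2Provider defaults.

    import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties;
    import org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientPropertiesMapper;

    public class PropertiesMapperSketch {
      public static void main(String[] args) {
        var props = new OAuth2ClientProperties();
        var registration = new OAuth2ClientProperties.Registration();
        registration.setProvider("google");        // resolved via CommonOAuth2Provider.GOOGLE
        registration.setClientId("my-client-id");  // placeholder
        registration.setClientSecret("my-secret"); // placeholder
        props.getRegistration().put("google", registration);
        new OAuth2ClientPropertiesMapper(props).asClientRegistrations()
            .forEach((id, reg) -> System.out.println(id + " -> " + reg.getRegistrationId()));
      }
    }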

+ 60 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/RbacLdapUser.java

@@ -0,0 +1,60 @@
+package com.provectus.kafka.ui.config.auth;
+
+import java.util.Collection;
+import java.util.stream.Collectors;
+import org.springframework.security.core.GrantedAuthority;
+import org.springframework.security.core.userdetails.UserDetails;
+
+public class RbacLdapUser implements UserDetails, RbacUser {
+
+  private final UserDetails userDetails;
+
+  public RbacLdapUser(UserDetails userDetails) {
+    this.userDetails = userDetails;
+  }
+
+  @Override
+  public String name() {
+    return userDetails.getUsername();
+  }
+
+  @Override
+  public Collection<String> groups() {
+    return userDetails.getAuthorities().stream().map(GrantedAuthority::getAuthority).collect(Collectors.toSet());
+  }
+
+  @Override
+  public Collection<? extends GrantedAuthority> getAuthorities() {
+    return userDetails.getAuthorities();
+  }
+
+  @Override
+  public String getPassword() {
+    return userDetails.getPassword();
+  }
+
+  @Override
+  public String getUsername() {
+    return userDetails.getUsername();
+  }
+
+  @Override
+  public boolean isAccountNonExpired() {
+    return userDetails.isAccountNonExpired();
+  }
+
+  @Override
+  public boolean isAccountNonLocked() {
+    return userDetails.isAccountNonLocked();
+  }
+
+  @Override
+  public boolean isCredentialsNonExpired() {
+    return userDetails.isCredentialsNonExpired();
+  }
+
+  @Override
+  public boolean isEnabled() {
+    return userDetails.isEnabled();
+  }
+}
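
A hypothetical usage sketch of the wrapper above, built with Spring Security's standard User builder; the username and group names are made up for illustration.

    import org.springframework.security.core.userdetails.User;
    import org.springframework.security.core.userdetails.UserDetails;

    public class RbacLdapUserSketch {
      public static void main(String[] args) {
        UserDetails ldapUser = User.withUsername("jdoe")
            .password("{noop}secret")
            .authorities("developers", "admins") // LDAP groups surfaced as authorities
            .build();
        RbacLdapUser rbacUser = new RbacLdapUser(ldapUser);
        System.out.println(rbacUser.name());   // jdoe
        System.out.println(rbacUser.groups()); // e.g. [developers, admins]
      }
    }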

+ 21 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/condition/ActiveDirectoryCondition.java

@@ -0,0 +1,21 @@
+package com.provectus.kafka.ui.config.auth.condition;
+
+import org.springframework.boot.autoconfigure.condition.AllNestedConditions;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+
+public class ActiveDirectoryCondition extends AllNestedConditions {
+
+  public ActiveDirectoryCondition() {
+    super(ConfigurationPhase.PARSE_CONFIGURATION);
+  }
+
+  @ConditionalOnProperty(value = "auth.type", havingValue = "LDAP")
+  public static class OnAuthType {
+
+  }
+
+  @ConditionalOnProperty(value = "${oauth2.ldap.activeDirectory}:false", havingValue = "true", matchIfMissing = false)
+  public static class OnActiveDirectory {
+
+  }
+}
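
For reference, a sketch of how an AllNestedConditions composite like this is typically attached; the configuration class and bean below are illustrative, not part of this changeset.

    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Conditional;
    import org.springframework.context.annotation.Configuration;

    @Configuration
    @Conditional(ActiveDirectoryCondition.class)
    public class ActiveDirectoryOnlySketch {
      @Bean
      public String activeDirectoryMarker() {
        // Instantiated only when both nested conditions above match.
        return "active-directory-enabled";
      }
    }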

+ 2 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/logout/CognitoLogoutSuccessHandler.java

@@ -46,10 +46,8 @@ public class CognitoLogoutSuccessHandler implements LogoutSuccessHandler {
         .fragment(null)
         .build();
 
-    Assert.isTrue(
-        provider.getCustomParams() != null && provider.getCustomParams().containsKey("logoutUrl"),
-        "Custom params should contain 'logoutUrl'"
-    );
+    Assert.isTrue(provider.getCustomParams().containsKey("logoutUrl"),
+        "Custom params should contain 'logoutUrl'");
     final var uri = UriComponentsBuilder
         .fromUri(URI.create(provider.getCustomParams().get("logoutUrl")))
         .queryParam("client_id", provider.getClientId())

+ 19 - 10
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AccessController.java

@@ -12,8 +12,11 @@ import java.security.Principal;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.Objects;
 import java.util.stream.Collectors;
+import javax.annotation.Nullable;
 import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;
 import org.springframework.security.core.context.ReactiveSecurityContextHolder;
 import org.springframework.security.core.context.SecurityContext;
@@ -23,15 +26,12 @@ import reactor.core.publisher.Mono;
 
 @RestController
 @RequiredArgsConstructor
+@Slf4j
 public class AccessController implements AuthorizationApi {
 
   private final AccessControlService accessControlService;
 
   public Mono<ResponseEntity<AuthenticationInfoDTO>> getUserAuthInfo(ServerWebExchange exchange) {
-    AuthenticationInfoDTO dto = new AuthenticationInfoDTO();
-    dto.setRbacEnabled(accessControlService.isRbacEnabled());
-    UserInfoDTO userInfo = new UserInfoDTO();
-
     Mono<List<UserPermissionDTO>> permissions = accessControlService.getUser()
         .map(user -> accessControlService.getRoles()
             .stream()
@@ -49,13 +49,11 @@ public class AccessController implements AuthorizationApi {
     return userName
         .zipWith(permissions)
         .map(data -> {
-          userInfo.setUsername(data.getT1());
-          userInfo.setPermissions(data.getT2());
-
-          dto.setUserInfo(userInfo);
+          var dto = new AuthenticationInfoDTO(accessControlService.isRbacEnabled());
+          dto.setUserInfo(new UserInfoDTO(data.getT1(), data.getT2()));
           return dto;
         })
-        .switchIfEmpty(Mono.just(dto))
+        .switchIfEmpty(Mono.just(new AuthenticationInfoDTO(accessControlService.isRbacEnabled())))
         .map(ResponseEntity::ok);
   }
 
@@ -70,11 +68,22 @@ public class AccessController implements AuthorizationApi {
           dto.setActions(permission.getActions()
               .stream()
               .map(String::toUpperCase)
-              .map(ActionDTO::valueOf)
+              .map(this::mapAction)
+              .filter(Objects::nonNull)
               .collect(Collectors.toList()));
           return dto;
         })
         .collect(Collectors.toList());
   }
 
+  @Nullable
+  private ActionDTO mapAction(String name) {
+    try {
+      return ActionDTO.fromValue(name);
+    } catch (IllegalArgumentException e) {
+      log.warn("Unknown Action [{}], skipping", name);
+      return null;
+    }
+  }
+
 }
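
A standalone sketch of the tolerant mapping pattern mapAction() uses above: unknown names are logged and dropped instead of failing the whole permissions listing. The Action enum here is a stand-in for the generated ActionDTO.

    import java.util.List;
    import java.util.Objects;
    import java.util.stream.Stream;

    public class TolerantEnumMappingSketch {
      enum Action { VIEW, EDIT }

      static Action mapAction(String name) {
        try {
          return Action.valueOf(name);
        } catch (IllegalArgumentException e) {
          System.err.println("Unknown Action [" + name + "], skipping");
          return null;
        }
      }

      public static void main(String[] args) {
        List<Action> actions = Stream.of("VIEW", "DELETE", "EDIT")
            .map(TolerantEnumMappingSketch::mapAction)
            .filter(Objects::nonNull)
            .toList();
        System.out.println(actions); // [VIEW, EDIT]
      }
    }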

+ 126 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AclsController.java

@@ -0,0 +1,126 @@
+package com.provectus.kafka.ui.controller;
+
+import com.provectus.kafka.ui.api.AclsApi;
+import com.provectus.kafka.ui.mapper.ClusterMapper;
+import com.provectus.kafka.ui.model.KafkaAclDTO;
+import com.provectus.kafka.ui.model.KafkaAclNamePatternTypeDTO;
+import com.provectus.kafka.ui.model.KafkaAclResourceTypeDTO;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.model.rbac.permission.AclAction;
+import com.provectus.kafka.ui.service.acl.AclsService;
+import com.provectus.kafka.ui.service.audit.AuditService;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
+import java.util.Optional;
+import lombok.RequiredArgsConstructor;
+import org.apache.kafka.common.resource.PatternType;
+import org.apache.kafka.common.resource.ResourcePatternFilter;
+import org.apache.kafka.common.resource.ResourceType;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.RestController;
+import org.springframework.web.server.ServerWebExchange;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+@RestController
+@RequiredArgsConstructor
+public class AclsController extends AbstractController implements AclsApi {
+
+  private final AclsService aclsService;
+  private final AccessControlService accessControlService;
+  private final AuditService auditService;
+
+  @Override
+  public Mono<ResponseEntity<Void>> createAcl(String clusterName, Mono<KafkaAclDTO> kafkaAclDto,
+                                              ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.EDIT)
+        .operationName("createAcl")
+        .build();
+
+    return accessControlService.validateAccess(context)
+        .then(kafkaAclDto)
+        .map(ClusterMapper::toAclBinding)
+        .flatMap(binding -> aclsService.createAcl(getCluster(clusterName), binding))
+        .doOnEach(sig -> auditService.audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> deleteAcl(String clusterName, Mono<KafkaAclDTO> kafkaAclDto,
+                                              ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.EDIT)
+        .operationName("deleteAcl")
+        .build();
+
+    return accessControlService.validateAccess(context)
+        .then(kafkaAclDto)
+        .map(ClusterMapper::toAclBinding)
+        .flatMap(binding -> aclsService.deleteAcl(getCluster(clusterName), binding))
+        .doOnEach(sig -> auditService.audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
+  }
+
+  @Override
+  public Mono<ResponseEntity<Flux<KafkaAclDTO>>> listAcls(String clusterName,
+                                                          KafkaAclResourceTypeDTO resourceTypeDto,
+                                                          String resourceName,
+                                                          KafkaAclNamePatternTypeDTO namePatternTypeDto,
+                                                          ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.VIEW)
+        .operationName("listAcls")
+        .build();
+
+    var resourceType = Optional.ofNullable(resourceTypeDto)
+        .map(ClusterMapper::mapAclResourceTypeDto)
+        .orElse(ResourceType.ANY);
+
+    var namePatternType = Optional.ofNullable(namePatternTypeDto)
+        .map(ClusterMapper::mapPatternTypeDto)
+        .orElse(PatternType.ANY);
+
+    var filter = new ResourcePatternFilter(resourceType, resourceName, namePatternType);
+
+    return accessControlService.validateAccess(context).then(
+        Mono.just(
+            ResponseEntity.ok(
+                aclsService.listAcls(getCluster(clusterName), filter)
+                    .map(ClusterMapper::toKafkaAclDto)))
+    ).doOnEach(sig -> auditService.audit(context, sig));
+  }
+
+  @Override
+  public Mono<ResponseEntity<String>> getAclAsCsv(String clusterName, ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.VIEW)
+        .operationName("getAclAsCsv")
+        .build();
+
+    return accessControlService.validateAccess(context).then(
+        aclsService.getAclAsCsvString(getCluster(clusterName))
+            .map(ResponseEntity::ok)
+            .flatMap(Mono::just)
+            .doOnEach(sig -> auditService.audit(context, sig))
+    );
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> syncAclsCsv(String clusterName, Mono<String> csvMono, ServerWebExchange exchange) {
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .aclActions(AclAction.EDIT)
+        .operationName("syncAclsCsv")
+        .build();
+
+    return accessControlService.validateAccess(context)
+        .then(csvMono)
+        .flatMap(csv -> aclsService.syncAclWithAclCsv(getCluster(clusterName), csv))
+        .doOnEach(sig -> auditService.audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
+  }
+}
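
For context, a standalone sketch of the kind of AdminClient call a listing such as listAcls() above ultimately issues; the bootstrap address is hypothetical, and the ANY filters mirror the Optional defaulting in the controller.

    import java.util.Map;
    import org.apache.kafka.clients.admin.Admin;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import org.apache.kafka.common.acl.AccessControlEntryFilter;
    import org.apache.kafka.common.acl.AclBindingFilter;
    import org.apache.kafka.common.resource.PatternType;
    import org.apache.kafka.common.resource.ResourcePatternFilter;
    import org.apache.kafka.common.resource.ResourceType;

    public class ListAclsSketch {
      public static void main(String[] args) throws Exception {
        // A null resource name means "any resource name".
        var filter = new AclBindingFilter(
            new ResourcePatternFilter(ResourceType.ANY, null, PatternType.ANY),
            AccessControlEntryFilter.ANY);
        try (Admin admin = Admin.create(
            Map.<String, Object>of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"))) {
          admin.describeAcls(filter).values().get().forEach(System.out::println);
        }
      }
    }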

+ 39 - 27
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ApplicationConfigController.java

@@ -15,6 +15,7 @@ import com.provectus.kafka.ui.model.UploadedFileInfoDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ApplicationInfoService;
 import com.provectus.kafka.ui.service.KafkaClusterFactory;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import com.provectus.kafka.ui.util.ApplicationRestarter;
 import com.provectus.kafka.ui.util.DynamicConfigOperations;
@@ -27,6 +28,7 @@ import org.mapstruct.Mapper;
 import org.mapstruct.factory.Mappers;
 import org.springframework.http.ResponseEntity;
 import org.springframework.http.codec.multipart.FilePart;
+import org.springframework.http.codec.multipart.Part;
 import org.springframework.web.bind.annotation.RestController;
 import org.springframework.web.server.ServerWebExchange;
 import reactor.core.publisher.Flux;
@@ -54,6 +56,7 @@ public class ApplicationConfigController implements ApplicationConfigApi {
   private final ApplicationRestarter restarter;
   private final KafkaClusterFactory kafkaClusterFactory;
   private final ApplicationInfoService applicationInfoService;
+  private final AuditService auditService;
 
   @Override
   public Mono<ResponseEntity<ApplicationInfoDTO>> getApplicationInfo(ServerWebExchange exchange) {
@@ -62,59 +65,68 @@ public class ApplicationConfigController implements ApplicationConfigApi {
 
   @Override
   public Mono<ResponseEntity<ApplicationConfigDTO>> getCurrentConfig(ServerWebExchange exchange) {
-    return accessControlService
-        .validateAccess(
-            AccessContext.builder()
-                .applicationConfigActions(VIEW)
-                .build()
-        )
+    var context = AccessContext.builder()
+        .applicationConfigActions(VIEW)
+        .operationName("getCurrentConfig")
+        .build();
+    return accessControlService.validateAccess(context)
         .then(Mono.fromSupplier(() -> ResponseEntity.ok(
             new ApplicationConfigDTO()
                 .properties(MAPPER.toDto(dynamicConfigOperations.getCurrentProperties()))
-        )));
+        )))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Void>> restartWithConfig(Mono<RestartRequestDTO> restartRequestDto,
                                                       ServerWebExchange exchange) {
-    return accessControlService
-        .validateAccess(
-            AccessContext.builder()
-                .applicationConfigActions(EDIT)
-                .build()
-        )
+    var context =  AccessContext.builder()
+        .applicationConfigActions(EDIT)
+        .operationName("restartWithConfig")
+        .build();
+    return accessControlService.validateAccess(context)
         .then(restartRequestDto)
-        .map(dto -> {
+        .<ResponseEntity<Void>>map(dto -> {
           dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
           restarter.requestRestart();
           return ResponseEntity.ok().build();
-        });
+        })
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
-  public Mono<ResponseEntity<UploadedFileInfoDTO>> uploadConfigRelatedFile(FilePart file, ServerWebExchange exchange) {
-    return accessControlService
-        .validateAccess(
-            AccessContext.builder()
-                .applicationConfigActions(EDIT)
-                .build()
-        )
-        .then(dynamicConfigOperations.uploadConfigRelatedFile(file))
-        .map(path -> new UploadedFileInfoDTO().location(path.toString()))
-        .map(ResponseEntity::ok);
+  public Mono<ResponseEntity<UploadedFileInfoDTO>> uploadConfigRelatedFile(Flux<Part> fileFlux,
+                                                                           ServerWebExchange exchange) {
+    var context = AccessContext.builder()
+        .applicationConfigActions(EDIT)
+        .operationName("uploadConfigRelatedFile")
+        .build();
+    return accessControlService.validateAccess(context)
+        .then(fileFlux.single())
+        .flatMap(file ->
+            dynamicConfigOperations.uploadConfigRelatedFile((FilePart) file)
+                .map(path -> new UploadedFileInfoDTO().location(path.toString()))
+                .map(ResponseEntity::ok))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<ApplicationConfigValidationDTO>> validateConfig(Mono<ApplicationConfigDTO> configDto,
                                                                              ServerWebExchange exchange) {
-    return configDto
+    var context = AccessContext.builder()
+        .applicationConfigActions(EDIT)
+        .operationName("validateConfig")
+        .build();
+    return accessControlService.validateAccess(context)
+        .then(configDto)
         .flatMap(config -> {
           PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
           ClustersProperties clustersProperties = propertiesStructure.getKafka();
           return validateClustersConfig(clustersProperties)
               .map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
         })
-        .map(ResponseEntity::ok);
+        .map(ResponseEntity::ok)
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   private Mono<Map<String, ClusterConfigValidationDTO>> validateClustersConfig(
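
A minimal Reactor sketch (not project code) of the doOnEach(...) audit hook threaded through these controllers: the consumer receives a Signal, so one callback can record both the success and the failure outcome of the pipeline.

    import reactor.core.publisher.Mono;
    import reactor.core.publisher.Signal;

    public class AuditSignalSketch {
      static void audit(String operation, Signal<?> sig) {
        if (sig.isOnComplete()) {
          System.out.println(operation + ": success");
        } else if (sig.isOnError()) {
          System.out.println(operation + ": failed - " + sig.getThrowable());
        }
      }

      public static void main(String[] args) {
        Mono.just("config")
            .doOnEach(sig -> audit("getCurrentConfig", sig))
            .block(); // prints "getCurrentConfig: success"
      }
    }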

+ 53 - 29
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java

@@ -11,8 +11,11 @@ import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
 import com.provectus.kafka.ui.service.BrokerService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
+import java.util.Map;
+import javax.annotation.Nullable;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;
@@ -27,61 +30,78 @@ import reactor.core.publisher.Mono;
 public class BrokersController extends AbstractController implements BrokersApi {
   private final BrokerService brokerService;
   private final ClusterMapper clusterMapper;
+
+  private final AuditService auditService;
   private final AccessControlService accessControlService;
 
   @Override
   public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
                                                           ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
-        .build());
+        .operationName("getBrokers")
+        .build();
 
     var job = brokerService.getBrokers(getCluster(clusterName)).map(clusterMapper::toBrokerDto);
-
-    return validateAccess.thenReturn(ResponseEntity.ok(job));
+    return accessControlService.validateAccess(context)
+        .thenReturn(ResponseEntity.ok(job))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<BrokerMetricsDTO>> getBrokersMetrics(String clusterName, Integer id,
                                                                   ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
-        .build());
+        .operationName("getBrokersMetrics")
+        .operationParams(Map.of("id", id))
+        .build();
 
-    return validateAccess.then(
-        brokerService.getBrokerMetrics(getCluster(clusterName), id)
-            .map(clusterMapper::toBrokerMetrics)
-            .map(ResponseEntity::ok)
-            .onErrorReturn(ResponseEntity.notFound().build())
-    );
+    return accessControlService.validateAccess(context)
+        .then(
+            brokerService.getBrokerMetrics(getCluster(clusterName), id)
+                .map(clusterMapper::toBrokerMetrics)
+                .map(ResponseEntity::ok)
+                .onErrorReturn(ResponseEntity.notFound().build())
+        )
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<BrokersLogdirsDTO>>> getAllBrokersLogdirs(String clusterName,
-                                                                            List<Integer> brokers,
+                                                                            @Nullable List<Integer> brokers,
                                                                             ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+
+    List<Integer> brokerIds = brokers == null ? List.of() : brokers;
+
+    var context = AccessContext.builder()
         .cluster(clusterName)
-        .build());
+        .operationName("getAllBrokersLogdirs")
+        .operationParams(Map.of("brokerIds", brokerIds))
+        .build();
 
-    return validateAccess.thenReturn(ResponseEntity.ok(
-        brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokers)));
+    return accessControlService.validateAccess(context)
+        .thenReturn(ResponseEntity.ok(
+            brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokerIds)))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<BrokerConfigDTO>>> getBrokerConfig(String clusterName,
                                                                      Integer id,
                                                                      ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW)
-        .build());
+        .operationName("getBrokerConfig")
+        .operationParams(Map.of("brokerId", id))
+        .build();
 
-    return validateAccess.thenReturn(
+    return accessControlService.validateAccess(context).thenReturn(
         ResponseEntity.ok(
             brokerService.getBrokerConfig(getCluster(clusterName), id)
                 .map(clusterMapper::toBrokerConfig))
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -89,16 +109,18 @@ public class BrokersController extends AbstractController implements BrokersApi
                                                                      Integer id,
                                                                      Mono<BrokerLogdirUpdateDTO> brokerLogdir,
                                                                      ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
-        .build());
+        .operationName("updateBrokerTopicPartitionLogDir")
+        .operationParams(Map.of("brokerId", id))
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         brokerLogdir
             .flatMap(bld -> brokerService.updateBrokerLogDir(getCluster(clusterName), id, bld))
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -107,16 +129,18 @@ public class BrokersController extends AbstractController implements BrokersApi
                                                              String name,
                                                              Mono<BrokerConfigItemDTO> brokerConfig,
                                                              ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
-        .build());
+        .operationName("updateBrokerConfigByName")
+        .operationParams(Map.of("brokerId", id))
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         brokerConfig
             .flatMap(bci -> brokerService.updateBrokerConfigByName(
                 getCluster(clusterName), id, name, bci.getValue()))
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 }

+ 11 - 5
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ClustersController.java

@@ -6,6 +6,7 @@ import com.provectus.kafka.ui.model.ClusterMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterStatsDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ClusterService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
@@ -21,6 +22,7 @@ import reactor.core.publisher.Mono;
 public class ClustersController extends AbstractController implements ClustersApi {
   private final ClusterService clusterService;
   private final AccessControlService accessControlService;
+  private final AuditService auditService;
 
   @Override
   public Mono<ResponseEntity<Flux<ClusterDTO>>> getClusters(ServerWebExchange exchange) {
@@ -35,6 +37,7 @@ public class ClustersController extends AbstractController implements ClustersAp
                                                                    ServerWebExchange exchange) {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
+        .operationName("getClusterMetrics")
         .build();
 
     return accessControlService.validateAccess(context)
@@ -42,7 +45,8 @@ public class ClustersController extends AbstractController implements ClustersAp
             clusterService.getClusterMetrics(getCluster(clusterName))
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build())
-        );
+        )
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -50,6 +54,7 @@ public class ClustersController extends AbstractController implements ClustersAp
                                                                ServerWebExchange exchange) {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
+        .operationName("getClusterStats")
         .build();
 
     return accessControlService.validateAccess(context)
@@ -57,7 +62,8 @@ public class ClustersController extends AbstractController implements ClustersAp
             clusterService.getClusterStats(getCluster(clusterName))
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build())
-        );
+        )
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -66,11 +72,11 @@ public class ClustersController extends AbstractController implements ClustersAp
 
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
+        .operationName("updateClusterInfo")
         .build();
 
     return accessControlService.validateAccess(context)
-        .then(
-            clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok)
-        );
+        .then(clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 }

+ 33 - 22
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java

@@ -19,6 +19,7 @@ import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import com.provectus.kafka.ui.service.ConsumerGroupService;
 import com.provectus.kafka.ui.service.OffsetsResetService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Map;
 import java.util.Optional;
@@ -42,6 +43,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
   private final ConsumerGroupService consumerGroupService;
   private final OffsetsResetService offsetsResetService;
   private final AccessControlService accessControlService;
+  private final AuditService auditService;
 
   @Value("${consumer.groups.page.size:25}")
   private int defaultConsumerGroupsPageSize;
@@ -50,44 +52,47 @@ public class ConsumerGroupsController extends AbstractController implements Cons
   public Mono<ResponseEntity<Void>> deleteConsumerGroup(String clusterName,
                                                         String id,
                                                         ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .consumerGroup(id)
         .consumerGroupActions(DELETE)
-        .build());
+        .operationName("deleteConsumerGroup")
+        .build();
 
-    return validateAccess.then(
-        consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id)
-            .thenReturn(ResponseEntity.ok().build())
-    );
+    return accessControlService.validateAccess(context)
+        .then(consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id))
+        .doOnEach(sig -> auditService.audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
   }
 
   @Override
   public Mono<ResponseEntity<ConsumerGroupDetailsDTO>> getConsumerGroup(String clusterName,
                                                                         String consumerGroupId,
                                                                         ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .consumerGroup(consumerGroupId)
         .consumerGroupActions(VIEW)
-        .build());
+        .operationName("getConsumerGroup")
+        .build();
 
-    return validateAccess.then(
-        consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
+    return accessControlService.validateAccess(context)
+        .then(consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
             .map(ConsumerGroupMapper::toDetailsDto)
-            .map(ResponseEntity::ok)
-    );
+            .map(ResponseEntity::ok))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(String clusterName,
                                                                              String topicName,
                                                                              ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(TopicAction.VIEW)
-        .build());
+        .operationName("getTopicConsumerGroups")
+        .build();
 
     Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> job =
         consumerGroupService.getConsumerGroupsForTopic(getCluster(clusterName), topicName)
@@ -99,7 +104,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
             .map(ResponseEntity::ok)
             .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
 
-    return validateAccess.then(job);
+    return accessControlService.validateAccess(context)
+        .then(job)
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -112,12 +119,13 @@ public class ConsumerGroupsController extends AbstractController implements Cons
       SortOrderDTO sortOrderDto,
       ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         // consumer group access validation is within the service
-        .build());
+        .operationName("getConsumerGroupsPage")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         consumerGroupService.getConsumerGroupsPage(
                 getCluster(clusterName),
                 Optional.ofNullable(page).filter(i -> i > 0).orElse(1),
@@ -128,7 +136,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
             )
             .map(this::convertPage)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -137,12 +145,13 @@ public class ConsumerGroupsController extends AbstractController implements Cons
                                                               Mono<ConsumerGroupOffsetsResetDTO> resetDto,
                                                               ServerWebExchange exchange) {
     return resetDto.flatMap(reset -> {
-      Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+      var context = AccessContext.builder()
           .cluster(clusterName)
           .topic(reset.getTopic())
           .topicActions(TopicAction.VIEW)
           .consumerGroupActions(RESET_OFFSETS)
-          .build());
+          .operationName("resetConsumerGroupOffsets")
+          .build();
 
       Supplier<Mono<Void>> mono = () -> {
         var cluster = getCluster(clusterName);
@@ -182,7 +191,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
         }
       };
 
-      return validateAccess.then(mono.get());
+      return accessControlService.validateAccess(context)
+          .then(mono.get())
+          .doOnEach(sig -> auditService.audit(context, sig));
     }).thenReturn(ResponseEntity.ok().build());
   }
 

+ 95 - 59
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java

@@ -1,5 +1,9 @@
 package com.provectus.kafka.ui.controller;
 
+import static com.provectus.kafka.ui.model.ConnectorActionDTO.RESTART;
+import static com.provectus.kafka.ui.model.ConnectorActionDTO.RESTART_ALL_TASKS;
+import static com.provectus.kafka.ui.model.ConnectorActionDTO.RESTART_FAILED_TASKS;
+
 import com.provectus.kafka.ui.api.KafkaConnectApi;
 import com.provectus.kafka.ui.model.ConnectDTO;
 import com.provectus.kafka.ui.model.ConnectorActionDTO;
@@ -14,9 +18,11 @@ import com.provectus.kafka.ui.model.TaskDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
 import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
 import com.provectus.kafka.ui.service.KafkaConnectService;
 import com.provectus.kafka.ui.service.KafkaConnectService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Comparator;
 import java.util.Comparator;
 import java.util.Map;
 import java.util.Map;
+import java.util.Set;
 import javax.validation.Valid;
 import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import lombok.extern.slf4j.Slf4j;
@@ -30,8 +36,12 @@ import reactor.core.publisher.Mono;
 @RequiredArgsConstructor
 @RequiredArgsConstructor
 @Slf4j
 @Slf4j
 public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
 public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
+  private static final Set<ConnectorActionDTO> RESTART_ACTIONS
+      = Set.of(RESTART, RESTART_FAILED_TASKS, RESTART_ALL_TASKS);
+
   private final KafkaConnectService kafkaConnectService;
   private final KafkaConnectService kafkaConnectService;
   private final AccessControlService accessControlService;
   private final AccessControlService accessControlService;
+  private final AuditService auditService;
 
 
   @Override
   @Override
   public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
   public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
@@ -47,15 +57,16 @@ public class KafkaConnectController extends AbstractController implements KafkaC
   public Mono<ResponseEntity<Flux<String>>> getConnectors(String clusterName, String connectName,
   public Mono<ResponseEntity<Flux<String>>> getConnectors(String clusterName, String connectName,
                                                           ServerWebExchange exchange) {
                                                           ServerWebExchange exchange) {
 
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .cluster(clusterName)
         .connect(connectName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW)
         .connectActions(ConnectAction.VIEW)
-        .build());
+        .operationName("getConnectors")
+        .build();
 
 
-    return validateAccess.thenReturn(
-        ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName))
-    );
+    return accessControlService.validateAccess(context)
+        .thenReturn(ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName)))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
   }
 
 
   @Override
   @Override
@@ -63,16 +74,17 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                             @Valid Mono<NewConnectorDTO> connector,
                                                             @Valid Mono<NewConnectorDTO> connector,
                                                             ServerWebExchange exchange) {
                                                             ServerWebExchange exchange) {
 
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .cluster(clusterName)
         .connect(connectName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW, ConnectAction.CREATE)
         .connectActions(ConnectAction.VIEW, ConnectAction.CREATE)
-        .build());
+        .operationName("createConnector")
+        .build();
 
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         kafkaConnectService.createConnector(getCluster(clusterName), connectName, connector)
         kafkaConnectService.createConnector(getCluster(clusterName), connectName, connector)
             .map(ResponseEntity::ok)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
   }
 
 
   @Override
   @Override
@@ -80,17 +92,18 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                          String connectorName,
                                                          String connectorName,
                                                          ServerWebExchange exchange) {
                                                          ServerWebExchange exchange) {
 
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .cluster(clusterName)
         .connect(connectName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW)
         .connectActions(ConnectAction.VIEW)
         .connector(connectorName)
         .connector(connectorName)
-        .build());
+        .operationName("getConnector")
+        .build();
 
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         kafkaConnectService.getConnector(getCluster(clusterName), connectName, connectorName)
         kafkaConnectService.getConnector(getCluster(clusterName), connectName, connectorName)
             .map(ResponseEntity::ok)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
   }
 
 
   @Override
   @Override
@@ -98,16 +111,18 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                     String connectorName,
                                                     String connectorName,
                                                     ServerWebExchange exchange) {
                                                     ServerWebExchange exchange) {
 
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .cluster(clusterName)
         .connect(connectName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
         .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
-        .build());
+        .operationName("deleteConnector")
+        .operationParams(Map.of("connectorName", connectName))
+        .build();
 
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         kafkaConnectService.deleteConnector(getCluster(clusterName), connectName, connectorName)
         kafkaConnectService.deleteConnector(getCluster(clusterName), connectName, connectorName)
             .map(ResponseEntity::ok)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
   }
 
 
 
 
@@ -119,14 +134,23 @@ public class KafkaConnectController extends AbstractController implements KafkaC
       SortOrderDTO sortOrder,
       SortOrderDTO sortOrder,
       ServerWebExchange exchange
       ServerWebExchange exchange
   ) {
   ) {
+    var context = AccessContext.builder()
+        .cluster(clusterName)
+        .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
+        .operationName("getAllConnectors")
+        .build();
+
     var comparator = sortOrder == null || sortOrder.equals(SortOrderDTO.ASC)
     var comparator = sortOrder == null || sortOrder.equals(SortOrderDTO.ASC)
         ? getConnectorsComparator(orderBy)
         ? getConnectorsComparator(orderBy)
         : getConnectorsComparator(orderBy).reversed();
         : getConnectorsComparator(orderBy).reversed();
+
     Flux<FullConnectorInfoDTO> job = kafkaConnectService.getAllConnectors(getCluster(clusterName), search)
     Flux<FullConnectorInfoDTO> job = kafkaConnectService.getAllConnectors(getCluster(clusterName), search)
         .filterWhen(dto -> accessControlService.isConnectAccessible(dto.getConnect(), clusterName))
         .filterWhen(dto -> accessControlService.isConnectAccessible(dto.getConnect(), clusterName))
-        .filterWhen(dto -> accessControlService.isConnectorAccessible(dto.getConnect(), dto.getName(), clusterName));
+        .filterWhen(dto -> accessControlService.isConnectorAccessible(dto.getConnect(), dto.getName(), clusterName))
+        .sort(comparator);
 
 
-    return Mono.just(ResponseEntity.ok(job.sort(comparator)));
+    return Mono.just(ResponseEntity.ok(job))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
   }
 
 
   @Override
   @Override
@@ -135,17 +159,18 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                                       String connectorName,
                                                                       String connectorName,
                                                                       ServerWebExchange exchange) {
                                                                       ServerWebExchange exchange) {
 
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .cluster(clusterName)
         .connect(connectName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW)
         .connectActions(ConnectAction.VIEW)
-        .build());
+        .operationName("getConnectorConfig")
+        .build();
 
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         kafkaConnectService
         kafkaConnectService
             .getConnectorConfig(getCluster(clusterName), connectName, connectorName)
             .getConnectorConfig(getCluster(clusterName), connectName, connectorName)
             .map(ResponseEntity::ok)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
   }
 
 
   @Override
   @Override
@@ -154,16 +179,19 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                                Mono<Map<String, Object>> requestBody,
                                                                Mono<Map<String, Object>> requestBody,
                                                                ServerWebExchange exchange) {
                                                                ServerWebExchange exchange) {
 
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .cluster(clusterName)
         .connect(connectName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
         .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
-        .build());
-
-    return validateAccess.then(
-        kafkaConnectService
-            .setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
-            .map(ResponseEntity::ok));
+        .operationName("setConnectorConfig")
+        .operationParams(Map.of("connectorName", connectorName))
+        .build();
+
+    return accessControlService.validateAccess(context).then(
+            kafkaConnectService
+                .setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
+                .map(ResponseEntity::ok))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
   }
 
 
   @Override
   @Override
@@ -171,18 +199,26 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                          String connectorName,
                                                          String connectorName,
                                                          ConnectorActionDTO action,
                                                          ConnectorActionDTO action,
                                                          ServerWebExchange exchange) {
                                                          ServerWebExchange exchange) {
+    ConnectAction[] connectActions;
+    if (RESTART_ACTIONS.contains(action)) {
+      connectActions = new ConnectAction[] {ConnectAction.VIEW, ConnectAction.RESTART};
+    } else {
+      connectActions = new ConnectAction[] {ConnectAction.VIEW, ConnectAction.EDIT};
+    }
 
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .cluster(clusterName)
         .connect(connectName)
         .connect(connectName)
-        .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
-        .build());
+        .connectActions(connectActions)
+        .operationName("updateConnectorState")
+        .operationParams(Map.of("connectorName", connectorName))
+        .build();
 
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         kafkaConnectService
         kafkaConnectService
             .updateConnectorState(getCluster(clusterName), connectName, connectorName, action)
             .updateConnectorState(getCluster(clusterName), connectName, connectorName, action)
             .map(ResponseEntity::ok)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
   }
 
 
   @Override
   @Override
@@ -190,17 +226,19 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                                String connectName,
                                                                String connectName,
                                                                String connectorName,
                                                                String connectorName,
                                                                ServerWebExchange exchange) {
                                                                ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .cluster(clusterName)
         .connect(connectName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW)
         .connectActions(ConnectAction.VIEW)
-        .build());
+        .operationName("getConnectorTasks")
+        .operationParams(Map.of("connectorName", connectorName))
+        .build();
 
 
-    return validateAccess.thenReturn(
+    return accessControlService.validateAccess(context).thenReturn(
         ResponseEntity
         ResponseEntity
             .ok(kafkaConnectService
             .ok(kafkaConnectService
                 .getConnectorTasks(getCluster(clusterName), connectName, connectorName))
                 .getConnectorTasks(getCluster(clusterName), connectName, connectorName))
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
   }
 
 
   @Override
   @Override
@@ -208,34 +246,37 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                          String connectorName, Integer taskId,
                                                          String connectorName, Integer taskId,
                                                          ServerWebExchange exchange) {
                                                          ServerWebExchange exchange) {
 
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .cluster(clusterName)
         .connect(connectName)
         .connect(connectName)
-        .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
-        .build());
+        .connectActions(ConnectAction.VIEW, ConnectAction.RESTART)
+        .operationName("restartConnectorTask")
+        .operationParams(Map.of("connectorName", connectorName))
+        .build();
 
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         kafkaConnectService
         kafkaConnectService
             .restartConnectorTask(getCluster(clusterName), connectName, connectorName, taskId)
             .restartConnectorTask(getCluster(clusterName), connectName, connectorName, taskId)
             .map(ResponseEntity::ok)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
   }
 
 
   @Override
   @Override
   public Mono<ResponseEntity<Flux<ConnectorPluginDTO>>> getConnectorPlugins(
   public Mono<ResponseEntity<Flux<ConnectorPluginDTO>>> getConnectorPlugins(
       String clusterName, String connectName, ServerWebExchange exchange) {
       String clusterName, String connectName, ServerWebExchange exchange) {
 
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .cluster(clusterName)
         .connect(connectName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW)
         .connectActions(ConnectAction.VIEW)
-        .build());
+        .operationName("getConnectorPlugins")
+        .build();
 
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         Mono.just(
         Mono.just(
             ResponseEntity.ok(
             ResponseEntity.ok(
                 kafkaConnectService.getConnectorPlugins(getCluster(clusterName), connectName)))
                 kafkaConnectService.getConnectorPlugins(getCluster(clusterName), connectName)))
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
   }
 
 
   @Override
   @Override
@@ -253,16 +294,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
     if (orderBy == null) {
     if (orderBy == null) {
       return defaultComparator;
       return defaultComparator;
     }
     }
-    switch (orderBy) {
-      case CONNECT:
-        return Comparator.comparing(FullConnectorInfoDTO::getConnect);
-      case TYPE:
-        return Comparator.comparing(FullConnectorInfoDTO::getType);
-      case STATUS:
-        return Comparator.comparing(fullConnectorInfoDTO -> fullConnectorInfoDTO.getStatus().getState());
-      case NAME:
-      default:
-        return defaultComparator;
-    }
+    return switch (orderBy) {
+      case CONNECT -> Comparator.comparing(FullConnectorInfoDTO::getConnect);
+      case TYPE -> Comparator.comparing(FullConnectorInfoDTO::getType);
+      case STATUS -> Comparator.comparing(fullConnectorInfoDTO -> fullConnectorInfoDTO.getStatus().getState());
+      default -> defaultComparator;
+    };
   }
   }
 }
 }

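Two smaller changes in this file are worth noting. updateConnectorState now derives its required permissions from the action: restart-type actions (the RESTART_ACTIONS set) require ConnectAction.RESTART, everything else still requires ConnectAction.EDIT. And getConnectorsComparator was converted to a Java 14+ switch expression, where each arm yields a value and there is no fall-through. A self-contained illustration of that form, using hypothetical Item/SortField types (only the switch-expression shape mirrors the diff):

    import java.util.Comparator;

    // Hypothetical types for illustration.
    record Item(String name, String type) {}

    enum SortField { NAME, TYPE }

    class ComparatorDemo {
      static Comparator<Item> comparator(SortField field) {
        return switch (field) {
          case TYPE -> Comparator.comparing(Item::type);
          default -> Comparator.comparing(Item::name);  // NAME falls back to name order
        };
      }
    }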
+ 37 - 25
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KsqlController.java

@@ -9,6 +9,7 @@ import com.provectus.kafka.ui.model.KsqlTableDescriptionDTO;
 import com.provectus.kafka.ui.model.KsqlTableResponseDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.KsqlAction;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.ksql.KsqlServiceV2;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
@@ -29,38 +30,43 @@ public class KsqlController extends AbstractController implements KsqlApi {
 
   private final KsqlServiceV2 ksqlServiceV2;
   private final AccessControlService accessControlService;
+  private final AuditService auditService;
 
   @Override
   public Mono<ResponseEntity<KsqlCommandV2ResponseDTO>> executeKsql(String clusterName,
-                                                                    Mono<KsqlCommandV2DTO>
-                                                                        ksqlCommand2Dto,
+                                                                    Mono<KsqlCommandV2DTO> ksqlCmdDo,
                                                                     ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
-        .cluster(clusterName)
-        .ksqlActions(KsqlAction.EXECUTE)
-        .build());
-
-    return validateAccess.then(
-        ksqlCommand2Dto.map(dto -> {
-          var id = ksqlServiceV2.registerCommand(
-              getCluster(clusterName),
-              dto.getKsql(),
-              Optional.ofNullable(dto.getStreamsProperties()).orElse(Map.of()));
-          return new KsqlCommandV2ResponseDTO().pipeId(id);
-        }).map(ResponseEntity::ok)
-    );
+    return ksqlCmdDo.flatMap(
+            command -> {
+              var context = AccessContext.builder()
+                  .cluster(clusterName)
+                  .ksqlActions(KsqlAction.EXECUTE)
+                  .operationName("executeKsql")
+                  .operationParams(command)
+                  .build();
+              return accessControlService.validateAccess(context).thenReturn(
+                      new KsqlCommandV2ResponseDTO().pipeId(
+                          ksqlServiceV2.registerCommand(
+                              getCluster(clusterName),
+                              command.getKsql(),
+                              Optional.ofNullable(command.getStreamsProperties()).orElse(Map.of()))))
+                  .doOnEach(sig -> auditService.audit(context, sig));
+            }
+        )
+        .map(ResponseEntity::ok);
   }
 
   @Override
   public Mono<ResponseEntity<Flux<KsqlResponseDTO>>> openKsqlResponsePipe(String clusterName,
                                                                           String pipeId,
                                                                           ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .ksqlActions(KsqlAction.EXECUTE)
-        .build());
+        .operationName("openKsqlResponsePipe")
+        .build();
 
-    return validateAccess.thenReturn(
+    return accessControlService.validateAccess(context).thenReturn(
        ResponseEntity.ok(ksqlServiceV2.execute(pipeId)
            .map(table -> new KsqlResponseDTO()
                .table(
@@ -74,22 +80,28 @@ public class KsqlController extends AbstractController implements KsqlApi {
   @Override
   public Mono<ResponseEntity<Flux<KsqlStreamDescriptionDTO>>> listStreams(String clusterName,
                                                                           ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .ksqlActions(KsqlAction.EXECUTE)
-        .build());
+        .operationName("listStreams")
+        .build();
 
-    return validateAccess.thenReturn(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))));
+    return accessControlService.validateAccess(context)
+        .thenReturn(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<KsqlTableDescriptionDTO>>> listTables(String clusterName,
                                                                         ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .ksqlActions(KsqlAction.EXECUTE)
-        .build());
+        .operationName("listTables")
+        .build();
 
-    return validateAccess.thenReturn(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))));
+    return accessControlService.validateAccess(context)
+        .thenReturn(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 }

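executeKsql is the one endpoint in this file whose audited parameters come from the request body, so the AccessContext has to be built inside flatMap, after the Mono<KsqlCommandV2DTO> resolves; validation and audit then run once per command. Note also that openKsqlResponsePipe gains an operationName but, as the hunk shows, no doOnEach audit hook. A reduced sketch of the per-command shape (processCommand is an illustrative stand-in):

    // Context depends on the request body, so it is built per command (sketch).
    return commandMono.flatMap(command -> {
      var context = AccessContext.builder()
          .cluster(clusterName)
          .operationName("executeKsql")
          .operationParams(command)                // request payload recorded for audit
          .build();
      return accessControlService.validateAccess(context)
          .thenReturn(processCommand(command))     // illustrative service call
          .doOnEach(sig -> auditService.audit(context, sig));
    }).map(ResponseEntity::ok);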
+ 26 - 12
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java

@@ -16,12 +16,16 @@ import com.provectus.kafka.ui.model.PollingModeDTO;
 import com.provectus.kafka.ui.model.SeekDirectionDTO;
 import com.provectus.kafka.ui.model.SeekTypeDTO;
 import com.provectus.kafka.ui.model.SerdeUsageDTO;
+import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
+import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.model.TopicSerdeSuggestionDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
 import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import com.provectus.kafka.ui.service.DeserializationService;
 import com.provectus.kafka.ui.service.MessagesService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
 import java.util.Optional;
@@ -44,25 +48,34 @@ public class MessagesController extends AbstractController implements MessagesAp
   private final MessagesService messagesService;
   private final DeserializationService deserializationService;
   private final AccessControlService accessControlService;
+  private final AuditService auditService;
 
   @Override
   public Mono<ResponseEntity<Void>> deleteTopicMessages(
      String clusterName, String topicName, @Valid List<Integer> partitions,
      ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .topic(topicName)
        .topicActions(MESSAGES_DELETE)
-        .build());
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).<ResponseEntity<Void>>then(
        messagesService.deleteTopicMessages(
            getCluster(clusterName),
            topicName,
            Optional.ofNullable(partitions).orElse(List.of())
        ).thenReturn(ResponseEntity.ok().build())
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
+  }
+
+  @Override
+  public Mono<ResponseEntity<SmartFilterTestExecutionResultDTO>> executeSmartFilterTest(
+      Mono<SmartFilterTestExecutionDTO> smartFilterTestExecutionDto, ServerWebExchange exchange) {
+    return smartFilterTestExecutionDto
+        .map(MessagesService::execSmartFilterTest)
+        .map(ResponseEntity::ok);
   }
 
   @Deprecated
@@ -124,17 +137,18 @@ public class MessagesController extends AbstractController implements MessagesAp
      String clusterName, String topicName, @Valid Mono<CreateTopicMessageDTO> createTopicMessage,
      ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .topic(topicName)
        .topicActions(MESSAGES_PRODUCE)
-        .build());
+        .operationName("sendTopicMessages")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
        createTopicMessage.flatMap(msg ->
            messagesService.sendMessage(getCluster(clusterName), topicName, msg).then()
        ).map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -142,12 +156,12 @@ public class MessagesController extends AbstractController implements MessagesAp
                                                                 String topicName,
                                                                 SerdeUsageDTO use,
                                                                 ServerWebExchange exchange) {
-
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .topic(topicName)
        .topicActions(TopicAction.VIEW)
-        .build());
+        .operationName("getSerdes")
+        .build();
 
     TopicSerdeSuggestionDTO dto = new TopicSerdeSuggestionDTO()
        .key(use == SerdeUsageDTO.SERIALIZE
@@ -157,7 +171,7 @@ public class MessagesController extends AbstractController implements MessagesAp
            ? deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, VALUE)
            : deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, VALUE));
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
        Mono.just(dto)
            .subscribeOn(Schedulers.boundedElastic())
            .map(ResponseEntity::ok)

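Two details above: the new executeSmartFilterTest endpoint maps the request body straight through the static MessagesService::execSmartFilterTest, with no access check or audit hook in this hunk; and deleteTopicMessages needs the explicit generic witness .<ResponseEntity<Void>>then(...) likely because ResponseEntity.ok().build() is the generic <T> ResponseEntity<T> build(), and with the audit operator chained on, inference can no longer pin T from context alone. A minimal illustration (assumes reactor-core and spring-web on the classpath):

    import org.springframework.http.ResponseEntity;
    import reactor.core.publisher.Mono;

    class TypeWitnessDemo {
      // Without the <ResponseEntity<Void>> witness this may fail to compile:
      // build() is <T> ResponseEntity<T> build(), so T is fixed explicitly here.
      static Mono<ResponseEntity<Void>> handle(Mono<Void> validation, Mono<Void> work) {
        return validation.<ResponseEntity<Void>>then(
            work.thenReturn(ResponseEntity.ok().build())
        ).doOnEach(sig -> System.out.println("audit: " + sig.getType()));
      }
    }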
+ 64 - 35
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java

@@ -13,8 +13,10 @@ import com.provectus.kafka.ui.model.SchemaSubjectsResponseDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
 import com.provectus.kafka.ui.service.SchemaRegistryService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
+import java.util.Map;
 import java.util.stream.Collectors;
 import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
@@ -37,6 +39,7 @@ public class SchemasController extends AbstractController implements SchemasApi
 
   private final SchemaRegistryService schemaRegistryService;
   private final AccessControlService accessControlService;
+  private final AuditService auditService;
 
   @Override
   protected KafkaCluster getCluster(String clusterName) {
@@ -51,13 +54,14 @@ public class SchemasController extends AbstractController implements SchemasApi
   public Mono<ResponseEntity<CompatibilityCheckResponseDTO>> checkSchemaCompatibility(
      String clusterName, String subject, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubjectMono,
      ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .schema(subject)
        .schemaActions(SchemaAction.VIEW)
-        .build());
+        .operationName("checkSchemaCompatibility")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
        newSchemaSubjectMono.flatMap(subjectDTO ->
                schemaRegistryService.checksSchemaCompatibility(
                    getCluster(clusterName),
@@ -66,19 +70,20 @@ public class SchemasController extends AbstractController implements SchemasApi
                ))
            .map(kafkaSrMapper::toDto)
            .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<SchemaSubjectDTO>> createNewSchema(
      String clusterName, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubjectMono,
      ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .schemaActions(SchemaAction.CREATE)
-        .build());
+        .operationName("createNewSchema")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
        newSchemaSubjectMono.flatMap(newSubject ->
                schemaRegistryService.registerNewSchema(
                    getCluster(clusterName),
@@ -87,20 +92,22 @@ public class SchemasController extends AbstractController implements SchemasApi
                )
            ).map(kafkaSrMapper::toDto)
            .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Void>> deleteLatestSchema(
      String clusterName, String subject, ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .schema(subject)
        .schemaActions(SchemaAction.DELETE)
-        .build());
+        .operationName("deleteLatestSchema")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
        schemaRegistryService.deleteLatestSchemaSubject(getCluster(clusterName), subject)
+            .doOnEach(sig -> auditService.audit(context, sig))
            .thenReturn(ResponseEntity.ok().build())
    );
   }
@@ -108,14 +115,16 @@ public class SchemasController extends AbstractController implements SchemasApi
   @Override
   public Mono<ResponseEntity<Void>> deleteSchema(
      String clusterName, String subject, ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .schema(subject)
        .schemaActions(SchemaAction.DELETE)
-        .build());
+        .operationName("deleteSchema")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
        schemaRegistryService.deleteSchemaSubjectEntirely(getCluster(clusterName), subject)
+            .doOnEach(sig -> auditService.audit(context, sig))
            .thenReturn(ResponseEntity.ok().build())
    );
   }
@@ -123,14 +132,16 @@ public class SchemasController extends AbstractController implements SchemasApi
   @Override
   public Mono<ResponseEntity<Void>> deleteSchemaByVersion(
      String clusterName, String subjectName, Integer version, ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .schema(subjectName)
        .schemaActions(SchemaAction.DELETE)
-        .build());
+        .operationName("deleteSchemaByVersion")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
        schemaRegistryService.deleteSchemaSubjectByVersion(getCluster(clusterName), subjectName, version)
+            .doOnEach(sig -> auditService.audit(context, sig))
            .thenReturn(ResponseEntity.ok().build())
    );
   }
@@ -138,16 +149,20 @@ public class SchemasController extends AbstractController implements SchemasApi
   @Override
   public Mono<ResponseEntity<Flux<SchemaSubjectDTO>>> getAllVersionsBySubject(
      String clusterName, String subjectName, ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .schema(subjectName)
        .schemaActions(SchemaAction.VIEW)
-        .build());
+        .operationName("getAllVersionsBySubject")
+        .build();
 
     Flux<SchemaSubjectDTO> schemas =
        schemaRegistryService.getAllVersionsBySubject(getCluster(clusterName), subjectName)
            .map(kafkaSrMapper::toDto);
-    return validateAccess.thenReturn(ResponseEntity.ok(schemas));
+
+    return accessControlService.validateAccess(context)
+        .thenReturn(ResponseEntity.ok(schemas))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -163,34 +178,37 @@ public class SchemasController extends AbstractController implements SchemasApi
   public Mono<ResponseEntity<SchemaSubjectDTO>> getLatestSchema(String clusterName,
                                                                String subject,
                                                                ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .schema(subject)
        .schemaActions(SchemaAction.VIEW)
-        .build());
+        .operationName("getLatestSchema")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
        schemaRegistryService.getLatestSchemaVersionBySubject(getCluster(clusterName), subject)
            .map(kafkaSrMapper::toDto)
            .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<SchemaSubjectDTO>> getSchemaByVersion(
      String clusterName, String subject, Integer version, ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .schema(subject)
        .schemaActions(SchemaAction.VIEW)
-        .build());
+        .operationName("getSchemaByVersion")
+        .operationParams(Map.of("subject", subject, "version", version))
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
        schemaRegistryService.getSchemaSubjectByVersion(
                getCluster(clusterName), subject, version)
            .map(kafkaSrMapper::toDto)
            .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -199,6 +217,11 @@ public class SchemasController extends AbstractController implements SchemasApi
                                                                   @Valid Integer perPage,
                                                                   @Valid String search,
                                                                   ServerWebExchange serverWebExchange) {
+    var context = AccessContext.builder()
+        .cluster(clusterName)
+        .operationName("getSchemas")
+        .build();
+
     return schemaRegistryService
        .getAllSubjectNames(getCluster(clusterName))
        .flatMapIterable(l -> l)
@@ -220,25 +243,28 @@ public class SchemasController extends AbstractController implements SchemasApi
          return schemaRegistryService.getAllLatestVersionSchemas(getCluster(clusterName), subjectsToRender)
              .map(subjs -> subjs.stream().map(kafkaSrMapper::toDto).toList())
              .map(subjs -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(subjs));
-        }).map(ResponseEntity::ok);
+        }).map(ResponseEntity::ok)
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Void>> updateGlobalSchemaCompatibilityLevel(
      String clusterName, @Valid Mono<CompatibilityLevelDTO> compatibilityLevelMono,
      ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .schemaActions(SchemaAction.MODIFY_GLOBAL_COMPATIBILITY)
-        .build());
+        .operationName("updateGlobalSchemaCompatibilityLevel")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
        compatibilityLevelMono
            .flatMap(compatibilityLevelDTO ->
                schemaRegistryService.updateGlobalSchemaCompatibility(
                    getCluster(clusterName),
                    kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
                ))
+            .doOnEach(sig -> auditService.audit(context, sig))
            .thenReturn(ResponseEntity.ok().build())
    );
   }
@@ -247,12 +273,14 @@ public class SchemasController extends AbstractController implements SchemasApi
   public Mono<ResponseEntity<Void>> updateSchemaCompatibilityLevel(
      String clusterName, String subject, @Valid Mono<CompatibilityLevelDTO> compatibilityLevelMono,
      ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
        .cluster(clusterName)
        .schemaActions(SchemaAction.EDIT)
-        .build());
+        .operationName("updateSchemaCompatibilityLevel")
+        .operationParams(Map.of("subject", subject))
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
        compatibilityLevelMono
            .flatMap(compatibilityLevelDTO ->
                schemaRegistryService.updateSchemaCompatibility(
@@ -260,6 +288,7 @@ public class SchemasController extends AbstractController implements SchemasApi
                    subject,
                    kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
                ))
+            .doOnEach(sig -> auditService.audit(context, sig))
            .thenReturn(ResponseEntity.ok().build())
    );
   }

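SchemasController uses two audit placements. Read endpoints audit the outer response chain (doOnEach after the then(...) that maps to a ResponseEntity), while the delete and compatibility-update endpoints hook doOnEach onto the inner service Mono before thenReturn, so the audit consumer observes the service call's own completion or error rather than the mapped ResponseEntity. A runnable reactor-core sketch of the difference, with illustrative stand-ins for the validation and service calls:

    import reactor.core.publisher.Mono;
    import reactor.core.publisher.Signal;

    public class AuditPlacementDemo {
      static void audit(String op, Signal<?> sig) {
        System.out.println(op + " -> " + sig.getType());
      }

      public static void main(String[] args) {
        Mono<Void> validation = Mono.empty();          // stands in for validateAccess(context)
        Mono<String> serviceCall = Mono.just("done");  // stands in for a SchemaRegistryService call

        // (a) outer placement, as in getLatestSchema: audit sees the mapped response
        validation.then(serviceCall.map(r -> "HTTP 200: " + r))
            .doOnEach(sig -> audit("outer", sig))
            .block();

        // (b) inner placement, as in deleteSchema: audit sees only the service signal
        validation.then(
            serviceCall.doOnEach(sig -> audit("inner", sig))
                .thenReturn("HTTP 200")
        ).block();
      }
    }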
Some files were not shown in this diff because too many files changed.