
Merge branch 'master' into issues/3883

Ilya Kuramshin, 2 years ago
Commit 0f00a2916e
100 files changed, 3766 insertions(+), 1458 deletions(-)
  1. +2 -2    .github/CODEOWNERS
  2. +3 -0    .github/ISSUE_TEMPLATE/config.yml
  3. +0 -92   .github/ISSUE_TEMPLATE/helm.yml
  4. +1 -1    .github/workflows/block_merge.yml
  5. +14 -10  .github/workflows/branch-deploy.yml
  6. +0 -6    .github/workflows/branch-remove.yml
  7. +1 -2    .github/workflows/build-public-image.yml
  8. +0 -28   .github/workflows/create-branch-for-helm.yaml
  9. +1 -1    .github/workflows/cve.yaml
 10. +0 -6    .github/workflows/delete-public-image.yml
 11. +0 -38   .github/workflows/helm.yaml
 12. +1 -1    .github/workflows/pr-checks.yaml
 13. +0 -39   .github/workflows/release-helm.yaml
 14. +4 -5    .github/workflows/release.yaml
 15. +3 -0    .gitignore
 16. +1 -1    README.md
 17. +0 -25   charts/kafka-ui/.helmignore
 18. +0 -7    charts/kafka-ui/Chart.yaml
 19. +0 -1    charts/kafka-ui/README.md
 20. +0 -3    charts/kafka-ui/index.yaml
 21. +0 -21   charts/kafka-ui/templates/NOTES.txt
 22. +0 -84   charts/kafka-ui/templates/_helpers.tpl
 23. +0 -10   charts/kafka-ui/templates/configmap.yaml
 24. +0 -11   charts/kafka-ui/templates/configmap_fromValues.yaml
 25. +0 -150  charts/kafka-ui/templates/deployment.yaml
 26. +0 -46   charts/kafka-ui/templates/hpa.yaml
 27. +0 -89   charts/kafka-ui/templates/ingress.yaml
 28. +0 -18   charts/kafka-ui/templates/networkpolicy-egress.yaml
 29. +0 -18   charts/kafka-ui/templates/networkpolicy-ingress.yaml
 30. +0 -13   charts/kafka-ui/templates/secret.yaml
 31. +0 -22   charts/kafka-ui/templates/service.yaml
 32. +0 -12   charts/kafka-ui/templates/serviceaccount.yaml
 33. +0 -161  charts/kafka-ui/values.yaml
 34. +1 -1    documentation/compose/jmx-exporter/kafka-broker.yml
 35. +2 -0    documentation/compose/kafka-ui-arm64.yaml
 36. +1 -1    kafka-ui-api/pom.xml
 37. +12 -0   kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java
 38. +31 -8   kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CorsGlobalConfiguration.java
 39. +12 -1   kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AclsController.java
 40. +32 -24  kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ApplicationConfigController.java
 41. +53 -29  kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java
 42. +11 -5   kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ClustersController.java
 43. +33 -22  kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
 44. +94 -58  kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java
 45. +37 -25  kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KsqlController.java
 46. +40 -15  kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
 47. +64 -35  kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java
 48. +92 -66  kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java
 49. +7 -0    kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/JsonAvroConversionException.java
 50. +3 -3    kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ConsumerGroupMapper.java
 51. +7 -7    kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalConsumerGroup.java
 52. +4 -4    kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalTopicConsumerGroup.java
 53. +34 -1   kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/AccessContext.java
 54. +5 -1    kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/Permission.java
 55. +2 -1    kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/Resource.java
 56. +14 -0   kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/permission/AuditAction.java
 57. +5 -1    kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/permission/PermissibleAction.java
 58. +24 -4   kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/SerdeInstance.java
 59. +24 -0   kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/SerdesInitializer.java
 60. +294 -0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/builtin/ConsumerOffsetsSerde.java
 61. +6 -14   kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/builtin/ProtobufFileSerde.java
 62. +4 -2    kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/builtin/sr/AvroSchemaRegistrySerializer.java
 63. +14 -5   kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/builtin/sr/MessageFormatter.java
 64. +33 -37  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/builtin/sr/SchemaRegistrySerde.java
 65. +2 -1    kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerService.java
 66. +1 -1    kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumerGroupService.java
 67. +12 -5   kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConfigSanitizer.java
 68. +4 -15   kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java
 69. +52 -7   kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java
 70. +9 -12   kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/TopicsService.java
 71. +97 -0   kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/audit/AuditRecord.java
 72. +209 -0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/audit/AuditService.java
 73. +78 -0   kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/audit/AuditWriter.java
 74. +33 -11  kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/rbac/AccessControlService.java
 75. +16 -26  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java
 76. +542 -0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/JsonAvroConversion.java
 77. +3 -0    kafka-ui-api/src/main/resources/application-local.yml
 78. +2 -0    kafka-ui-api/src/test/java/com/provectus/kafka/ui/AbstractIntegrationTest.java
 79. +185 -0  kafka-ui-api/src/test/java/com/provectus/kafka/ui/serdes/builtin/ConsumerOffsetsSerdeTest.java
 80. +1 -14   kafka-ui-api/src/test/java/com/provectus/kafka/ui/serdes/builtin/ProtobufFileSerdeTest.java
 81. +171 -5  kafka-ui-api/src/test/java/com/provectus/kafka/ui/serdes/builtin/sr/SchemaRegistrySerdeTest.java
 82. +24 -4   kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/KafkaConfigSanitizerTest.java
 83. +41 -0   kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java
 84. +3 -1    kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SchemaRegistryPaginationTest.java
 85. +3 -3    kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/TopicsServicePaginationTest.java
 86. +87 -0   kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/audit/AuditIntegrationTest.java
 87. +154 -0  kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/audit/AuditServiceTest.java
 88. +1 -1    kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/AccessControlServiceMock.java
 89. +713 -0  kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/JsonAvroConversionTest.java
 90. +68 -2   kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
 91. +1 -1    kafka-ui-e2e-checks/pom.xml
 92. +5 -0    kafka-ui-e2e-checks/src/main/java/com/provectus/kafka/ui/pages/BasePage.java
 93. +54 -2   kafka-ui-e2e-checks/src/main/java/com/provectus/kafka/ui/pages/brokers/BrokersConfigTab.java
 94. +15 -0   kafka-ui-e2e-checks/src/main/java/com/provectus/kafka/ui/utilities/StringUtils.java
 95. +8 -30   kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/manualsuite/backlog/SmokeBacklog.java
 96. +108 -7  kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/smokesuite/brokers/BrokersTest.java
 97. +3 -1    kafka-ui-react-app/package.json
 98. +26 -12  kafka-ui-react-app/pnpm-lock.yaml
 99. +5 -3    kafka-ui-react-app/src/components/App.tsx
100. +4 -1   kafka-ui-react-app/src/components/Brokers/Broker/Broker.tsx

+ 2 - 2
.github/CODEOWNERS

@@ -14,5 +14,5 @@
 # TESTS
 /kafka-ui-e2e-checks/       @provectus/kafka-qa
 
-# HELM CHARTS
-/charts/                    @provectus/kafka-devops
+# INFRA
+/.github/workflows/         @provectus/kafka-devops

+ 3 - 0
.github/ISSUE_TEMPLATE/config.yml

@@ -1,5 +1,8 @@
 blank_issues_enabled: false
 contact_links:
+  - name: Report helm issue
+    url: https://github.com/provectus/kafka-ui-charts
+    about: Our helm charts are located in another repo. Please raise issues/PRs regarding charts in that repo.
   - name: Official documentation
     url: https://docs.kafka-ui.provectus.io/
     about: Before reaching out for support, please refer to our documentation. Read "FAQ" and "Common problems", also try using search there.

+ 0 - 92
.github/ISSUE_TEMPLATE/helm.yml

@@ -1,92 +0,0 @@
-name: "⎈ K8s/Helm problem report"
-description: "Report a problem with k8s/helm charts/etc"
-labels: ["status/triage", "scope/k8s"]
-assignees: []
-
-body:
-  - type: markdown
-    attributes:
-      value: |
-        Hi, thanks for raising the issue(-s), all contributions really matter!
-        Please, note that we'll close the issue without further explanation if you don't follow
-        this template and don't provide the information requested within this template.
-
-  - type: checkboxes
-    id: terms
-    attributes:
-      label: Issue submitter TODO list
-      description: By you checking these checkboxes we can be sure you've done the essential things.
-      options:
-        - label: I've looked up my issue in [FAQ](https://docs.kafka-ui.provectus.io/faq/common-problems)
-          required: true
-        - label: I've searched for an already existing issues [here](https://github.com/provectus/kafka-ui/issues)
-          required: true
-        - label: I've tried running `master`-labeled docker image and the issue still persists there
-          required: true
-        - label: I'm running a supported version of the application which is listed [here](https://github.com/provectus/kafka-ui/blob/master/SECURITY.md)
-          required: true
-
-  - type: textarea
-    attributes:
-      label: Describe the bug (actual behavior)
-      description: A clear and concise description of what the bug is. Use a list, if there is more than one problem
-    validations:
-      required: true
-
-  - type: textarea
-    attributes:
-      label: Expected behavior
-      description: A clear and concise description of what you expected to happen
-    validations:
-      required: false
-
-  - type: textarea
-    attributes:
-      label: Your installation details
-      description: |
-        How do you run the app? Please provide as much info as possible:
-        1. App version (commit hash in the top left corner of the UI)
-        2. Helm chart version
-        3. Your application config. Please remove the sensitive info like passwords or API keys.
-        4. Any IAAC configs
-    validations:
-      required: true
-
-  - type: textarea
-    attributes:
-      label: Steps to reproduce
-      description: |
-        Please write down the order of the actions required to reproduce the issue.
-        For the advanced setups/complicated issue, we might need you to provide
-        a minimal [reproducible example](https://stackoverflow.com/help/minimal-reproducible-example).
-    validations:
-      required: true
-
-  - type: textarea
-    attributes:
-      label: Screenshots
-      description: |
-        If applicable, add screenshots to help explain your problem
-    validations:
-      required: false
-
-  - type: textarea
-    attributes:
-      label: Logs
-      description: |
-        If applicable, *upload* screenshots to help explain your problem
-    validations:
-      required: false
-
-  - type: textarea
-    attributes:
-      label: Additional context
-      description: |
-        Add any other context about the problem here. E.G.:
-        1. Are there any alternative scenarios (different data/methods/configuration/setup) you have tried?
-          Were they successful or the same issue occurred? Please provide steps as well.
-        2. Related issues (if there are any).
-        3. Logs (if available)
-        4. Is there any serious impact or behaviour on the end-user because of this issue, that can be overlooked?
-    validations:
-      required: false

+ 1 - 1
.github/workflows/block_merge.yml

@@ -6,7 +6,7 @@ jobs:
   block_merge:
     runs-on: ubuntu-latest
     steps:
-      - uses: mheap/github-action-required-labels@v4
+      - uses: mheap/github-action-required-labels@v5
         with:
           mode: exactly
           count: 0

+ 14 - 10
.github/workflows/branch-deploy.yml

@@ -84,18 +84,22 @@ jobs:
           git add ../kafka-ui-from-branch/
           git commit -m "added env:${{ needs.build.outputs.deploy }}" && git push || true
 
-      - name: make comment with private deployment link
+      - name: update status check for private deployment
         if: ${{ github.event.label.name == 'status/feature_testing' }}
-        uses: peter-evans/create-or-update-comment@v3
+        uses: Sibz/github-status-action@v1.1.6
         with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            Custom deployment will be available at http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io
+          authToken: ${{secrets.GITHUB_TOKEN}}
+          context: "Click Details button to open custom deployment page"
+          state: "success"
+          sha: ${{ github.event.pull_request.head.sha  || github.sha }}
+          target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"
 
-      - name: make comment with public deployment link
+      - name: update status check for public deployment
         if: ${{ github.event.label.name == 'status/feature_testing_public' }}
-        uses: peter-evans/create-or-update-comment@v3
+        uses: Sibz/github-status-action@v1.1.6
         with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            Custom deployment will be available at http://${{ needs.build.outputs.tag }}.kafka-ui.provectus.io in 5 minutes
+          authToken: ${{secrets.GITHUB_TOKEN}}
+          context: "Click Details button to open custom deployment page"
+          state: "success"
+          sha: ${{ github.event.pull_request.head.sha  || github.sha }}
+          target_url: "http://${{ needs.build.outputs.tag }}.internal.kafka-ui.provectus.io"

+ 0 - 6
.github/workflows/branch-remove.yml

@@ -20,9 +20,3 @@ jobs:
           git config --global user.name "infra-tech"
           git add ../kafka-ui-from-branch/
           git commit -m "removed env:${{ needs.build.outputs.deploy }}" && git push || true
-      - name: make comment with deployment link
-        uses: peter-evans/create-or-update-comment@v3
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            Custom deployment removed

+ 1 - 2
.github/workflows/build-public-image.yml

@@ -65,11 +65,10 @@ jobs:
           cache-from: type=local,src=/tmp/.buildx-cache
           cache-to: type=local,dest=/tmp/.buildx-cache
       - name: make comment with private deployment link
-        uses: peter-evans/create-or-update-comment@v3
+        uses: peter-evans/create-or-update-comment@v2
         with:
           issue-number: ${{ github.event.pull_request.number }}
           body: |
             Image published at public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }}
-
     outputs:
       tag: ${{ steps.extract_branch.outputs.tag }}

+ 0 - 28
.github/workflows/create-branch-for-helm.yaml

@@ -1,28 +0,0 @@
-name: Prepare helm release
-on:
-  repository_dispatch:
-    types: [prepare-helm-release]
-jobs:
-  change-app-version:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - run: |
-          git config user.name github-actions
-          git config user.email github-actions@github.com
-      - name: Change versions
-        run: |
-          git checkout -b release-${{ github.event.client_payload.appversion}}
-          version=$(cat charts/kafka-ui/Chart.yaml  | grep version | awk '{print $2}')
-          version=${version%.*}.$((${version##*.}+1))
-          sed -i "s/version:.*/version: ${version}/" charts/kafka-ui/Chart.yaml
-          sed -i "s/appVersion:.*/appVersion: ${{ github.event.client_payload.appversion}}/" charts/kafka-ui/Chart.yaml
-          git add  charts/kafka-ui/Chart.yaml
-          git commit -m "release ${version}"
-          git push --set-upstream origin release-${{ github.event.client_payload.appversion}}
-      - name: Slack Notification
-        uses: rtCamp/action-slack-notify@v2
-        env:
-          SLACK_TITLE: "release-${{ github.event.client_payload.appversion}}"
-          SLACK_MESSAGE: "A new release of the helm chart has been prepared. Branch name: release-${{ github.event.client_payload.appversion}}"
-          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}

+ 1 - 1
.github/workflows/cve.yaml

@@ -55,7 +55,7 @@ jobs:
           cache-to: type=local,dest=/tmp/.buildx-cache
 
       - name: Run CVE checks
-        uses: aquasecurity/trivy-action@0.10.0
+        uses: aquasecurity/trivy-action@0.11.2
         with:
           image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
           format: "table"

+ 0 - 6
.github/workflows/delete-public-image.yml

@@ -32,9 +32,3 @@ jobs:
                 --repository-name kafka-ui-custom-build \
                 --image-ids imageTag=${{ steps.extract_branch.outputs.tag }} \
                 --region us-east-1
-      - name: make comment with private deployment link
-        uses: peter-evans/create-or-update-comment@v3
-        with:
-          issue-number: ${{ github.event.pull_request.number }}
-          body: |
-            Image tag public.ecr.aws/provectus/kafka-ui-custom-build:${{ steps.extract_branch.outputs.tag }} has been removed

+ 0 - 38
.github/workflows/helm.yaml

@@ -1,38 +0,0 @@
-name: Helm linter
-on:
- pull_request:
-  types: ["opened", "edited", "reopened", "synchronize"]
-  branches:
-   - 'master'
-  paths:
-   - "charts/**"
-jobs:
-  build-and-test:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v3
-      - name: Helm tool installer
-        uses: Azure/setup-helm@v3
-      - name: Setup Kubeval
-        uses: lra/setup-kubeval@v1.0.1
-      #check, was helm version increased in Chart.yaml?
-      - name: Check version
-        shell: bash
-        run: |
-          helm_version_new=$(cat charts/kafka-ui/Chart.yaml  | grep version | awk  '{print $2}')
-          helm_version_old=$(curl -s https://raw.githubusercontent.com/provectus/kafka-ui/master/charts/kafka-ui/Chart.yaml |   grep version | awk  '{print $2}' )
-          echo $helm_version_old
-          echo $helm_version_new
-          if [[ "$helm_version_new" > "$helm_version_old" ]]; then exit 0 ; else exit 1 ; fi
-      - name: Run kubeval
-        shell: bash
-        run: |
-          sed -i "s@enabled: false@enabled: true@g" charts/kafka-ui/values.yaml
-          K8S_VERSIONS=$(git ls-remote --refs --tags https://github.com/kubernetes/kubernetes.git | cut -d/ -f3 | grep -e '^v1\.[0-9]\{2\}\.[0]\{1,2\}$' | grep -v -e  '^v1\.1[0-7]\{1\}' | cut -c2-)
-          echo "NEXT K8S VERSIONS ARE GOING TO BE TESTED: $K8S_VERSIONS"
-          echo ""
-          for version in $K8S_VERSIONS
-            do
-              echo $version;
-              helm template --kube-version $version --set ingress.enabled=true charts/kafka-ui -f charts/kafka-ui/values.yaml | kubeval --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master --strict -v $version;
-            done

+ 1 - 1
.github/workflows/pr-checks.yaml

@@ -7,7 +7,7 @@ jobs:
   task-check:
     runs-on: ubuntu-latest
     steps:
-      - uses: kentaro-m/task-completed-checker-action@v0.1.1
+      - uses: kentaro-m/task-completed-checker-action@v0.1.2
         with:
           repo-token: "${{ secrets.GITHUB_TOKEN }}"
       - uses: dekinderfiets/pr-description-enforcer@0.0.1

+ 0 - 39
.github/workflows/release-helm.yaml

@@ -1,39 +0,0 @@
-name: Release helm
-on:
- push:
-    branches:
-     - master
-    paths:
-      - "charts/**"
-
-jobs:
- release-helm:
-  runs-on:
-   ubuntu-latest
-  steps:
-      - uses: actions/checkout@v3
-        with:
-          fetch-depth: 1
-
-      - run: |
-          git config user.name github-actions
-          git config user.email github-actions@github.com
-
-      - uses: azure/setup-helm@v3
-
-      - name: add chart #realse helm with new version
-        run: |
-          VERSION=$(cat charts/kafka-ui/Chart.yaml  | grep version | awk '{print $2}')
-          echo "HELM_VERSION=$(echo ${VERSION})" >> $GITHUB_ENV
-          MSG=$(helm package charts/kafka-ui)
-          git fetch origin
-          git stash
-          git checkout -b gh-pages origin/gh-pages
-          git pull
-          helm repo index .
-          git add -f ${MSG##*/} index.yaml
-          git commit -m "release ${VERSION}"
-          git push
-      - uses: rickstaa/action-create-tag@v1 #create new tag
-        with:
-          tag: "charts/kafka-ui-${{ env.HELM_VERSION }}"

+ 4 - 5
.github/workflows/release.yaml

@@ -34,7 +34,7 @@ jobs:
           echo "version=${VERSION}" >> $GITHUB_OUTPUT
 
       - name: Upload files to a GitHub release
-        uses: svenstaro/upload-release-action@2.5.0
+        uses: svenstaro/upload-release-action@2.6.1
         with:
           repo_token: ${{ secrets.GITHUB_TOKEN }}
           file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar
@@ -77,6 +77,7 @@ jobs:
           builder: ${{ steps.buildx.outputs.name }}
           context: kafka-ui-api
           platforms: linux/amd64,linux/arm64
+          provenance: false
           push: true
           tags: |
             provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
@@ -88,14 +89,12 @@ jobs:
 
   charts:
     runs-on: ubuntu-latest
-    permissions:
-      contents: write
     needs: release
     steps:
       - name: Repository Dispatch
         uses: peter-evans/repository-dispatch@v2
         with:
-          token: ${{ secrets.GITHUB_TOKEN }}
-          repository: provectus/kafka-ui
+          token: ${{ secrets.CHARTS_ACTIONS_TOKEN }}
+          repository: provectus/kafka-ui-charts
           event-type: prepare-helm-release
           client-payload: '{"appversion": "${{ needs.release.outputs.version }}"}'
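
Note on the charts job change: the default GITHUB_TOKEN is scoped to the repository running the workflow, so dispatching an event to provectus/kafka-ui-charts needs a dedicated token (CHARTS_ACTIONS_TOKEN). For illustration only, a receiving workflow in the charts repo could subscribe to the same event type that the removed create-branch-for-helm.yaml above used — a hypothetical sketch, not that repo's actual workflow:

    name: Prepare helm release
    on:
      repository_dispatch:
        types: [prepare-helm-release]
    jobs:
      bump-app-version:
        runs-on: ubuntu-latest
        steps:
          # client_payload carries the app version sent by the release workflow above
          - run: echo "Chart release requested for app ${{ github.event.client_payload.appversion }}"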

+ 3 - 0
.gitignore

@@ -31,6 +31,9 @@ build/
 .vscode/
 /kafka-ui-api/app/node
 
+### SDKMAN ###
+.sdkmanrc
+
 .DS_Store
 *.code-workspace
 

+ 1 - 1
README.md

@@ -99,7 +99,7 @@ services:
     ports:
       - 8080:8080
     environment:
-      DYNAMIC_CONFIG_ENABLED: true
+      DYNAMIC_CONFIG_ENABLED: 'true'
     volumes:
       - ~/kui/config.yml:/etc/kafkaui/dynamic_config.yaml
 ```
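
The quoting matters because YAML types bare scalars before Compose sees them: an unquoted true becomes a boolean, while Compose expects every value in an environment mapping to be a string. A minimal illustration of the two forms:

    environment:
      DYNAMIC_CONFIG_ENABLED: true     # YAML boolean - Compose may reject or coerce it

    environment:
      DYNAMIC_CONFIG_ENABLED: 'true'   # the literal string "true", passed to the app verbatim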

+ 0 - 25
charts/kafka-ui/.helmignore

@@ -1,25 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*.orig
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
-.vscode/
-example/
-README.md

+ 0 - 7
charts/kafka-ui/Chart.yaml

@@ -1,7 +0,0 @@
-apiVersion: v2
-name: kafka-ui
-description: A Helm chart for kafka-UI
-type: application
-version: 0.7.0
-appVersion: v0.7.0
-icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png

+ 0 - 1
charts/kafka-ui/README.md

@@ -1 +0,0 @@
-Please refer to our [documentation](https://docs.kafka-ui.provectus.io/configuration/helm-charts) to get some info on our helm charts.

+ 0 - 3
charts/kafka-ui/index.yaml

@@ -1,3 +0,0 @@
-apiVersion: v1
-entries: {}
-generated: "2021-11-11T12:26:08.479581+03:00"

+ 0 - 21
charts/kafka-ui/templates/NOTES.txt

@@ -1,21 +0,0 @@
-1. Get the application URL by running these commands:
-{{- if .Values.ingress.enabled }}
-{{- range $host := .Values.ingress.hosts }}
-  {{- range .paths }}
-  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }}
-  {{- end }}
-{{- end }}
-{{- else if contains "NodePort" .Values.service.type }}
-  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "kafka-ui.fullname" . }})
-  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
-  echo http://$NODE_IP:$NODE_PORT
-{{- else if contains "LoadBalancer" .Values.service.type }}
-     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
-           You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "kafka-ui.fullname" . }}'
-  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "kafka-ui.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
-  echo http://$SERVICE_IP:{{ .Values.service.port }}
-{{- else if contains "ClusterIP" .Values.service.type }}
-  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kafka-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
-  echo "Visit http://127.0.0.1:8080 to use your application"
-  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080
-{{- end }}

+ 0 - 84
charts/kafka-ui/templates/_helpers.tpl

@@ -1,84 +0,0 @@
-{{/* vim: set filetype=mustache: */}}
-{{/*
-Expand the name of the chart.
-*/}}
-{{- define "kafka-ui.name" -}}
-{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Create a default fully qualified app name.
-We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
-If release name contains chart name it will be used as a full name.
-*/}}
-{{- define "kafka-ui.fullname" -}}
-{{- if .Values.fullnameOverride }}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- $name := default .Chart.Name .Values.nameOverride }}
-{{- if contains $name .Release.Name }}
-{{- .Release.Name | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
-{{- end }}
-{{- end }}
-{{- end }}
-
-{{/*
-Create chart name and version as used by the chart label.
-*/}}
-{{- define "kafka-ui.chart" -}}
-{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
-{{- end }}
-
-{{/*
-Common labels
-*/}}
-{{- define "kafka-ui.labels" -}}
-helm.sh/chart: {{ include "kafka-ui.chart" . }}
-{{ include "kafka-ui.selectorLabels" . }}
-{{- if .Chart.AppVersion }}
-app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
-{{- end }}
-app.kubernetes.io/managed-by: {{ .Release.Service }}
-{{- end }}
-
-{{/*
-Selector labels
-*/}}
-{{- define "kafka-ui.selectorLabels" -}}
-app.kubernetes.io/name: {{ include "kafka-ui.name" . }}
-app.kubernetes.io/instance: {{ .Release.Name }}
-{{- end }}
-
-{{/*
-Create the name of the service account to use
-*/}}
-{{- define "kafka-ui.serviceAccountName" -}}
-{{- if .Values.serviceAccount.create }}
-{{- default (include "kafka-ui.fullname" .) .Values.serviceAccount.name }}
-{{- else }}
-{{- default "default" .Values.serviceAccount.name }}
-{{- end }}
-{{- end }}
-
-
-{{/*
-This allows us to check if the registry of the image is specified or not.
-*/}}
-{{- define "kafka-ui.imageName" -}}
-{{- $registryName := .Values.image.registry -}}
-{{- if .Values.global }}
-    {{- if .Values.global.imageRegistry }}
-     {{- $registryName = .Values.global.imageRegistry -}}
-    {{- end -}}
-{{- end -}}
-{{- $repository := .Values.image.repository -}}
-{{- $tag := .Values.image.tag | default .Chart.AppVersion -}}
-{{- if $registryName }}
-{{- printf "%s/%s:%s" $registryName $repository $tag -}}
-{{- else }}
-{{- printf "%s:%s" $repository $tag -}}
-{{- end }}
-{{- end -}}
-

+ 0 - 10
charts/kafka-ui/templates/configmap.yaml

@@ -1,10 +0,0 @@
-{{- if .Values.envs.config -}}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-data:
-  {{- toYaml .Values.envs.config | nindent 2 }}
-{{- end -}}

+ 0 - 11
charts/kafka-ui/templates/configmap_fromValues.yaml

@@ -1,11 +0,0 @@
-{{- if .Values.yamlApplicationConfig -}}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}-fromvalues
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-data:
-  config.yml: |-
-    {{- toYaml .Values.yamlApplicationConfig | nindent 4}}
-{{ end }}

+ 0 - 150
charts/kafka-ui/templates/deployment.yaml

@@ -1,150 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-  {{- with .Values.annotations }}
-  annotations:
-    {{- toYaml . | nindent 4 }}
-  {{- end }}
-spec:
-{{- if not .Values.autoscaling.enabled }}
-  replicas: {{ .Values.replicaCount }}
-{{- end }}
-  selector:
-    matchLabels:
-      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
-  template:
-    metadata:
-      annotations:
-      {{- with .Values.podAnnotations }}
-          {{- toYaml . | nindent 8 }}
-      {{- end }}
-        checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
-        checksum/configFromValues: {{ include (print $.Template.BasePath "/configmap_fromValues.yaml") . | sha256sum }}
-        checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
-      labels:
-        {{- include "kafka-ui.selectorLabels" . | nindent 8 }}
-        {{- if .Values.podLabels }}
-        {{- toYaml .Values.podLabels | nindent 8 }}
-        {{- end }}
-    spec:
-      {{- with .Values.imagePullSecrets }}
-      imagePullSecrets:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .Values.initContainers }}
-      initContainers:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      serviceAccountName: {{ include "kafka-ui.serviceAccountName" . }}
-      securityContext:
-        {{- toYaml .Values.podSecurityContext | nindent 8 }}
-      containers:
-        - name: {{ .Chart.Name }}
-          securityContext:
-            {{- toYaml .Values.securityContext | nindent 12 }}
-          image: {{ include "kafka-ui.imageName" . }}
-          imagePullPolicy: {{ .Values.image.pullPolicy }}
-          {{- if or .Values.env  .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
-          env:
-            {{- with .Values.env }}
-              {{- toYaml . | nindent 12 }}
-            {{- end }}
-            {{- if or .Values.yamlApplicationConfig .Values.yamlApplicationConfigConfigMap}}
-            - name: SPRING_CONFIG_ADDITIONAL-LOCATION
-              {{- if .Values.yamlApplicationConfig }}
-              value: /kafka-ui/config.yml
-              {{- else if .Values.yamlApplicationConfigConfigMap }}
-              value: /kafka-ui/{{ .Values.yamlApplicationConfigConfigMap.keyName | default "config.yml" }}
-              {{- end }}
-            {{- end }}
-          {{- end }}
-          envFrom:
-            {{- if .Values.existingConfigMap }}
-            - configMapRef:
-                name: {{ .Values.existingConfigMap }}
-            {{- end }}
-            {{- if .Values.envs.config }}
-            - configMapRef:
-                name: {{ include "kafka-ui.fullname" . }}
-            {{- end }}
-            {{- if .Values.existingSecret }}
-            - secretRef:
-                name: {{ .Values.existingSecret }}
-            {{- end }}
-            {{- if .Values.envs.secret}}
-            - secretRef:
-                name: {{ include "kafka-ui.fullname" . }}
-            {{- end}}    
-          ports:
-            - name: http
-              containerPort: 8080
-              protocol: TCP
-          livenessProbe:
-            httpGet:
-              {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
-              path: {{ get $contextPath "path" }}
-              port: http
-              {{- if .Values.probes.useHttpsScheme }}
-              scheme: HTTPS
-              {{- end }}
-            initialDelaySeconds: 60
-            periodSeconds: 30
-            timeoutSeconds: 10
-          readinessProbe:
-            httpGet:
-              {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/actuator/health" | urlParse }}
-              path: {{ get $contextPath "path" }}
-              port: http
-              {{- if .Values.probes.useHttpsScheme }}
-              scheme: HTTPS
-              {{- end }}
-            initialDelaySeconds: 60
-            periodSeconds: 30
-            timeoutSeconds: 10
-          resources:
-            {{- toYaml .Values.resources | nindent 12 }}
-          {{- if or .Values.yamlApplicationConfig .Values.volumeMounts .Values.yamlApplicationConfigConfigMap}}
-          volumeMounts:
-            {{- with .Values.volumeMounts }} 
-              {{- toYaml . | nindent 12 }}
-            {{- end }}
-            {{- if .Values.yamlApplicationConfig }}
-            - name: kafka-ui-yaml-conf
-              mountPath: /kafka-ui/
-            {{- end }}
-            {{- if .Values.yamlApplicationConfigConfigMap}}
-            - name: kafka-ui-yaml-conf-configmap
-              mountPath: /kafka-ui/
-            {{- end }}
-          {{- end }}
-      {{- if or .Values.yamlApplicationConfig .Values.volumes .Values.yamlApplicationConfigConfigMap}}
-      volumes:
-        {{- with .Values.volumes }}
-          {{- toYaml . | nindent 8 }}
-        {{- end }}
-        {{- if .Values.yamlApplicationConfig }}
-        - name: kafka-ui-yaml-conf
-          configMap: 
-            name: {{ include "kafka-ui.fullname" . }}-fromvalues
-        {{- end }}
-        {{- if .Values.yamlApplicationConfigConfigMap}}
-        - name: kafka-ui-yaml-conf-configmap
-          configMap: 
-            name: {{ .Values.yamlApplicationConfigConfigMap.name }}
-        {{- end }}
-      {{- end }}
-      {{- with .Values.nodeSelector }}
-      nodeSelector:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .Values.affinity }}
-      affinity:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .Values.tolerations }}
-      tolerations:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}

+ 0 - 46
charts/kafka-ui/templates/hpa.yaml

@@ -1,46 +0,0 @@
-{{- if .Values.autoscaling.enabled }}
-{{- $kubeCapabilityVersion := semver .Capabilities.KubeVersion.Version -}}
-{{- $isHigher1p25 := ge (semver "1.25" | $kubeCapabilityVersion.Compare) 0 -}}
-{{- if and ($.Capabilities.APIVersions.Has "autoscaling/v2") $isHigher1p25 -}}
-apiVersion: autoscaling/v2
-{{- else  }}
-apiVersion: autoscaling/v2beta1
-{{- end }}
-kind: HorizontalPodAutoscaler
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-spec:
-  scaleTargetRef:
-    apiVersion: apps/v1
-    kind: Deployment
-    name: {{ include "kafka-ui.fullname" . }}
-  minReplicas: {{ .Values.autoscaling.minReplicas }}
-  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
-  metrics:
-  {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
-    - type: Resource
-      resource:
-        name: cpu
-  {{- if  $isHigher1p25 }}
-        target:
-         type: Utilization
-         averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
-  {{- else  }}        
-        targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
-  {{- end }}      
-  {{- end }}
-  {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
-    - type: Resource
-      resource:
-        name: memory
-  {{- if  $isHigher1p25 }}     
-        target:
-          type: Utilization
-          averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
-  {{- else  }}   
-        targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
-  {{- end }}
-  {{- end }}
-{{- end }}

+ 0 - 89
charts/kafka-ui/templates/ingress.yaml

@@ -1,89 +0,0 @@
-{{- if .Values.ingress.enabled -}}
-{{- $fullName := include "kafka-ui.fullname" . -}}
-{{- $svcPort := .Values.service.port -}}
-{{- $kubeCapabilityVersion := semver .Capabilities.KubeVersion.Version -}}
-{{- $isHigher1p19 := ge (semver "1.19" | $kubeCapabilityVersion.Compare) 0 -}}
-{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
-apiVersion: networking.k8s.io/v1
-{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }}
-apiVersion: networking.k8s.io/v1beta1
-{{- else }}
-apiVersion: extensions/v1beta1
-{{- end }}
-kind: Ingress
-metadata:
-  name: {{ $fullName }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-  {{- with .Values.ingress.annotations }}
-  annotations:
-    {{- toYaml . | nindent 4 }}
-  {{- end }}
-spec:
-  {{- if .Values.ingress.tls.enabled }}
-  tls:
-    - hosts:
-        - {{ tpl .Values.ingress.host . }}
-      secretName: {{ .Values.ingress.tls.secretName }}
-  {{- end }}
-  {{- if .Values.ingress.ingressClassName }}
-  ingressClassName: {{ .Values.ingress.ingressClassName }}
-  {{- end }}
-  rules:
-    - http:
-        paths:
-{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
-          {{- range .Values.ingress.precedingPaths }}
-          - path: {{ .path }}
-            pathType: {{ .Values.ingress.pathType }}
-            backend:
-              service:
-                name: {{ .serviceName }}
-                port:
-                  number: {{ .servicePort }}
-          {{- end }}
-          - backend:
-              service:
-                name: {{ $fullName }}
-                port:
-                  number: {{ $svcPort }}
-            pathType: {{ .Values.ingress.pathType }}
-{{- if .Values.ingress.path }}
-            path: {{ .Values.ingress.path }}
-{{- end }}
-          {{- range .Values.ingress.succeedingPaths }}
-          - path: {{ .path }}
-            pathType: {{ .Values.ingress.pathType }}
-            backend:
-              service:
-                name: {{ .serviceName }}
-                port:
-                  number: {{ .servicePort }}
-          {{- end }}
-{{- if tpl .Values.ingress.host . }}
-      host: {{tpl .Values.ingress.host . }}
-{{- end }}
-{{- else -}}
-          {{- range .Values.ingress.precedingPaths }}
-          - path: {{ .path }}
-            backend:
-              serviceName: {{ .serviceName }}
-              servicePort: {{ .servicePort }}
-          {{- end }}
-          - backend:
-              serviceName: {{ $fullName }}
-              servicePort: {{ $svcPort }}
-{{- if .Values.ingress.path }}
-            path: {{ .Values.ingress.path }}
-{{- end }}
-          {{- range .Values.ingress.succeedingPaths }}
-          - path: {{ .path }}
-            backend:
-              serviceName: {{ .serviceName }}
-              servicePort: {{ .servicePort }}
-          {{- end }}
-{{- if tpl .Values.ingress.host . }}
-      host: {{ tpl .Values.ingress.host . }}
-{{- end }}
-{{- end }}
-{{- end }}

+ 0 - 18
charts/kafka-ui/templates/networkpolicy-egress.yaml

@@ -1,18 +0,0 @@
-{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }}
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
-  name: {{ printf "%s-egress" (include "kafka-ui.fullname" .) }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-spec:
-  podSelector:
-    matchLabels:
-      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
-  policyTypes:
-    - Egress
-  egress:
-    {{- if .Values.networkPolicy.egressRules.customRules }}
-    {{- toYaml .Values.networkPolicy.egressRules.customRules | nindent 4 }}
-    {{- end }}
-{{- end }}

+ 0 - 18
charts/kafka-ui/templates/networkpolicy-ingress.yaml

@@ -1,18 +0,0 @@
-{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.ingressRules.customRules }}
-apiVersion: networking.k8s.io/v1
-kind: NetworkPolicy
-metadata:
-  name: {{ printf "%s-ingress" (include "kafka-ui.fullname" .) }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-spec:
-  podSelector:
-    matchLabels:
-      {{- include "kafka-ui.selectorLabels" . | nindent 6 }}
-  policyTypes:
-    - Ingress
-  ingress:
-    {{- if .Values.networkPolicy.ingressRules.customRules }}
-    {{- toYaml .Values.networkPolicy.ingressRules.customRules | nindent 4 }}
-    {{- end }}
-{{- end }}

+ 0 - 13
charts/kafka-ui/templates/secret.yaml

@@ -1,13 +0,0 @@
-{{- if .Values.envs.secret -}}
-apiVersion: v1
-kind: Secret
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-type: Opaque
-data:
-  {{- range $key, $val := .Values.envs.secret }}
-  {{ $key }}: {{ $val | b64enc | quote }}
-  {{- end -}}
-{{- end}}

+ 0 - 22
charts/kafka-ui/templates/service.yaml

@@ -1,22 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: {{ include "kafka-ui.fullname" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-{{- if .Values.service.annotations }}
-  annotations:
-{{ toYaml .Values.service.annotations | nindent 4 }}
-{{- end }}
-spec:
-  type: {{ .Values.service.type }}
-  ports:
-    - port: {{ .Values.service.port }}
-      targetPort: http
-      protocol: TCP
-      name: http
-      {{- if (and (eq .Values.service.type "NodePort") .Values.service.nodePort) }}
-      nodePort: {{ .Values.service.nodePort }}
-      {{- end }}
-  selector:
-    {{- include "kafka-ui.selectorLabels" . | nindent 4 }}

+ 0 - 12
charts/kafka-ui/templates/serviceaccount.yaml

@@ -1,12 +0,0 @@
-{{- if .Values.serviceAccount.create -}}
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: {{ include "kafka-ui.serviceAccountName" . }}
-  labels:
-    {{- include "kafka-ui.labels" . | nindent 4 }}
-  {{- with .Values.serviceAccount.annotations }}
-  annotations:
-    {{- toYaml . | nindent 4 }}
-  {{- end }}
-{{- end }}

+ 0 - 161
charts/kafka-ui/values.yaml

@@ -1,161 +0,0 @@
-replicaCount: 1
-
-image:
-  registry: docker.io
-  repository: provectuslabs/kafka-ui
-  pullPolicy: IfNotPresent
-  # Overrides the image tag whose default is the chart appVersion.
-  tag: ""
-
-imagePullSecrets: []
-nameOverride: ""
-fullnameOverride: ""
-
-serviceAccount:
-  # Specifies whether a service account should be created
-  create: true
-  # Annotations to add to the service account
-  annotations: {}
-  # The name of the service account to use.
-  # If not set and create is true, a name is generated using the fullname template
-  name: ""
-
-existingConfigMap: ""
-yamlApplicationConfig:
-  {}
-  # kafka:
-  #   clusters:
-  #     - name: yaml
-  #       bootstrapServers: kafka-service:9092
-  # spring:
-  #   security:
-  #     oauth2:
-  # auth:
-  #   type: disabled
-  # management:
-  #   health:
-  #     ldap:
-  #       enabled: false
-yamlApplicationConfigConfigMap:
-  {}
-  # keyName: config.yml
-  # name: configMapName
-existingSecret: ""
-envs:
-  secret: {}
-  config: {}
-
-networkPolicy:
-  enabled: false
-  egressRules:
-    ## Additional custom egress rules
-    ## e.g:
-    ## customRules:
-    ##   - to:
-    ##       - namespaceSelector:
-    ##           matchLabels:
-    ##             label: example
-    customRules: []
-  ingressRules:
-    ## Additional custom ingress rules
-    ## e.g:
-    ## customRules:
-    ##   - from:
-    ##       - namespaceSelector:
-    ##           matchLabels:
-    ##             label: example
-    customRules: []
-
-podAnnotations: {}
-podLabels: {}
-
-## Annotations to be added to kafka-ui Deployment
-##
-annotations: {}
-
-## Set field schema as HTTPS for readines and liveness probe
-##
-probes:
-  useHttpsScheme: false
-
-podSecurityContext:
-  {}
-  # fsGroup: 2000
-
-securityContext:
-  {}
-  # capabilities:
-  #   drop:
-  #   - ALL
-  # readOnlyRootFilesystem: true
-  # runAsNonRoot: true
-  # runAsUser: 1000
-
-service:
-  type: ClusterIP
-  port: 80
-  # if you want to force a specific nodePort. Must be use with service.type=NodePort
-  # nodePort:
-
-# Ingress configuration
-ingress:
-  # Enable ingress resource
-  enabled: false
-
-  # Annotations for the Ingress
-  annotations: {}
-
-  # ingressClassName for the Ingress
-  ingressClassName: ""
-
-  # The path for the Ingress
-  path: "/"
-
-  # The path type for the Ingress
-  pathType: "Prefix"  
-
-  # The hostname for the Ingress
-  host: ""
-
-  # configs for Ingress TLS
-  tls:
-    # Enable TLS termination for the Ingress
-    enabled: false
-    # the name of a pre-created Secret containing a TLS private key and certificate
-    secretName: ""
-
-  # HTTP paths to add to the Ingress before the default path
-  precedingPaths: []
-
-  # Http paths to add to the Ingress after the default path
-  succeedingPaths: []
-
-resources:
-  {}
-  # limits:
-  #   cpu: 200m
-  #   memory: 512Mi
-  # requests:
-  #   cpu: 200m
-  #   memory: 256Mi
-
-autoscaling:
-  enabled: false
-  minReplicas: 1
-  maxReplicas: 100
-  targetCPUUtilizationPercentage: 80
-  # targetMemoryUtilizationPercentage: 80
-
-nodeSelector: {}
-
-tolerations: []
-
-affinity: {}
-
-env: {}
-
-initContainers: {}
-
-volumeMounts: {}
-
-volumes: {}

+ 1 - 1
documentation/compose/jmx-exporter/kafka-broker.yml

@@ -1,2 +1,2 @@
 rules:
-  - pattern: ".*"
+  - pattern: ".*"

+ 2 - 0
documentation/compose/kafka-ui-arm64.yaml

@@ -20,6 +20,8 @@ services:
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
       DYNAMIC_CONFIG_ENABLED: 'true'  # not necessary, added for tests
+      KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED: 'true'
+      KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: 'true'
 
   kafka0:
     image: confluentinc/cp-kafka:7.2.1.arm64

+ 1 - 1
kafka-ui-api/pom.xml

@@ -311,7 +311,7 @@
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-checkstyle-plugin</artifactId>
-                <version>3.1.2</version>
+                <version>3.3.0</version>
                 <dependencies>
                     <dependency>
                         <groupId>com.puppycrawl.tools</groupId>

+ 12 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java

@@ -51,6 +51,7 @@ public class ClustersProperties {
     List<Masking> masking;
     Long pollingThrottleRate;
     TruststoreConfig ssl;
+    AuditProperties audit;
   }
 
   @Data
@@ -143,6 +144,17 @@ public class ClustersProperties {
     }
   }
 
+  @Data
+  @NoArgsConstructor
+  @AllArgsConstructor
+  public static class AuditProperties {
+    String topic;
+    Integer auditTopicsPartitions;
+    Boolean topicAuditEnabled;
+    Boolean consoleAuditEnabled;
+    Map<String, String> auditTopicProperties;
+  }
+
   @PostConstruct
   public void validateAndSetDefaults() {
     if (clusters != null) {
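
The new AuditProperties block is configured per cluster. A minimal sketch of the corresponding YAML application config, assuming Spring Boot's relaxed binding maps the field names directly — the env-var form of the same settings (KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED, KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED) appears in the compose change above; the topic name and property values here are illustrative:

    kafka:
      clusters:
        - name: local
          bootstrapServers: localhost:9092
          audit:
            topicAuditEnabled: true      # write audit records to a Kafka topic
            consoleAuditEnabled: true    # also mirror audit records to the application log
            # topic: __kui-audit-log     # hypothetical override of the audit topic name
            # auditTopicsPartitions: 1   # partitions used if the topic is created
            # auditTopicProperties:      # extra topic configs
            #   retention.ms: '43200000'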

+ 31 - 8
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CorsGlobalConfiguration.java

@@ -1,18 +1,41 @@
 package com.provectus.kafka.ui.config;
 
+import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
+import org.springframework.http.HttpHeaders;
+import org.springframework.http.HttpMethod;
+import org.springframework.http.HttpStatus;
+import org.springframework.http.server.reactive.ServerHttpRequest;
+import org.springframework.http.server.reactive.ServerHttpResponse;
 import org.springframework.web.reactive.config.CorsRegistry;
 import org.springframework.web.reactive.config.WebFluxConfigurer;
+import org.springframework.web.server.ServerWebExchange;
+import org.springframework.web.server.WebFilter;
+import org.springframework.web.server.WebFilterChain;
+import reactor.core.publisher.Mono;
 
 @Configuration
-public class CorsGlobalConfiguration implements WebFluxConfigurer {
+public class CorsGlobalConfiguration {
 
-  @Override
-  public void addCorsMappings(CorsRegistry registry) {
-    registry.addMapping("/**")
-        .allowedOrigins("*")
-        .allowedMethods("*")
-        .allowedHeaders("*")
-        .allowCredentials(false);
+  @Bean
+  public WebFilter corsFilter() {
+    return (final ServerWebExchange ctx, final WebFilterChain chain) -> {
+      final ServerHttpRequest request = ctx.getRequest();
+
+      final ServerHttpResponse response = ctx.getResponse();
+      final HttpHeaders headers = response.getHeaders();
+      headers.add("Access-Control-Allow-Origin", "*");
+      headers.add("Access-Control-Allow-Methods", "GET, PUT, POST, DELETE, OPTIONS");
+      headers.add("Access-Control-Max-Age", "3600");
+      headers.add("Access-Control-Allow-Headers", "Content-Type");
+
+      if (request.getMethod() == HttpMethod.OPTIONS) {
+        response.setStatusCode(HttpStatus.OK);
+        return Mono.empty();
+      }
+
+      return chain.filter(ctx);
+    };
   }
+
 }

+ 12 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AclsController.java

@@ -8,6 +8,7 @@ import com.provectus.kafka.ui.model.KafkaAclResourceTypeDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.AclAction;
 import com.provectus.kafka.ui.service.acl.AclsService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Optional;
 import lombok.RequiredArgsConstructor;
@@ -26,6 +27,7 @@ public class AclsController extends AbstractController implements AclsApi {
 
   private final AclsService aclsService;
   private final AccessControlService accessControlService;
+  private final AuditService auditService;
 
   @Override
   public Mono<ResponseEntity<Void>> createAcl(String clusterName, Mono<KafkaAclDTO> kafkaAclDto,
@@ -33,12 +35,14 @@ public class AclsController extends AbstractController implements AclsApi {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
         .aclActions(AclAction.EDIT)
+        .operationName("createAcl")
         .build();
 
     return accessControlService.validateAccess(context)
         .then(kafkaAclDto)
         .map(ClusterMapper::toAclBinding)
         .flatMap(binding -> aclsService.createAcl(getCluster(clusterName), binding))
+        .doOnEach(sig -> auditService.audit(context, sig))
         .thenReturn(ResponseEntity.ok().build());
   }
 
@@ -48,12 +52,14 @@ public class AclsController extends AbstractController implements AclsApi {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
         .aclActions(AclAction.EDIT)
+        .operationName("deleteAcl")
         .build();
 
     return accessControlService.validateAccess(context)
         .then(kafkaAclDto)
         .map(ClusterMapper::toAclBinding)
         .flatMap(binding -> aclsService.deleteAcl(getCluster(clusterName), binding))
+        .doOnEach(sig -> auditService.audit(context, sig))
         .thenReturn(ResponseEntity.ok().build());
   }
 
@@ -66,6 +72,7 @@ public class AclsController extends AbstractController implements AclsApi {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
         .aclActions(AclAction.VIEW)
+        .operationName("listAcls")
         .build();
 
     var resourceType = Optional.ofNullable(resourceTypeDto)
@@ -83,7 +90,7 @@ public class AclsController extends AbstractController implements AclsApi {
             ResponseEntity.ok(
                 aclsService.listAcls(getCluster(clusterName), filter)
                     .map(ClusterMapper::toKafkaAclDto)))
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -91,12 +98,14 @@ public class AclsController extends AbstractController implements AclsApi {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
         .aclActions(AclAction.VIEW)
+        .operationName("getAclAsCsv")
         .build();
 
     return accessControlService.validateAccess(context).then(
         aclsService.getAclAsCsvString(getCluster(clusterName))
             .map(ResponseEntity::ok)
             .flatMap(Mono::just)
+            .doOnEach(sig -> auditService.audit(context, sig))
     );
   }
 
@@ -105,11 +114,13 @@ public class AclsController extends AbstractController implements AclsApi {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
         .aclActions(AclAction.EDIT)
+        .operationName("syncAclsCsv")
         .build();
 
     return accessControlService.validateAccess(context)
         .then(csvMono)
         .flatMap(csv -> aclsService.syncAclWithAclCsv(getCluster(clusterName), csv))
+        .doOnEach(sig -> auditService.audit(context, sig))
         .thenReturn(ResponseEntity.ok().build());
   }
 }

+ 32 - 24
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ApplicationConfigController.java

@@ -15,6 +15,7 @@ import com.provectus.kafka.ui.model.UploadedFileInfoDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ApplicationInfoService;
 import com.provectus.kafka.ui.service.KafkaClusterFactory;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import com.provectus.kafka.ui.util.ApplicationRestarter;
 import com.provectus.kafka.ui.util.DynamicConfigOperations;
@@ -55,6 +56,7 @@ public class ApplicationConfigController implements ApplicationConfigApi {
   private final ApplicationRestarter restarter;
   private final KafkaClusterFactory kafkaClusterFactory;
   private final ApplicationInfoService applicationInfoService;
+  private final AuditService auditService;
 
   @Override
   public Mono<ResponseEntity<ApplicationInfoDTO>> getApplicationInfo(ServerWebExchange exchange) {
@@ -63,62 +65,68 @@ public class ApplicationConfigController implements ApplicationConfigApi {
 
   @Override
   public Mono<ResponseEntity<ApplicationConfigDTO>> getCurrentConfig(ServerWebExchange exchange) {
-    return accessControlService
-        .validateAccess(
-            AccessContext.builder()
-                .applicationConfigActions(VIEW)
-                .build()
-        )
+    var context = AccessContext.builder()
+        .applicationConfigActions(VIEW)
+        .operationName("getCurrentConfig")
+        .build();
+    return accessControlService.validateAccess(context)
         .then(Mono.fromSupplier(() -> ResponseEntity.ok(
             new ApplicationConfigDTO()
                 .properties(MAPPER.toDto(dynamicConfigOperations.getCurrentProperties()))
-        )));
+        )))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Void>> restartWithConfig(Mono<RestartRequestDTO> restartRequestDto,
                                                       ServerWebExchange exchange) {
-    return accessControlService
-        .validateAccess(
-            AccessContext.builder()
-                .applicationConfigActions(EDIT)
-                .build()
-        )
+    var context =  AccessContext.builder()
+        .applicationConfigActions(EDIT)
+        .operationName("restartWithConfig")
+        .build();
+    return accessControlService.validateAccess(context)
         .then(restartRequestDto)
-        .map(dto -> {
+        .<ResponseEntity<Void>>map(dto -> {
           dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
           restarter.requestRestart();
           return ResponseEntity.ok().build();
-        });
+        })
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<UploadedFileInfoDTO>> uploadConfigRelatedFile(Flux<Part> fileFlux,
                                                                            ServerWebExchange exchange) {
-    return accessControlService
-        .validateAccess(
-            AccessContext.builder()
-                .applicationConfigActions(EDIT)
-                .build()
-        )
+    var context = AccessContext.builder()
+        .applicationConfigActions(EDIT)
+        .operationName("uploadConfigRelatedFile")
+        .build();
+    return accessControlService.validateAccess(context)
         .then(fileFlux.single())
         .flatMap(file ->
             dynamicConfigOperations.uploadConfigRelatedFile((FilePart) file)
                 .map(path -> new UploadedFileInfoDTO().location(path.toString()))
-                .map(ResponseEntity::ok));
+                .map(ResponseEntity::ok))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<ApplicationConfigValidationDTO>> validateConfig(Mono<ApplicationConfigDTO> configDto,
                                                                              ServerWebExchange exchange) {
-    return configDto
+    var context = AccessContext.builder()
+        .applicationConfigActions(EDIT)
+        .operationName("validateConfig")
+        .build();
+    return accessControlService.validateAccess(context)
+        .then(configDto)
         .flatMap(config -> {
           PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
           ClustersProperties clustersProperties = propertiesStructure.getKafka();
           return validateClustersConfig(clustersProperties)
               .map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
         })
-        .map(ResponseEntity::ok);
+        .map(ResponseEntity::ok)
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   private Mono<Map<String, ClusterConfigValidationDTO>> validateClustersConfig(

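For orientation, the hunks above (and every controller below) apply one shared pattern: build an AccessContext that now carries an operationName, validate it, run the guarded operation, and attach the audit call as a doOnEach side effect so that success and failure signals are both recorded. A minimal, self-contained sketch of that chain follows; AccessContext, validateAccess and audit here are local stand-ins, not the production classes.

import reactor.core.publisher.Mono;
import reactor.core.publisher.Signal;

class AuditChainSketch {
  record AccessContext(String operationName) {}

  // Stand-in for AccessControlService#validateAccess: completes empty when access is granted.
  static Mono<Void> validateAccess(AccessContext ctx) {
    return Mono.empty();
  }

  // Stand-in for AuditService#audit: doOnEach hands it every onNext, onComplete
  // and onError signal, so successful results and failures are both observable.
  static void audit(AccessContext ctx, Signal<?> sig) {
    System.out.printf("audit op=%s signal=%s%n", ctx.operationName(), sig.getType());
  }

  static Mono<String> endpoint() {
    var context = new AccessContext("getCurrentConfig");
    return validateAccess(context)
        .then(Mono.fromSupplier(() -> "current config")) // the guarded operation
        .doOnEach(sig -> audit(context, sig));           // audits success and errors alike
  }

  public static void main(String[] args) {
    endpoint().block(); // prints an ON_NEXT and an ON_COMPLETE audit line
  }
}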
+ 53 - 29
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java

@@ -11,8 +11,11 @@ import com.provectus.kafka.ui.model.BrokersLogdirsDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
 import com.provectus.kafka.ui.service.BrokerService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
+import java.util.Map;
+import javax.annotation.Nullable;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.http.ResponseEntity;
@@ -27,61 +30,78 @@ import reactor.core.publisher.Mono;
 public class BrokersController extends AbstractController implements BrokersApi {
   private final BrokerService brokerService;
   private final ClusterMapper clusterMapper;
+
+  private final AuditService auditService;
   private final AccessControlService accessControlService;
 
   @Override
   public Mono<ResponseEntity<Flux<BrokerDTO>>> getBrokers(String clusterName,
                                                           ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
-        .build());
+        .operationName("getBrokers")
+        .build();
 
     var job = brokerService.getBrokers(getCluster(clusterName)).map(clusterMapper::toBrokerDto);
-
-    return validateAccess.thenReturn(ResponseEntity.ok(job));
+    return accessControlService.validateAccess(context)
+        .thenReturn(ResponseEntity.ok(job))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<BrokerMetricsDTO>> getBrokersMetrics(String clusterName, Integer id,
                                                                   ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
-        .build());
+        .operationName("getBrokersMetrics")
+        .operationParams(Map.of("id", id))
+        .build();
 
-    return validateAccess.then(
-        brokerService.getBrokerMetrics(getCluster(clusterName), id)
-            .map(clusterMapper::toBrokerMetrics)
-            .map(ResponseEntity::ok)
-            .onErrorReturn(ResponseEntity.notFound().build())
-    );
+    return accessControlService.validateAccess(context)
+        .then(
+            brokerService.getBrokerMetrics(getCluster(clusterName), id)
+                .map(clusterMapper::toBrokerMetrics)
+                .map(ResponseEntity::ok)
+                .onErrorReturn(ResponseEntity.notFound().build())
+        )
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<BrokersLogdirsDTO>>> getAllBrokersLogdirs(String clusterName,
-                                                                            List<Integer> brokers,
+                                                                            @Nullable List<Integer> brokers,
                                                                             ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+
+    List<Integer> brokerIds = brokers == null ? List.of() : brokers;
+
+    var context = AccessContext.builder()
         .cluster(clusterName)
-        .build());
+        .operationName("getAllBrokersLogdirs")
+        .operationParams(Map.of("brokerIds", brokerIds))
+        .build();
 
-    return validateAccess.thenReturn(ResponseEntity.ok(
-        brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokers)));
+    return accessControlService.validateAccess(context)
+        .thenReturn(ResponseEntity.ok(
+            brokerService.getAllBrokersLogdirs(getCluster(clusterName), brokerIds)))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<BrokerConfigDTO>>> getBrokerConfig(String clusterName,
                                                                      Integer id,
                                                                      ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW)
-        .build());
+        .operationName("getBrokerConfig")
+        .operationParams(Map.of("brokerId", id))
+        .build();
 
-    return validateAccess.thenReturn(
+    return accessControlService.validateAccess(context).thenReturn(
         ResponseEntity.ok(
             brokerService.getBrokerConfig(getCluster(clusterName), id)
                 .map(clusterMapper::toBrokerConfig))
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -89,16 +109,18 @@ public class BrokersController extends AbstractController implements BrokersApi
                                                                      Integer id,
                                                                      Mono<BrokerLogdirUpdateDTO> brokerLogdir,
                                                                      ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
-        .build());
+        .operationName("updateBrokerTopicPartitionLogDir")
+        .operationParams(Map.of("brokerId", id))
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         brokerLogdir
             .flatMap(bld -> brokerService.updateBrokerLogDir(getCluster(clusterName), id, bld))
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -107,16 +129,18 @@ public class BrokersController extends AbstractController implements BrokersApi
                                                              String name,
                                                              Mono<BrokerConfigItemDTO> brokerConfig,
                                                              ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
-        .build());
+        .operationName("updateBrokerConfigByName")
+        .operationParams(Map.of("brokerId", id))
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         brokerConfig
             .flatMap(bci -> brokerService.updateBrokerConfigByName(
                 getCluster(clusterName), id, name, bci.getValue()))
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 }

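One subtlety in getAllBrokersLogdirs above: brokers became @Nullable and is normalized to an empty list before going into operationParams, because Map.of rejects null keys and values. A standalone demonstration, not project code:

import java.util.List;
import java.util.Map;

class NullableParamDemo {
  public static void main(String[] args) {
    List<Integer> brokers = null; // the query parameter was omitted by the client

    // Normalizing first, as the diff does, keeps Map.of happy:
    List<Integer> brokerIds = brokers == null ? List.of() : brokers;
    System.out.println(Map.of("brokerIds", brokerIds)); // {brokerIds=[]}

    // Passing the raw null value would fail fast instead:
    try {
      Map.of("brokerIds", brokers);
    } catch (NullPointerException expected) {
      System.out.println("Map.of threw NPE for the null list");
    }
  }
}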
+ 11 - 5
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ClustersController.java

@@ -6,6 +6,7 @@ import com.provectus.kafka.ui.model.ClusterMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterStatsDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.ClusterService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
@@ -21,6 +22,7 @@ import reactor.core.publisher.Mono;
 public class ClustersController extends AbstractController implements ClustersApi {
   private final ClusterService clusterService;
   private final AccessControlService accessControlService;
+  private final AuditService auditService;
 
   @Override
   public Mono<ResponseEntity<Flux<ClusterDTO>>> getClusters(ServerWebExchange exchange) {
@@ -35,6 +37,7 @@ public class ClustersController extends AbstractController implements ClustersAp
                                                                    ServerWebExchange exchange) {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
+        .operationName("getClusterMetrics")
         .build();
 
     return accessControlService.validateAccess(context)
@@ -42,7 +45,8 @@ public class ClustersController extends AbstractController implements ClustersAp
             clusterService.getClusterMetrics(getCluster(clusterName))
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build())
-        );
+        )
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -50,6 +54,7 @@ public class ClustersController extends AbstractController implements ClustersAp
                                                                ServerWebExchange exchange) {
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
+        .operationName("getClusterStats")
         .build();
 
     return accessControlService.validateAccess(context)
@@ -57,7 +62,8 @@ public class ClustersController extends AbstractController implements ClustersAp
             clusterService.getClusterStats(getCluster(clusterName))
                 .map(ResponseEntity::ok)
                 .onErrorReturn(ResponseEntity.notFound().build())
-        );
+        )
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -66,11 +72,11 @@ public class ClustersController extends AbstractController implements ClustersAp
 
     AccessContext context = AccessContext.builder()
         .cluster(clusterName)
+        .operationName("updateClusterInfo")
         .build();
 
     return accessControlService.validateAccess(context)
-        .then(
-            clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok)
-        );
+        .then(clusterService.updateCluster(getCluster(clusterName)).map(ResponseEntity::ok))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 }

+ 33 - 22
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java

@@ -19,6 +19,7 @@ import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import com.provectus.kafka.ui.service.ConsumerGroupService;
 import com.provectus.kafka.ui.service.OffsetsResetService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Map;
 import java.util.Optional;
@@ -42,6 +43,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
   private final ConsumerGroupService consumerGroupService;
   private final OffsetsResetService offsetsResetService;
   private final AccessControlService accessControlService;
+  private final AuditService auditService;
 
   @Value("${consumer.groups.page.size:25}")
   private int defaultConsumerGroupsPageSize;
@@ -50,44 +52,47 @@ public class ConsumerGroupsController extends AbstractController implements Cons
   public Mono<ResponseEntity<Void>> deleteConsumerGroup(String clusterName,
                                                         String id,
                                                         ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .consumerGroup(id)
         .consumerGroupActions(DELETE)
-        .build());
+        .operationName("deleteConsumerGroup")
+        .build();
 
-    return validateAccess.then(
-        consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id)
-            .thenReturn(ResponseEntity.ok().build())
-    );
+    return accessControlService.validateAccess(context)
+        .then(consumerGroupService.deleteConsumerGroupById(getCluster(clusterName), id))
+        .doOnEach(sig -> auditService.audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
   }
 
   @Override
   public Mono<ResponseEntity<ConsumerGroupDetailsDTO>> getConsumerGroup(String clusterName,
                                                                         String consumerGroupId,
                                                                         ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .consumerGroup(consumerGroupId)
         .consumerGroupActions(VIEW)
-        .build());
+        .operationName("getConsumerGroup")
+        .build();
 
-    return validateAccess.then(
-        consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
+    return accessControlService.validateAccess(context)
+        .then(consumerGroupService.getConsumerGroupDetail(getCluster(clusterName), consumerGroupId)
             .map(ConsumerGroupMapper::toDetailsDto)
-            .map(ResponseEntity::ok)
-    );
+            .map(ResponseEntity::ok))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(String clusterName,
                                                                              String topicName,
                                                                              ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(TopicAction.VIEW)
-        .build());
+        .operationName("getTopicConsumerGroups")
+        .build();
 
     Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> job =
         consumerGroupService.getConsumerGroupsForTopic(getCluster(clusterName), topicName)
@@ -99,7 +104,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
             .map(ResponseEntity::ok)
             .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
 
-    return validateAccess.then(job);
+    return accessControlService.validateAccess(context)
+        .then(job)
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -112,12 +119,13 @@ public class ConsumerGroupsController extends AbstractController implements Cons
       SortOrderDTO sortOrderDto,
       ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         // consumer group access validation is within the service
-        .build());
+        .operationName("getConsumerGroupsPage")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         consumerGroupService.getConsumerGroupsPage(
                 getCluster(clusterName),
                 Optional.ofNullable(page).filter(i -> i > 0).orElse(1),
@@ -128,7 +136,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
             )
             .map(this::convertPage)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -137,12 +145,13 @@ public class ConsumerGroupsController extends AbstractController implements Cons
                                                               Mono<ConsumerGroupOffsetsResetDTO> resetDto,
                                                               ServerWebExchange exchange) {
     return resetDto.flatMap(reset -> {
-      Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+      var context = AccessContext.builder()
           .cluster(clusterName)
           .topic(reset.getTopic())
           .topicActions(TopicAction.VIEW)
           .consumerGroupActions(RESET_OFFSETS)
-          .build());
+          .operationName("resetConsumerGroupOffsets")
+          .build();
 
       Supplier<Mono<Void>> mono = () -> {
         var cluster = getCluster(clusterName);
@@ -182,7 +191,9 @@ public class ConsumerGroupsController extends AbstractController implements Cons
         }
       };
 
-      return validateAccess.then(mono.get());
+      return accessControlService.validateAccess(context)
+          .then(mono.get())
+          .doOnEach(sig -> auditService.audit(context, sig));
     }).thenReturn(ResponseEntity.ok().build());
   }
 

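getConsumerGroupsPage keeps its parameter defaulting inline with Optional; the same idiom, isolated into a runnable snippet (values hypothetical):

import java.util.Optional;

class PageDefaultsDemo {
  // Mirrors Optional.ofNullable(page).filter(i -> i > 0).orElse(1) above:
  // null and non-positive page numbers both fall back to page 1.
  static int normalizePage(Integer page) {
    return Optional.ofNullable(page).filter(i -> i > 0).orElse(1);
  }

  public static void main(String[] args) {
    System.out.println(normalizePage(null)); // 1
    System.out.println(normalizePage(0));    // 1
    System.out.println(normalizePage(3));    // 3
  }
}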
+ 94 - 58
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java

@@ -1,5 +1,9 @@
 package com.provectus.kafka.ui.controller;
 
+import static com.provectus.kafka.ui.model.ConnectorActionDTO.RESTART;
+import static com.provectus.kafka.ui.model.ConnectorActionDTO.RESTART_ALL_TASKS;
+import static com.provectus.kafka.ui.model.ConnectorActionDTO.RESTART_FAILED_TASKS;
+
 import com.provectus.kafka.ui.api.KafkaConnectApi;
 import com.provectus.kafka.ui.model.ConnectDTO;
 import com.provectus.kafka.ui.model.ConnectorActionDTO;
@@ -14,9 +18,11 @@ import com.provectus.kafka.ui.model.TaskDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
 import com.provectus.kafka.ui.service.KafkaConnectService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Comparator;
 import java.util.Map;
+import java.util.Set;
 import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
@@ -30,8 +36,12 @@ import reactor.core.publisher.Mono;
 @RequiredArgsConstructor
 @Slf4j
 public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
+  private static final Set<ConnectorActionDTO> RESTART_ACTIONS
+      = Set.of(RESTART, RESTART_FAILED_TASKS, RESTART_ALL_TASKS);
+
   private final KafkaConnectService kafkaConnectService;
   private final AccessControlService accessControlService;
+  private final AuditService auditService;
 
   @Override
   public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
@@ -47,15 +57,16 @@ public class KafkaConnectController extends AbstractController implements KafkaC
   public Mono<ResponseEntity<Flux<String>>> getConnectors(String clusterName, String connectName,
                                                           ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW)
-        .build());
+        .operationName("getConnectors")
+        .build();
 
-    return validateAccess.thenReturn(
-        ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName))
-    );
+    return accessControlService.validateAccess(context)
+        .thenReturn(ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName)))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -63,16 +74,17 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                             @Valid Mono<NewConnectorDTO> connector,
                                                             ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW, ConnectAction.CREATE)
-        .build());
+        .operationName("createConnector")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         kafkaConnectService.createConnector(getCluster(clusterName), connectName, connector)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -80,17 +92,18 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                          String connectorName,
                                                          ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW)
         .connector(connectorName)
-        .build());
+        .operationName("getConnector")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         kafkaConnectService.getConnector(getCluster(clusterName), connectName, connectorName)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -98,16 +111,18 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                     String connectorName,
                                                     ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
-        .build());
+        .operationName("deleteConnector")
+        .operationParams(Map.of("connectorName", connectName))
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         kafkaConnectService.deleteConnector(getCluster(clusterName), connectName, connectorName)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
 
@@ -119,14 +134,23 @@ public class KafkaConnectController extends AbstractController implements KafkaC
       SortOrderDTO sortOrder,
       ServerWebExchange exchange
   ) {
+    var context = AccessContext.builder()
+        .cluster(clusterName)
+        .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
+        .operationName("getAllConnectors")
+        .build();
+
     var comparator = sortOrder == null || sortOrder.equals(SortOrderDTO.ASC)
         ? getConnectorsComparator(orderBy)
         : getConnectorsComparator(orderBy).reversed();
+
     Flux<FullConnectorInfoDTO> job = kafkaConnectService.getAllConnectors(getCluster(clusterName), search)
         .filterWhen(dto -> accessControlService.isConnectAccessible(dto.getConnect(), clusterName))
-        .filterWhen(dto -> accessControlService.isConnectorAccessible(dto.getConnect(), dto.getName(), clusterName));
+        .filterWhen(dto -> accessControlService.isConnectorAccessible(dto.getConnect(), dto.getName(), clusterName))
+        .sort(comparator);
 
-    return Mono.just(ResponseEntity.ok(job.sort(comparator)));
+    return Mono.just(ResponseEntity.ok(job))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -135,17 +159,18 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                                       String connectorName,
                                                                       ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW)
-        .build());
+        .operationName("getConnectorConfig")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         kafkaConnectService
             .getConnectorConfig(getCluster(clusterName), connectName, connectorName)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -154,16 +179,19 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                                Mono<Map<String, Object>> requestBody,
                                                                ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
-        .build());
-
-    return validateAccess.then(
-        kafkaConnectService
-            .setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
-            .map(ResponseEntity::ok));
+        .operationName("setConnectorConfig")
+        .operationParams(Map.of("connectorName", connectorName))
+        .build();
+
+    return accessControlService.validateAccess(context).then(
+            kafkaConnectService
+                .setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
+                .map(ResponseEntity::ok))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -171,18 +199,26 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                          String connectorName,
                                                          ConnectorActionDTO action,
                                                          ServerWebExchange exchange) {
+    ConnectAction[] connectActions;
+    if (RESTART_ACTIONS.contains(action)) {
+      connectActions = new ConnectAction[] {ConnectAction.VIEW, ConnectAction.RESTART};
+    } else {
+      connectActions = new ConnectAction[] {ConnectAction.VIEW, ConnectAction.EDIT};
+    }
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
-        .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
-        .build());
+        .connectActions(connectActions)
+        .operationName("updateConnectorState")
+        .operationParams(Map.of("connectorName", connectorName))
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         kafkaConnectService
             .updateConnectorState(getCluster(clusterName), connectName, connectorName, action)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -190,17 +226,19 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                                String connectName,
                                                                String connectorName,
                                                                ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW)
-        .build());
+        .operationName("getConnectorTasks")
+        .operationParams(Map.of("connectorName", connectorName))
+        .build();
 
-    return validateAccess.thenReturn(
+    return accessControlService.validateAccess(context).thenReturn(
         ResponseEntity
             .ok(kafkaConnectService
                 .getConnectorTasks(getCluster(clusterName), connectName, connectorName))
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -208,34 +246,37 @@ public class KafkaConnectController extends AbstractController implements KafkaC
                                                          String connectorName, Integer taskId,
                                                          ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW, ConnectAction.RESTART)
-        .build());
+        .operationName("restartConnectorTask")
+        .operationParams(Map.of("connectorName", connectorName))
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         kafkaConnectService
             .restartConnectorTask(getCluster(clusterName), connectName, connectorName, taskId)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<ConnectorPluginDTO>>> getConnectorPlugins(
       String clusterName, String connectName, ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .connect(connectName)
         .connectActions(ConnectAction.VIEW)
-        .build());
+        .operationName("getConnectorPlugins")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         Mono.just(
             ResponseEntity.ok(
                 kafkaConnectService.getConnectorPlugins(getCluster(clusterName), connectName)))
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -253,16 +294,11 @@ public class KafkaConnectController extends AbstractController implements KafkaC
     if (orderBy == null) {
       return defaultComparator;
     }
-    switch (orderBy) {
-      case CONNECT:
-        return Comparator.comparing(FullConnectorInfoDTO::getConnect);
-      case TYPE:
-        return Comparator.comparing(FullConnectorInfoDTO::getType);
-      case STATUS:
-        return Comparator.comparing(fullConnectorInfoDTO -> fullConnectorInfoDTO.getStatus().getState());
-      case NAME:
-      default:
-        return defaultComparator;
-    }
+    return switch (orderBy) {
+      case CONNECT -> Comparator.comparing(FullConnectorInfoDTO::getConnect);
+      case TYPE -> Comparator.comparing(FullConnectorInfoDTO::getType);
+      case STATUS -> Comparator.comparing(fullConnectorInfoDTO -> fullConnectorInfoDTO.getStatus().getState());
+      default -> defaultComparator;
+    };
   }
 }

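The updateConnectorState hunk maps restart-type actions to the narrower RESTART permission and leaves the remaining state changes (pause/resume) under EDIT. A standalone sketch of that mapping; the enum members mirror the diff, but the types are local stand-ins:

import java.util.Set;

class ConnectorPermissionsSketch {
  enum ConnectorAction { RESTART, RESTART_ALL_TASKS, RESTART_FAILED_TASKS, PAUSE, RESUME }
  enum ConnectAction { VIEW, EDIT, RESTART }

  private static final Set<ConnectorAction> RESTART_ACTIONS =
      Set.of(ConnectorAction.RESTART, ConnectorAction.RESTART_ALL_TASKS,
          ConnectorAction.RESTART_FAILED_TASKS);

  // VIEW is always required; the second permission depends on the action kind.
  static ConnectAction[] requiredPermissions(ConnectorAction action) {
    return RESTART_ACTIONS.contains(action)
        ? new ConnectAction[] {ConnectAction.VIEW, ConnectAction.RESTART}
        : new ConnectAction[] {ConnectAction.VIEW, ConnectAction.EDIT};
  }

  public static void main(String[] args) {
    System.out.println(requiredPermissions(ConnectorAction.RESTART)[1]); // RESTART
    System.out.println(requiredPermissions(ConnectorAction.PAUSE)[1]);   // EDIT
  }
}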
+ 37 - 25
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KsqlController.java

@@ -9,6 +9,7 @@ import com.provectus.kafka.ui.model.KsqlTableDescriptionDTO;
 import com.provectus.kafka.ui.model.KsqlTableResponseDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.KsqlAction;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.ksql.KsqlServiceV2;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
@@ -29,38 +30,43 @@ public class KsqlController extends AbstractController implements KsqlApi {
 
   private final KsqlServiceV2 ksqlServiceV2;
   private final AccessControlService accessControlService;
+  private final AuditService auditService;
 
   @Override
   public Mono<ResponseEntity<KsqlCommandV2ResponseDTO>> executeKsql(String clusterName,
-                                                                    Mono<KsqlCommandV2DTO>
-                                                                        ksqlCommand2Dto,
+                                                                    Mono<KsqlCommandV2DTO> ksqlCmdDto,
                                                                     ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
-        .cluster(clusterName)
-        .ksqlActions(KsqlAction.EXECUTE)
-        .build());
-
-    return validateAccess.then(
-        ksqlCommand2Dto.map(dto -> {
-          var id = ksqlServiceV2.registerCommand(
-              getCluster(clusterName),
-              dto.getKsql(),
-              Optional.ofNullable(dto.getStreamsProperties()).orElse(Map.of()));
-          return new KsqlCommandV2ResponseDTO().pipeId(id);
-        }).map(ResponseEntity::ok)
-    );
+    return ksqlCmdDto.flatMap(
+            command -> {
+              var context = AccessContext.builder()
+                  .cluster(clusterName)
+                  .ksqlActions(KsqlAction.EXECUTE)
+                  .operationName("executeKsql")
+                  .operationParams(command)
+                  .build();
+              return accessControlService.validateAccess(context).thenReturn(
+                      new KsqlCommandV2ResponseDTO().pipeId(
+                          ksqlServiceV2.registerCommand(
+                              getCluster(clusterName),
+                              command.getKsql(),
+                              Optional.ofNullable(command.getStreamsProperties()).orElse(Map.of()))))
+                  .doOnEach(sig -> auditService.audit(context, sig));
+            }
+        )
+        .map(ResponseEntity::ok);
   }
 
   @Override
   public Mono<ResponseEntity<Flux<KsqlResponseDTO>>> openKsqlResponsePipe(String clusterName,
                                                                           String pipeId,
                                                                           ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .ksqlActions(KsqlAction.EXECUTE)
-        .build());
+        .operationName("openKsqlResponsePipe")
+        .build();
 
-    return validateAccess.thenReturn(
+    return accessControlService.validateAccess(context).thenReturn(
         ResponseEntity.ok(ksqlServiceV2.execute(pipeId)
             .map(table -> new KsqlResponseDTO()
                 .table(
@@ -74,22 +80,28 @@ public class KsqlController extends AbstractController implements KsqlApi {
   @Override
   public Mono<ResponseEntity<Flux<KsqlStreamDescriptionDTO>>> listStreams(String clusterName,
                                                                           ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .ksqlActions(KsqlAction.EXECUTE)
-        .build());
+        .operationName("listStreams")
+        .build();
 
-    return validateAccess.thenReturn(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))));
+    return accessControlService.validateAccess(context)
+        .thenReturn(ResponseEntity.ok(ksqlServiceV2.listStreams(getCluster(clusterName))))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Flux<KsqlTableDescriptionDTO>>> listTables(String clusterName,
                                                                         ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .ksqlActions(KsqlAction.EXECUTE)
-        .build());
+        .operationName("listTables")
+        .build();
 
-    return validateAccess.thenReturn(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))));
+    return accessControlService.validateAccess(context)
+        .thenReturn(ResponseEntity.ok(ksqlServiceV2.listTables(getCluster(clusterName))))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 }

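executeKsql is the one endpoint here whose audit context depends on the request body (the KSQL command itself becomes the operationParams), which is why the context is now built inside flatMap rather than up front. The shape of that, reduced to stand-in types:

import reactor.core.publisher.Mono;

class BodyDependentContextSketch {
  record Command(String ksql) {}
  record Context(String operationName, Object params) {}

  static Mono<String> execute(Mono<Command> body) {
    // The context cannot exist before the body arrives, so it is created
    // inside the chain and captured by the audit callback.
    return body.flatMap(cmd -> {
      var context = new Context("executeKsql", cmd);
      return Mono.just("pipe-1") // stands in for ksqlServiceV2.registerCommand(...)
          .doOnEach(sig -> System.out.println("audit " + context + " " + sig.getType()));
    });
  }

  public static void main(String[] args) {
    execute(Mono.just(new Command("SHOW STREAMS;"))).block();
  }
}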
+ 40 - 15
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java

@@ -15,12 +15,16 @@ import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
 import com.provectus.kafka.ui.model.SeekDirectionDTO;
 import com.provectus.kafka.ui.model.SeekTypeDTO;
 import com.provectus.kafka.ui.model.SerdeUsageDTO;
+import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
+import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.model.TopicSerdeSuggestionDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
 import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import com.provectus.kafka.ui.service.DeserializationService;
 import com.provectus.kafka.ui.service.MessagesService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
 import java.util.Map;
@@ -46,25 +50,34 @@ public class MessagesController extends AbstractController implements MessagesAp
   private final MessagesService messagesService;
   private final DeserializationService deserializationService;
   private final AccessControlService accessControlService;
+  private final AuditService auditService;
 
   @Override
   public Mono<ResponseEntity<Void>> deleteTopicMessages(
       String clusterName, String topicName, @Valid List<Integer> partitions,
       ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(MESSAGES_DELETE)
-        .build());
+        .operationName("deleteTopicMessages")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).<ResponseEntity<Void>>then(
         messagesService.deleteTopicMessages(
             getCluster(clusterName),
             topicName,
             Optional.ofNullable(partitions).orElse(List.of())
         ).thenReturn(ResponseEntity.ok().build())
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
+  }
+
+  @Override
+  public Mono<ResponseEntity<SmartFilterTestExecutionResultDTO>> executeSmartFilterTest(
+      Mono<SmartFilterTestExecutionDTO> smartFilterTestExecutionDto, ServerWebExchange exchange) {
+    return smartFilterTestExecutionDto
+        .map(MessagesService::execSmartFilterTest)
+        .map(ResponseEntity::ok);
   }
 
   @Override
@@ -79,11 +92,15 @@ public class MessagesController extends AbstractController implements MessagesAp
                                                                            String keySerde,
                                                                            String valueSerde,
                                                                            ServerWebExchange exchange) {
-    final Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var contextBuilder = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(MESSAGES_READ)
-        .build());
+        .operationName("getTopicMessages");
+
+    if (auditService.isAuditTopic(getCluster(clusterName), topicName)) {
+      contextBuilder.auditActions(AuditAction.VIEW);
+    }
 
     seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING;
     seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD;
@@ -102,7 +119,10 @@ public class MessagesController extends AbstractController implements MessagesAp
         )
     );
 
-    return validateAccess.then(job);
+    var context = contextBuilder.build();
+    return accessControlService.validateAccess(context)
+        .then(job)
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -110,17 +130,18 @@ public class MessagesController extends AbstractController implements MessagesAp
       String clusterName, String topicName, @Valid Mono<CreateTopicMessageDTO> createTopicMessage,
       ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(MESSAGES_PRODUCE)
-        .build());
+        .operationName("sendTopicMessages")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         createTopicMessage.flatMap(msg ->
             messagesService.sendMessage(getCluster(clusterName), topicName, msg).then()
         ).map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   /**
@@ -156,12 +177,12 @@ public class MessagesController extends AbstractController implements MessagesAp
                                                                  String topicName,
                                                                  SerdeUsageDTO use,
                                                                  ServerWebExchange exchange) {
-
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(TopicAction.VIEW)
-        .build());
+        .operationName("getSerdes")
+        .build();
 
     TopicSerdeSuggestionDTO dto = new TopicSerdeSuggestionDTO()
         .key(use == SerdeUsageDTO.SERIALIZE
@@ -171,10 +192,14 @@ public class MessagesController extends AbstractController implements MessagesAp
             ? deserializationService.getSerdesForSerialize(getCluster(clusterName), topicName, VALUE)
             : deserializationService.getSerdesForDeserialize(getCluster(clusterName), topicName, VALUE));
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         Mono.just(dto)
             .subscribeOn(Schedulers.boundedElastic())
             .map(ResponseEntity::ok)
     );
   }
 }

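getTopicMessages gains a conditional permission: when the requested topic is the audit log itself, AuditAction.VIEW is required on top of MESSAGES_READ. A reduced sketch of that gate; the types and the topic name are hypothetical stand-ins, not the AuditService implementation:

import java.util.ArrayList;
import java.util.List;

class AuditTopicGateSketch {
  enum Action { MESSAGES_READ, AUDIT_VIEW }

  // Stand-in for AuditService#isAuditTopic; the topic name here is assumed.
  static boolean isAuditTopic(String topic) {
    return "__kui-audit-log".equals(topic);
  }

  static List<Action> requiredActions(String topic) {
    var actions = new ArrayList<>(List.of(Action.MESSAGES_READ));
    if (isAuditTopic(topic)) {
      actions.add(Action.AUDIT_VIEW); // the extra permission from the diff
    }
    return actions;
  }

  public static void main(String[] args) {
    System.out.println(requiredActions("orders"));          // [MESSAGES_READ]
    System.out.println(requiredActions("__kui-audit-log")); // [MESSAGES_READ, AUDIT_VIEW]
  }
}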
+ 64 - 35
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java

@@ -13,8 +13,10 @@ import com.provectus.kafka.ui.model.SchemaSubjectsResponseDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
 import com.provectus.kafka.ui.service.SchemaRegistryService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.List;
+import java.util.Map;
 import java.util.stream.Collectors;
 import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
@@ -37,6 +39,7 @@ public class SchemasController extends AbstractController implements SchemasApi
 
   private final SchemaRegistryService schemaRegistryService;
   private final AccessControlService accessControlService;
+  private final AuditService auditService;
 
   @Override
   protected KafkaCluster getCluster(String clusterName) {
@@ -51,13 +54,14 @@ public class SchemasController extends AbstractController implements SchemasApi
   public Mono<ResponseEntity<CompatibilityCheckResponseDTO>> checkSchemaCompatibility(
       String clusterName, String subject, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubjectMono,
       ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .schema(subject)
         .schemaActions(SchemaAction.VIEW)
-        .build());
+        .operationName("checkSchemaCompatibility")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         newSchemaSubjectMono.flatMap(subjectDTO ->
                 schemaRegistryService.checksSchemaCompatibility(
                     getCluster(clusterName),
@@ -66,19 +70,20 @@ public class SchemasController extends AbstractController implements SchemasApi
                 ))
             .map(kafkaSrMapper::toDto)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<SchemaSubjectDTO>> createNewSchema(
       String clusterName, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubjectMono,
       ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .schemaActions(SchemaAction.CREATE)
-        .build());
+        .operationName("createNewSchema")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         newSchemaSubjectMono.flatMap(newSubject ->
                 schemaRegistryService.registerNewSchema(
                     getCluster(clusterName),
@@ -87,20 +92,22 @@ public class SchemasController extends AbstractController implements SchemasApi
                 )
             ).map(kafkaSrMapper::toDto)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Void>> deleteLatestSchema(
       String clusterName, String subject, ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .schema(subject)
         .schemaActions(SchemaAction.DELETE)
-        .build());
+        .operationName("deleteLatestSchema")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         schemaRegistryService.deleteLatestSchemaSubject(getCluster(clusterName), subject)
+            .doOnEach(sig -> auditService.audit(context, sig))
             .thenReturn(ResponseEntity.ok().build())
     );
   }
@@ -108,14 +115,16 @@ public class SchemasController extends AbstractController implements SchemasApi
   @Override
   public Mono<ResponseEntity<Void>> deleteSchema(
       String clusterName, String subject, ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .schema(subject)
         .schemaActions(SchemaAction.DELETE)
-        .build());
+        .operationName("deleteSchema")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         schemaRegistryService.deleteSchemaSubjectEntirely(getCluster(clusterName), subject)
+            .doOnEach(sig -> auditService.audit(context, sig))
             .thenReturn(ResponseEntity.ok().build())
     );
   }
@@ -123,14 +132,16 @@ public class SchemasController extends AbstractController implements SchemasApi
   @Override
   public Mono<ResponseEntity<Void>> deleteSchemaByVersion(
       String clusterName, String subjectName, Integer version, ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .schema(subjectName)
         .schemaActions(SchemaAction.DELETE)
-        .build());
+        .operationName("deleteSchemaByVersion")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         schemaRegistryService.deleteSchemaSubjectByVersion(getCluster(clusterName), subjectName, version)
+            .doOnEach(sig -> auditService.audit(context, sig))
             .thenReturn(ResponseEntity.ok().build())
     );
   }
@@ -138,16 +149,20 @@ public class SchemasController extends AbstractController implements SchemasApi
   @Override
   public Mono<ResponseEntity<Flux<SchemaSubjectDTO>>> getAllVersionsBySubject(
       String clusterName, String subjectName, ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .schema(subjectName)
         .schemaActions(SchemaAction.VIEW)
-        .build());
+        .operationName("getAllVersionsBySubject")
+        .build();
 
     Flux<SchemaSubjectDTO> schemas =
         schemaRegistryService.getAllVersionsBySubject(getCluster(clusterName), subjectName)
             .map(kafkaSrMapper::toDto);
-    return validateAccess.thenReturn(ResponseEntity.ok(schemas));
+
+    return accessControlService.validateAccess(context)
+        .thenReturn(ResponseEntity.ok(schemas))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -163,34 +178,37 @@ public class SchemasController extends AbstractController implements SchemasApi
   public Mono<ResponseEntity<SchemaSubjectDTO>> getLatestSchema(String clusterName,
                                                                 String subject,
                                                                 ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .schema(subject)
         .schemaActions(SchemaAction.VIEW)
-        .build());
+        .operationName("getLatestSchema")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         schemaRegistryService.getLatestSchemaVersionBySubject(getCluster(clusterName), subject)
             .map(kafkaSrMapper::toDto)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<SchemaSubjectDTO>> getSchemaByVersion(
       String clusterName, String subject, Integer version, ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .schema(subject)
         .schemaActions(SchemaAction.VIEW)
-        .build());
+        .operationName("getSchemaByVersion")
+        .operationParams(Map.of("subject", subject, "version", version))
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         schemaRegistryService.getSchemaSubjectByVersion(
                 getCluster(clusterName), subject, version)
             .map(kafkaSrMapper::toDto)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -199,6 +217,11 @@ public class SchemasController extends AbstractController implements SchemasApi
                                                                     @Valid Integer perPage,
                                                                     @Valid String search,
                                                                     ServerWebExchange serverWebExchange) {
+    var context = AccessContext.builder()
+        .cluster(clusterName)
+        .operationName("getSchemas")
+        .build();
+
     return schemaRegistryService
         .getAllSubjectNames(getCluster(clusterName))
         .flatMapIterable(l -> l)
@@ -220,25 +243,28 @@ public class SchemasController extends AbstractController implements SchemasApi
           return schemaRegistryService.getAllLatestVersionSchemas(getCluster(clusterName), subjectsToRender)
               .map(subjs -> subjs.stream().map(kafkaSrMapper::toDto).toList())
               .map(subjs -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(subjs));
-        }).map(ResponseEntity::ok);
+        }).map(ResponseEntity::ok)
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Void>> updateGlobalSchemaCompatibilityLevel(
       String clusterName, @Valid Mono<CompatibilityLevelDTO> compatibilityLevelMono,
       ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .schemaActions(SchemaAction.MODIFY_GLOBAL_COMPATIBILITY)
-        .build());
+        .operationName("updateGlobalSchemaCompatibilityLevel")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         compatibilityLevelMono
             .flatMap(compatibilityLevelDTO ->
                 schemaRegistryService.updateGlobalSchemaCompatibility(
                     getCluster(clusterName),
                     kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
                 ))
+            .doOnEach(sig -> auditService.audit(context, sig))
             .thenReturn(ResponseEntity.ok().build())
     );
   }
@@ -247,12 +273,14 @@ public class SchemasController extends AbstractController implements SchemasApi
   public Mono<ResponseEntity<Void>> updateSchemaCompatibilityLevel(
       String clusterName, String subject, @Valid Mono<CompatibilityLevelDTO> compatibilityLevelMono,
       ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .schemaActions(SchemaAction.EDIT)
-        .build());
+        .operationName("updateSchemaCompatibilityLevel")
+        .operationParams(Map.of("subject", subject))
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         compatibilityLevelMono
             .flatMap(compatibilityLevelDTO ->
                 schemaRegistryService.updateSchemaCompatibility(
@@ -260,6 +288,7 @@ public class SchemasController extends AbstractController implements SchemasApi
                     subject,
                     kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
                 ))
+            .doOnEach(sig -> auditService.audit(context, sig))
             .thenReturn(ResponseEntity.ok().build())
     );
   }

+ 92 - 66
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java

@@ -27,9 +27,11 @@ import com.provectus.kafka.ui.model.TopicsResponseDTO;
 import com.provectus.kafka.ui.model.rbac.AccessContext;
 import com.provectus.kafka.ui.service.TopicsService;
 import com.provectus.kafka.ui.service.analyze.TopicAnalysisService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import java.util.Comparator;
 import java.util.List;
+import java.util.Map;
 import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
@@ -52,69 +54,78 @@ public class TopicsController extends AbstractController implements TopicsApi {
   private final TopicAnalysisService topicAnalysisService;
   private final ClusterMapper clusterMapper;
   private final AccessControlService accessControlService;
+  private final AuditService auditService;
 
   @Override
   public Mono<ResponseEntity<TopicDTO>> createTopic(
-      String clusterName, @Valid Mono<TopicCreationDTO> topicCreation, ServerWebExchange exchange) {
-
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
-        .cluster(clusterName)
-        .topicActions(CREATE)
-        .build());
-
-    return validateAccess.then(
-        topicsService.createTopic(getCluster(clusterName), topicCreation)
-            .map(clusterMapper::toTopic)
-            .map(s -> new ResponseEntity<>(s, HttpStatus.OK))
-            .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()))
-    );
+      String clusterName, @Valid Mono<TopicCreationDTO> topicCreationMono, ServerWebExchange exchange) {
+    return topicCreationMono.flatMap(topicCreation -> {
+      var context = AccessContext.builder()
+          .cluster(clusterName)
+          .topicActions(CREATE)
+          .operationName("createTopic")
+          .operationParams(topicCreation)
+          .build();
+
+      return accessControlService.validateAccess(context)
+          .then(topicsService.createTopic(getCluster(clusterName), topicCreation))
+          .map(clusterMapper::toTopic)
+          .map(s -> new ResponseEntity<>(s, HttpStatus.OK))
+          .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()))
+          .doOnEach(sig -> auditService.audit(context, sig));
+    });
   }
 
   @Override
   public Mono<ResponseEntity<TopicDTO>> recreateTopic(String clusterName,
                                                       String topicName, ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(VIEW, CREATE, DELETE)
-        .build());
+        .operationName("recreateTopic")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         topicsService.recreateTopic(getCluster(clusterName), topicName)
             .map(clusterMapper::toTopic)
             .map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<TopicDTO>> cloneTopic(
       String clusterName, String topicName, String newTopicName, ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(VIEW, CREATE)
-        .build());
+        .operationName("cloneTopic")
+        .operationParams(Map.of("newTopicName", newTopicName))
+        .build();
 
-    return validateAccess.then(topicsService.cloneTopic(getCluster(clusterName), topicName, newTopicName)
-        .map(clusterMapper::toTopic)
-        .map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
-    );
+    return accessControlService.validateAccess(context)
+        .then(topicsService.cloneTopic(getCluster(clusterName), topicName, newTopicName)
+            .map(clusterMapper::toTopic)
+            .map(s -> new ResponseEntity<>(s, HttpStatus.CREATED))
+        ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Void>> deleteTopic(
       String clusterName, String topicName, ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(DELETE)
-        .build());
+        .operationName("deleteTopic")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         topicsService.deleteTopic(getCluster(clusterName), topicName).map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
 
@@ -122,13 +133,14 @@ public class TopicsController extends AbstractController implements TopicsApi {
   public Mono<ResponseEntity<Flux<TopicConfigDTO>>> getTopicConfigs(
       String clusterName, String topicName, ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(VIEW)
-        .build());
+        .operationName("getTopicConfigs")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         topicsService.getTopicConfigs(getCluster(clusterName), topicName)
             .map(lst -> lst.stream()
                 .map(InternalTopicConfig::from)
@@ -136,24 +148,25 @@ public class TopicsController extends AbstractController implements TopicsApi {
                 .collect(toList()))
             .map(Flux::fromIterable)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<TopicDetailsDTO>> getTopicDetails(
       String clusterName, String topicName, ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(VIEW)
-        .build());
+        .operationName("getTopicDetails")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         topicsService.getTopicDetails(getCluster(clusterName), topicName)
             .map(clusterMapper::toTopicDetails)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -166,13 +179,19 @@ public class TopicsController extends AbstractController implements TopicsApi {
                                                            @Valid SortOrderDTO sortOrder,
                                                            ServerWebExchange exchange) {
 
+    AccessContext context = AccessContext.builder()
+        .cluster(clusterName)
+        .operationName("getTopics")
+        .build();
+
     return topicsService.getTopicsForPagination(getCluster(clusterName))
-        .flatMap(existingTopics -> {
+        .flatMap(topics -> accessControlService.filterViewableTopics(topics, clusterName))
+        .flatMap(topics -> {
           int pageSize = perPage != null && perPage > 0 ? perPage : DEFAULT_PAGE_SIZE;
           var topicsToSkip = ((page != null && page > 0 ? page : 1) - 1) * pageSize;
           var comparator = sortOrder == null || !sortOrder.equals(SortOrderDTO.DESC)
               ? getComparatorForTopic(orderBy) : getComparatorForTopic(orderBy).reversed();
-          List<InternalTopic> filtered = existingTopics.stream()
+          List<InternalTopic> filtered = topics.stream()
               .filter(topic -> !topic.isInternal()
                   || showInternal != null && showInternal)
               .filter(topic -> search == null || StringUtils.containsIgnoreCase(topic.getName(), search))
@@ -188,15 +207,13 @@ public class TopicsController extends AbstractController implements TopicsApi {
               .collect(toList());
 
           return topicsService.loadTopics(getCluster(clusterName), topicsPage)
-              .flatMapMany(Flux::fromIterable)
-              .filterWhen(dto -> accessControlService.isTopicAccessible(dto, clusterName))
-              .collectList()
               .map(topicsToRender ->
                   new TopicsResponseDTO()
                       .topics(topicsToRender.stream().map(clusterMapper::toTopic).collect(toList()))
                       .pageCount(totalPages));
         })
-        .map(ResponseEntity::ok);
+        .map(ResponseEntity::ok)
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -204,18 +221,19 @@ public class TopicsController extends AbstractController implements TopicsApi {
       String clusterName, String topicName, @Valid Mono<TopicUpdateDTO> topicUpdate,
       ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(VIEW, EDIT)
-        .build());
+        .operationName("updateTopic")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         topicsService
             .updateTopic(getCluster(clusterName), topicName, topicUpdate)
             .map(clusterMapper::toTopic)
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -224,17 +242,17 @@ public class TopicsController extends AbstractController implements TopicsApi {
       Mono<PartitionsIncreaseDTO> partitionsIncrease,
       ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(VIEW, EDIT)
-        .build());
+        .operationName("increaseTopicPartitions")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         partitionsIncrease.flatMap(partitions ->
             topicsService.increaseTopicPartitions(getCluster(clusterName), topicName, partitions)
         ).map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
@@ -243,31 +261,34 @@ public class TopicsController extends AbstractController implements TopicsApi {
       Mono<ReplicationFactorChangeDTO> replicationFactorChange,
       ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(VIEW, EDIT)
-        .build());
+        .operationName("changeReplicationFactor")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         replicationFactorChange
             .flatMap(rfc ->
                 topicsService.changeReplicationFactor(getCluster(clusterName), topicName, rfc))
             .map(ResponseEntity::ok)
-    );
+    ).doOnEach(sig -> auditService.audit(context, sig));
   }
 
   @Override
   public Mono<ResponseEntity<Void>> analyzeTopic(String clusterName, String topicName, ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(MESSAGES_READ)
-        .build());
+        .operationName("analyzeTopic")
+        .build();
 
-    return validateAccess.then(
+    return accessControlService.validateAccess(context).then(
         topicAnalysisService.analyze(getCluster(clusterName), topicName)
+            .doOnEach(sig -> auditService.audit(context, sig))
             .thenReturn(ResponseEntity.ok().build())
     );
   }
@@ -275,15 +296,17 @@ public class TopicsController extends AbstractController implements TopicsApi {
   @Override
   public Mono<ResponseEntity<Void>> cancelTopicAnalysis(String clusterName, String topicName,
                                                         ServerWebExchange exchange) {
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(MESSAGES_READ)
-        .build());
-
-    topicAnalysisService.cancelAnalysis(getCluster(clusterName), topicName);
+        .operationName("cancelTopicAnalysis")
+        .build();
 
-    return validateAccess.thenReturn(ResponseEntity.ok().build());
+    return accessControlService.validateAccess(context)
+        .then(Mono.fromRunnable(() -> topicAnalysisService.cancelAnalysis(getCluster(clusterName), topicName)))
+        .doOnEach(sig -> auditService.audit(context, sig))
+        .thenReturn(ResponseEntity.ok().build());
   }
 
 
@@ -292,15 +315,18 @@ public class TopicsController extends AbstractController implements TopicsApi {
                                                                  String topicName,
                                                                  ServerWebExchange exchange) {
 
-    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+    var context = AccessContext.builder()
         .cluster(clusterName)
         .topic(topicName)
         .topicActions(MESSAGES_READ)
-        .build());
+        .operationName("getTopicAnalysis")
+        .build();
 
-    return validateAccess.thenReturn(topicAnalysisService.getTopicAnalysis(getCluster(clusterName), topicName)
-        .map(ResponseEntity::ok)
-        .orElseGet(() -> ResponseEntity.notFound().build()));
+    return accessControlService.validateAccess(context)
+        .thenReturn(topicAnalysisService.getTopicAnalysis(getCluster(clusterName), topicName)
+            .map(ResponseEntity::ok)
+            .orElseGet(() -> ResponseEntity.notFound().build()))
+        .doOnEach(sig -> auditService.audit(context, sig));
   }
 
   private Comparator<InternalTopic> getComparatorForTopic(

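Every handler above now follows the same shape: build an AccessContext carrying an operationName, validate access, run the operation, and finish the chain with .doOnEach(sig -> auditService.audit(context, sig)). doOnEach is used rather than doOnSuccess because it observes every Reactor signal, so failed operations are audited as well. A minimal self-contained sketch of that behavior; the audit method below is a stand-in, not the project's AuditService API:

import reactor.core.publisher.Mono;
import reactor.core.publisher.Signal;

public class AuditSignalSketch {
  // Stand-in for auditService.audit(context, signal): react to terminal signals only.
  static void audit(String operation, Signal<?> sig) {
    if (sig.isOnComplete() || sig.isOnError()) {
      System.out.println("audit " + operation + ": " + sig.getType());
    }
  }

  public static void main(String[] args) {
    Mono.just("ok")
        .doOnEach(sig -> audit("createTopic", sig)) // fires for onNext and onComplete
        .subscribe();

    Mono.error(new IllegalStateException("boom"))
        .doOnEach(sig -> audit("createTopic", sig)) // fires for onError too
        .subscribe(v -> { }, e -> { });
  }
}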
+ 7 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/JsonAvroConversionException.java

@@ -0,0 +1,7 @@
+package com.provectus.kafka.ui.exception;
+
+public class JsonAvroConversionException extends ValidationException {
+  public JsonAvroConversionException(String message) {
+    super(message);
+  }
+}

+ 3 - 3
kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ConsumerGroupMapper.java

@@ -28,7 +28,7 @@ public class ConsumerGroupMapper {
     consumerGroup.setTopics(1); //for ui backward-compatibility, need to rm usage from ui
     consumerGroup.setGroupId(c.getGroupId());
     consumerGroup.setMembers(c.getMembers());
-    consumerGroup.setMessagesBehind(c.getMessagesBehind());
+    consumerGroup.setConsumerLag(c.getConsumerLag());
     consumerGroup.setSimple(c.isSimple());
     consumerGroup.setPartitionAssignor(c.getPartitionAssignor());
     consumerGroup.setState(mapConsumerGroupState(c.getState()));
@@ -54,7 +54,7 @@ public class ConsumerGroupMapper {
           .orElse(0L);
 
       partition.setEndOffset(endOffset.orElse(0L));
-      partition.setMessagesBehind(behind);
+      partition.setConsumerLag(behind);
 
       partitionMap.put(entry.getKey(), partition);
     }
@@ -80,7 +80,7 @@ public class ConsumerGroupMapper {
       InternalConsumerGroup c, T consumerGroup) {
     consumerGroup.setGroupId(c.getGroupId());
     consumerGroup.setMembers(c.getMembers().size());
-    consumerGroup.setMessagesBehind(c.getMessagesBehind());
+    consumerGroup.setConsumerLag(c.getConsumerLag());
     consumerGroup.setTopics(c.getTopicNum());
     consumerGroup.setSimple(c.isSimple());
 

+ 7 - 7
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalConsumerGroup.java

@@ -21,7 +21,7 @@ public class InternalConsumerGroup {
   private final Collection<InternalMember> members;
   private final Map<TopicPartition, Long> offsets;
   private final Map<TopicPartition, Long> endOffsets;
-  private final Long messagesBehind;
+  private final Long consumerLag;
   private final Integer topicNum;
   private final String partitionAssignor;
   private final ConsumerGroupState state;
@@ -50,17 +50,17 @@ public class InternalConsumerGroup {
     builder.members(internalMembers);
     builder.offsets(groupOffsets);
     builder.endOffsets(topicEndOffsets);
-    builder.messagesBehind(calculateMessagesBehind(groupOffsets, topicEndOffsets));
+    builder.consumerLag(calculateConsumerLag(groupOffsets, topicEndOffsets));
     builder.topicNum(calculateTopicNum(groupOffsets, internalMembers));
     Optional.ofNullable(description.coordinator()).ifPresent(builder::coordinator);
     return builder.build();
   }
 
-  private static Long calculateMessagesBehind(Map<TopicPartition, Long> offsets, Map<TopicPartition, Long> endOffsets) {
-    Long messagesBehind = null;
-    // messagesBehind should be undefined if no committed offsets found for topic
+  private static Long calculateConsumerLag(Map<TopicPartition, Long> offsets, Map<TopicPartition, Long> endOffsets) {
+    Long consumerLag = null;
+    // consumerLag should be undefined if no committed offsets found for topic
     if (!offsets.isEmpty()) {
-      messagesBehind = offsets.entrySet().stream()
+      consumerLag = offsets.entrySet().stream()
           .mapToLong(e ->
               Optional.ofNullable(endOffsets)
                   .map(o -> o.get(e.getKey()))
@@ -69,7 +69,7 @@ public class InternalConsumerGroup {
           ).sum();
     }
 
-    return messagesBehind;
+    return consumerLag;
   }
 
   private static Integer calculateTopicNum(Map<TopicPartition, Long> offsets, Collection<InternalMember> members) {

+ 4 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalTopicConsumerGroup.java

@@ -17,7 +17,7 @@ public class InternalTopicConsumerGroup {
   String groupId;
   int members;
   @Nullable
-  Long messagesBehind; //null means no committed offsets found for this group
+  Long consumerLag; //null means no committed offsets found for this group
   boolean isSimple;
   String partitionAssignor;
   ConsumerGroupState state;
@@ -37,7 +37,7 @@ public class InternalTopicConsumerGroup {
                 .filter(m -> m.assignment().topicPartitions().stream().anyMatch(p -> p.topic().equals(topic)))
                 .count()
         )
-        .messagesBehind(calculateMessagesBehind(committedOffsets, endOffsets))
+        .consumerLag(calculateConsumerLag(committedOffsets, endOffsets))
         .isSimple(g.isSimpleConsumerGroup())
         .partitionAssignor(g.partitionAssignor())
         .state(g.state())
@@ -46,8 +46,8 @@ public class InternalTopicConsumerGroup {
   }
 
   @Nullable
-  private static Long calculateMessagesBehind(Map<TopicPartition, Long> committedOffsets,
-                                              Map<TopicPartition, Long> endOffsets) {
+  private static Long calculateConsumerLag(Map<TopicPartition, Long> committedOffsets,
+                                           Map<TopicPartition, Long> endOffsets) {
     if (committedOffsets.isEmpty()) {
       return null;
     }

+ 34 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/AccessContext.java

@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.model.rbac;
 
 import com.provectus.kafka.ui.model.rbac.permission.AclAction;
 import com.provectus.kafka.ui.model.rbac.permission.ApplicationConfigAction;
+import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
 import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
 import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
 import com.provectus.kafka.ui.model.rbac.permission.ConsumerGroupAction;
@@ -11,6 +12,7 @@ import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import lombok.Value;
 import org.springframework.util.Assert;
 
@@ -40,6 +42,11 @@ public class AccessContext {
 
   Collection<AclAction> aclActions;
 
+  Collection<AuditAction> auditAction;
+
+  String operationName;
+  Object operationParams;
+
   public static AccessContextBuilder builder() {
     return new AccessContextBuilder();
   }
@@ -59,6 +66,10 @@ public class AccessContext {
     private Collection<SchemaAction> schemaActions = Collections.emptySet();
     private Collection<KsqlAction> ksqlActions = Collections.emptySet();
     private Collection<AclAction> aclActions = Collections.emptySet();
+    private Collection<AuditAction> auditActions = Collections.emptySet();
+
+    private String operationName;
+    private Object operationParams;
 
     private AccessContextBuilder() {
     }
@@ -141,6 +152,27 @@ public class AccessContext {
       return this;
     }
 
+    public AccessContextBuilder auditActions(AuditAction... actions) {
+      Assert.isTrue(actions.length > 0, "actions not present");
+      this.auditActions = List.of(actions);
+      return this;
+    }
+
+    public AccessContextBuilder operationName(String operationName) {
+      this.operationName = operationName;
+      return this;
+    }
+
+    public AccessContextBuilder operationParams(Object operationParams) {
+      this.operationParams = operationParams;
+      return this;
+    }
+
+    public AccessContextBuilder operationParams(Map<String, Object> paramsMap) {
+      this.operationParams = paramsMap;
+      return this;
+    }
+
     public AccessContext build() {
       return new AccessContext(
           applicationConfigActions,
@@ -150,7 +182,8 @@ public class AccessContext {
           connect, connectActions,
           connector,
           schema, schemaActions,
-          ksqlActions, aclActions);
+          ksqlActions, aclActions, auditActions,
+          operationName, operationParams);
     }
   }
 }
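A short usage sketch of the extended builder, restricted to the methods visible in this diff; all values are illustrative:

import java.util.Map;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.model.rbac.permission.TopicAction;

class AccessContextSketch {
  static AccessContext example() {
    return AccessContext.builder()
        .cluster("local")
        .topic("orders")
        .topicActions(TopicAction.VIEW, TopicAction.EDIT)
        .operationName("updateTopic")                 // recorded by the audit hook
        .operationParams(Map.of("source", "sketch"))  // arbitrary extra context
        .build();
  }
}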

+ 5 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/Permission.java

@@ -2,11 +2,13 @@ package com.provectus.kafka.ui.model.rbac;
 
 import static com.provectus.kafka.ui.model.rbac.Resource.ACL;
 import static com.provectus.kafka.ui.model.rbac.Resource.APPLICATIONCONFIG;
+import static com.provectus.kafka.ui.model.rbac.Resource.AUDIT;
 import static com.provectus.kafka.ui.model.rbac.Resource.CLUSTERCONFIG;
 import static com.provectus.kafka.ui.model.rbac.Resource.KSQL;
 
 import com.provectus.kafka.ui.model.rbac.permission.AclAction;
 import com.provectus.kafka.ui.model.rbac.permission.ApplicationConfigAction;
+import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
 import com.provectus.kafka.ui.model.rbac.permission.ClusterConfigAction;
 import com.provectus.kafka.ui.model.rbac.permission.ConnectAction;
 import com.provectus.kafka.ui.model.rbac.permission.ConsumerGroupAction;
@@ -28,7 +30,8 @@ import org.springframework.util.Assert;
 @EqualsAndHashCode
 public class Permission {
 
-  private static final List<Resource> RBAC_ACTION_EXEMPT_LIST = List.of(KSQL, CLUSTERCONFIG, APPLICATIONCONFIG, ACL);
+  private static final List<Resource> RBAC_ACTION_EXEMPT_LIST =
+      List.of(KSQL, CLUSTERCONFIG, APPLICATIONCONFIG, ACL, AUDIT);
 
   Resource resource;
   List<String> actions;
@@ -79,6 +82,7 @@ public class Permission {
       case CONNECT -> Arrays.stream(ConnectAction.values()).map(Enum::toString).toList();
       case KSQL -> Arrays.stream(KsqlAction.values()).map(Enum::toString).toList();
       case ACL -> Arrays.stream(AclAction.values()).map(Enum::toString).toList();
+      case AUDIT -> Arrays.stream(AuditAction.values()).map(Enum::toString).toList();
     };
   }
 

+ 2 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/Resource.java

@@ -12,7 +12,8 @@ public enum Resource {
   SCHEMA,
   CONNECT,
   KSQL,
-  ACL;
+  ACL,
+  AUDIT;
 
   @Nullable
   public static Resource fromString(String name) {

+ 14 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/permission/AuditAction.java

@@ -0,0 +1,14 @@
+package com.provectus.kafka.ui.model.rbac.permission;
+
+import org.apache.commons.lang3.EnumUtils;
+import org.jetbrains.annotations.Nullable;
+
+public enum AuditAction implements PermissibleAction {
+
+  VIEW;
+
+  @Nullable
+  public static AuditAction fromString(String name) {
+    return EnumUtils.getEnum(AuditAction.class, name);
+  }
+}

+ 5 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/rbac/permission/PermissibleAction.java

@@ -1,4 +1,8 @@
 package com.provectus.kafka.ui.model.rbac.permission;
 
-public interface PermissibleAction {
+public sealed interface PermissibleAction permits
+    AclAction, ApplicationConfigAction,
+    ConsumerGroupAction, SchemaAction,
+    ConnectAction, ClusterConfigAction,
+    KsqlAction, TopicAction, AuditAction {
 }

+ 24 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/SerdeInstance.java

@@ -42,19 +42,39 @@ public class SerdeInstance implements Closeable {
   }
 
   public Optional<SchemaDescription> getSchema(String topic, Serde.Target type) {
-    return wrapWithClassloader(() -> serde.getSchema(topic, type));
+    try {
+      return wrapWithClassloader(() -> serde.getSchema(topic, type));
+    } catch (Exception e) {
+      log.warn("Error getting schema for '{}'({}) with serde '{}'", topic, type, name, e);
+      return Optional.empty();
+    }
   }
 
   public Optional<String> description() {
-    return wrapWithClassloader(serde::getDescription);
+    try {
+      return wrapWithClassloader(serde::getDescription);
+    } catch (Exception e) {
+      log.warn("Error getting description for serde '{}'", name, e);
+      return Optional.empty();
+    }
   }
 
   public boolean canSerialize(String topic, Serde.Target type) {
-    return wrapWithClassloader(() -> serde.canSerialize(topic, type));
+    try {
+      return wrapWithClassloader(() -> serde.canSerialize(topic, type));
+    } catch (Exception e) {
+      log.warn("Error calling canSerialize for '{}'({}) with serde '{}'", topic, type, name, e);
+      return false;
+    }
   }
 
   public boolean canDeserialize(String topic, Serde.Target type) {
-    return wrapWithClassloader(() -> serde.canDeserialize(topic, type));
+    try {
+      return wrapWithClassloader(() -> serde.canDeserialize(topic, type));
+    } catch (Exception e) {
+      log.warn("Error calling canDeserialize for '{}'({}) with serde '{}'", topic, type, name, e);
+      return false;
+    }
   }
 
   public Serde.Serializer serializer(String topic, Serde.Target type) {

+ 24 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/SerdesInitializer.java

@@ -11,6 +11,7 @@ import com.provectus.kafka.ui.serde.api.PropertyResolver;
 import com.provectus.kafka.ui.serde.api.Serde;
 import com.provectus.kafka.ui.serdes.builtin.AvroEmbeddedSerde;
 import com.provectus.kafka.ui.serdes.builtin.Base64Serde;
+import com.provectus.kafka.ui.serdes.builtin.ConsumerOffsetsSerde;
 import com.provectus.kafka.ui.serdes.builtin.Int32Serde;
 import com.provectus.kafka.ui.serdes.builtin.Int64Serde;
 import com.provectus.kafka.ui.serdes.builtin.ProtobufFileSerde;
@@ -118,6 +119,8 @@ public class SerdesInitializer {
       }
     });
 
+    registerTopicRelatedSerde(registeredSerdes);
+
     return new ClusterSerdes(
         registeredSerdes,
         Optional.ofNullable(clusterProperties.getDefaultKeySerde())
@@ -132,6 +135,27 @@ public class SerdesInitializer {
     );
   }
 
+  /**
+   * Registers serdes that should only be used for specific (hard-coded) topics, like ConsumerOffsetsSerde.
+   */
+  private void registerTopicRelatedSerde(Map<String, SerdeInstance> serdes) {
+    registerConsumerOffsetsSerde(serdes);
+  }
+
+  private void registerConsumerOffsetsSerde(Map<String, SerdeInstance> serdes) {
+    var pattern = Pattern.compile(ConsumerOffsetsSerde.TOPIC);
+    serdes.put(
+        ConsumerOffsetsSerde.name(),
+        new SerdeInstance(
+            ConsumerOffsetsSerde.name(),
+            new ConsumerOffsetsSerde(),
+            pattern,
+            pattern,
+            null
+        )
+    );
+  }
+
   private SerdeInstance createFallbackSerde() {
     StringSerde serde = new StringSerde();
     serde.configure(PropertyResolverImpl.empty(), PropertyResolverImpl.empty(), PropertyResolverImpl.empty());

+ 294 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/builtin/ConsumerOffsetsSerde.java

@@ -0,0 +1,294 @@
+package com.provectus.kafka.ui.serdes.builtin;
+
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.fasterxml.jackson.databind.JsonSerializer;
+import com.fasterxml.jackson.databind.SerializerProvider;
+import com.fasterxml.jackson.databind.json.JsonMapper;
+import com.fasterxml.jackson.databind.module.SimpleModule;
+import com.provectus.kafka.ui.serde.api.DeserializeResult;
+import com.provectus.kafka.ui.serde.api.SchemaDescription;
+import com.provectus.kafka.ui.serdes.BuiltInSerde;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Map;
+import java.util.Optional;
+import lombok.SneakyThrows;
+import org.apache.kafka.common.protocol.types.ArrayOf;
+import org.apache.kafka.common.protocol.types.BoundField;
+import org.apache.kafka.common.protocol.types.CompactArrayOf;
+import org.apache.kafka.common.protocol.types.Field;
+import org.apache.kafka.common.protocol.types.Schema;
+import org.apache.kafka.common.protocol.types.Struct;
+import org.apache.kafka.common.protocol.types.Type;
+
+// Deserialization logic and message schemas can be found in
+// kafka.coordinator.group.GroupMetadataManager (readMessageKey, readOffsetMessageValue, readGroupMessageValue)
+public class ConsumerOffsetsSerde implements BuiltInSerde {
+
+  private static final JsonMapper JSON_MAPPER = createMapper();
+
+  public static final String TOPIC = "__consumer_offsets";
+
+  public static String name() {
+    return TOPIC;
+  }
+
+  private static JsonMapper createMapper() {
+    var module = new SimpleModule();
+    module.addSerializer(Struct.class, new JsonSerializer<>() {
+      @Override
+      public void serialize(Struct value, JsonGenerator gen, SerializerProvider serializers) throws IOException {
+        gen.writeStartObject();
+        for (BoundField field : value.schema().fields()) {
+          var fieldVal = value.get(field);
+          gen.writeObjectField(field.def.name, fieldVal);
+        }
+        gen.writeEndObject();
+      }
+    });
+    var mapper = new JsonMapper();
+    mapper.registerModule(module);
+    return mapper;
+  }
+
+  @Override
+  public Optional<String> getDescription() {
+    return Optional.empty();
+  }
+
+  @Override
+  public Optional<SchemaDescription> getSchema(String topic, Target type) {
+    return Optional.empty();
+  }
+
+  @Override
+  public boolean canDeserialize(String topic, Target type) {
+    return topic.equals(TOPIC);
+  }
+
+  @Override
+  public boolean canSerialize(String topic, Target type) {
+    return false;
+  }
+
+  @Override
+  public Serializer serializer(String topic, Target type) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public Deserializer deserializer(String topic, Target type) {
+    return switch (type) {
+      case KEY -> keyDeserializer();
+      case VALUE -> valueDeserializer();
+    };
+  }
+
+  private Deserializer keyDeserializer() {
+    final Schema commitKeySchema = new Schema(
+        new Field("group", Type.STRING, ""),
+        new Field("topic", Type.STRING, ""),
+        new Field("partition", Type.INT32, "")
+    );
+
+    final Schema groupMetadataSchema = new Schema(
+        new Field("group", Type.STRING, "")
+    );
+
+    return (headers, data) -> {
+      var bb = ByteBuffer.wrap(data);
+      short version = bb.getShort();
+      return new DeserializeResult(
+          toJson(
+              switch (version) {
+                case 0, 1 -> commitKeySchema.read(bb);
+                case 2 -> groupMetadataSchema.read(bb);
+                default -> throw new IllegalStateException("Unknown group metadata message version: " + version);
+              }
+          ),
+          DeserializeResult.Type.JSON,
+          Map.of()
+      );
+    };
+  }
+
+  private Deserializer valueDeserializer() {
+    final Schema commitOffsetSchemaV0 =
+        new Schema(
+            new Field("offset", Type.INT64, ""),
+            new Field("metadata", Type.STRING, ""),
+            new Field("commit_timestamp", Type.INT64, "")
+        );
+
+    final Schema commitOffsetSchemaV1 =
+        new Schema(
+            new Field("offset", Type.INT64, ""),
+            new Field("metadata", Type.STRING, ""),
+            new Field("commit_timestamp", Type.INT64, ""),
+            new Field("expire_timestamp", Type.INT64, "")
+        );
+
+    final Schema commitOffsetSchemaV2 =
+        new Schema(
+            new Field("offset", Type.INT64, ""),
+            new Field("metadata", Type.STRING, ""),
+            new Field("commit_timestamp", Type.INT64, "")
+        );
+
+    final Schema commitOffsetSchemaV3 =
+        new Schema(
+            new Field("offset", Type.INT64, ""),
+            new Field("leader_epoch", Type.INT32, ""),
+            new Field("metadata", Type.STRING, ""),
+            new Field("commit_timestamp", Type.INT64, "")
+        );
+
+    final Schema commitOffsetSchemaV4 = new Schema(
+        new Field("offset", Type.INT64, ""),
+        new Field("leader_epoch", Type.INT32, ""),
+        new Field("metadata", Type.COMPACT_STRING, ""),
+        new Field("commit_timestamp", Type.INT64, ""),
+        Field.TaggedFieldsSection.of()
+    );
+
+    final Schema metadataSchema0 =
+        new Schema(
+            new Field("protocol_type", Type.STRING, ""),
+            new Field("generation", Type.INT32, ""),
+            new Field("protocol", Type.NULLABLE_STRING, ""),
+            new Field("leader", Type.NULLABLE_STRING, ""),
+            new Field("members", new ArrayOf(new Schema(
+                new Field("member_id", Type.STRING, ""),
+                new Field("client_id", Type.STRING, ""),
+                new Field("client_host", Type.STRING, ""),
+                new Field("session_timeout", Type.INT32, ""),
+                new Field("subscription", Type.BYTES, ""),
+                new Field("assignment", Type.BYTES, "")
+            )), "")
+        );
+
+    final Schema metadataSchema1 =
+        new Schema(
+            new Field("protocol_type", Type.STRING, ""),
+            new Field("generation", Type.INT32, ""),
+            new Field("protocol", Type.NULLABLE_STRING, ""),
+            new Field("leader", Type.NULLABLE_STRING, ""),
+            new Field("members", new ArrayOf(new Schema(
+                new Field("member_id", Type.STRING, ""),
+                new Field("client_id", Type.STRING, ""),
+                new Field("client_host", Type.STRING, ""),
+                new Field("rebalance_timeout", Type.INT32, ""),
+                new Field("session_timeout", Type.INT32, ""),
+                new Field("subscription", Type.BYTES, ""),
+                new Field("assignment", Type.BYTES, "")
+            )), "")
+        );
+
+    final Schema metadataSchema2 =
+        new Schema(
+            new Field("protocol_type", Type.STRING, ""),
+            new Field("generation", Type.INT32, ""),
+            new Field("protocol", Type.NULLABLE_STRING, ""),
+            new Field("leader", Type.NULLABLE_STRING, ""),
+            new Field("current_state_timestamp", Type.INT64, ""),
+            new Field("members", new ArrayOf(new Schema(
+                new Field("member_id", Type.STRING, ""),
+                new Field("client_id", Type.STRING, ""),
+                new Field("client_host", Type.STRING, ""),
+                new Field("rebalance_timeout", Type.INT32, ""),
+                new Field("session_timeout", Type.INT32, ""),
+                new Field("subscription", Type.BYTES, ""),
+                new Field("assignment", Type.BYTES, "")
+            )), "")
+        );
+
+    final Schema metadataSchema3 =
+        new Schema(
+            new Field("protocol_type", Type.STRING, ""),
+            new Field("generation", Type.INT32, ""),
+            new Field("protocol", Type.NULLABLE_STRING, ""),
+            new Field("leader", Type.NULLABLE_STRING, ""),
+            new Field("current_state_timestamp", Type.INT64, ""),
+            new Field("members", new ArrayOf(new Schema(
+                new Field("member_id", Type.STRING, ""),
+                new Field("group_instance_id", Type.NULLABLE_STRING, ""),
+                new Field("client_id", Type.STRING, ""),
+                new Field("client_host", Type.STRING, ""),
+                new Field("rebalance_timeout", Type.INT32, ""),
+                new Field("session_timeout", Type.INT32, ""),
+                new Field("subscription", Type.BYTES, ""),
+                new Field("assignment", Type.BYTES, "")
+            )), "")
+        );
+
+    final Schema metadataSchema4 =
+        new Schema(
+            new Field("protocol_type", Type.COMPACT_STRING, ""),
+            new Field("generation", Type.INT32, ""),
+            new Field("protocol", Type.COMPACT_NULLABLE_STRING, ""),
+            new Field("leader", Type.COMPACT_NULLABLE_STRING, ""),
+            new Field("current_state_timestamp", Type.INT64, ""),
+            new Field("members", new CompactArrayOf(new Schema(
+                new Field("member_id", Type.COMPACT_STRING, ""),
+                new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, ""),
+                new Field("client_id", Type.COMPACT_STRING, ""),
+                new Field("client_host", Type.COMPACT_STRING, ""),
+                new Field("rebalance_timeout", Type.INT32, ""),
+                new Field("session_timeout", Type.INT32, ""),
+                new Field("subscription", Type.COMPACT_BYTES, ""),
+                new Field("assignment", Type.COMPACT_BYTES, ""),
+                Field.TaggedFieldsSection.of()
+            )), ""),
+            Field.TaggedFieldsSection.of()
+        );
+
+    return (headers, data) -> {
+      String result;
+      var bb = ByteBuffer.wrap(data);
+      short version = bb.getShort();
+      // Ideally we should distinguish whether the value is a commit or metadata message
+      // by checking the record's key, but our current serde structure doesn't allow that,
+      // so we try to parse it as metadata first and fall back to the commit format.
+      try {
+        result = toJson(
+            switch (version) {
+              case 0 -> metadataSchema0.read(bb);
+              case 1 -> metadataSchema1.read(bb);
+              case 2 -> metadataSchema2.read(bb);
+              case 3 -> metadataSchema3.read(bb);
+              case 4 -> metadataSchema4.read(bb);
+              default -> throw new IllegalArgumentException("Unrecognized version: " + version);
+            }
+        );
+      } catch (Throwable e) {
+        bb = bb.rewind();
+        bb.getShort(); // skipping version
+        result = toJson(
+            switch (version) {
+              case 0 -> commitOffsetSchemaV0.read(bb);
+              case 1 -> commitOffsetSchemaV1.read(bb);
+              case 2 -> commitOffsetSchemaV2.read(bb);
+              case 3 -> commitOffsetSchemaV3.read(bb);
+              case 4 -> commitOffsetSchemaV4.read(bb);
+              default -> throw new IllegalArgumentException("Unrecognized version: " + version);
+            }
+        );
+      }
+
+      if (bb.remaining() != 0) {
+        throw new IllegalArgumentException(
+            "Message buffer is not read to the end, which likely means the message is unrecognized");
+      }
+      return new DeserializeResult(
+          result,
+          DeserializeResult.Type.JSON,
+          Map.of()
+      );
+    };
+  }
+
+  @SneakyThrows
+  private String toJson(Struct s) {
+    return JSON_MAPPER.writeValueAsString(s);
+  }
+}
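For reference, a standalone sketch of what the key deserializer above does for key versions 0 and 1: read the two-byte version, then decode the remaining bytes with the commit-key schema. The schema and field names are copied from the serde; the input is assumed to be a raw __consumer_offsets record key:

import java.nio.ByteBuffer;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;

class CommitKeySketch {
  static String describe(byte[] keyBytes) {
    Schema commitKeySchema = new Schema(
        new Field("group", Type.STRING, ""),
        new Field("topic", Type.STRING, ""),
        new Field("partition", Type.INT32, ""));
    ByteBuffer bb = ByteBuffer.wrap(keyBytes);
    short version = bb.getShort(); // versions 0 and 1 share the commit-key layout
    Struct key = commitKeySchema.read(bb);
    return "v" + version + " group=" + key.getString("group")
        + " topic=" + key.getString("topic")
        + " partition=" + key.getInt("partition");
  }
}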

+ 6 - 14
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/builtin/ProtobufFileSerde.java

@@ -50,7 +50,6 @@ import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaUtils;
 import java.io.ByteArrayInputStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.nio.file.Paths;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
@@ -204,17 +203,13 @@ public class ProtobufFileSerde implements BuiltInSerde {
                        Map<String, Descriptor> keyMessageDescriptorMap) {
 
     static boolean canBeAutoConfigured(PropertyResolver kafkaClusterProperties) {
-      Optional<String> protobufFile = kafkaClusterProperties.getProperty("protobufFile", String.class);
       Optional<List<String>> protobufFiles = kafkaClusterProperties.getListProperty("protobufFiles", String.class);
       Optional<String> protobufFilesDir = kafkaClusterProperties.getProperty("protobufFilesDir", String.class);
-      return protobufFilesDir.isPresent()
-          || protobufFile.isPresent()
-          || protobufFiles.filter(files -> !files.isEmpty()).isPresent();
+      return protobufFilesDir.isPresent() || protobufFiles.filter(files -> !files.isEmpty()).isPresent();
     }
 
     static Configuration create(PropertyResolver properties) {
       var protobufSchemas = loadSchemas(
-          properties.getProperty("protobufFile", String.class),
           properties.getListProperty("protobufFiles", String.class),
           properties.getProperty("protobufFilesDir", String.class)
       );
@@ -272,12 +267,11 @@ public class ProtobufFileSerde implements BuiltInSerde {
     }
 
     @VisibleForTesting
-    static Map<Path, ProtobufSchema> loadSchemas(Optional<String> protobufFile,
-                                                 Optional<List<String>> protobufFiles,
+    static Map<Path, ProtobufSchema> loadSchemas(Optional<List<String>> protobufFiles,
                                                  Optional<String> protobufFilesDir) {
       if (protobufFilesDir.isPresent()) {
-        if (protobufFile.isPresent() || protobufFiles.isPresent()) {
-          log.warn("protobufFile and protobufFiles properties will be ignored, since protobufFilesDir provided");
+        if (protobufFiles.isPresent()) {
+          log.warn("protobufFiles property will be ignored, since protobufFilesDir is provided");
         }
         List<ProtoFile> loadedFiles = new ProtoSchemaLoader(protobufFilesDir.get()).load();
         Map<String, ProtoFileElement> allPaths = loadedFiles.stream()
@@ -288,10 +282,8 @@ public class ProtobufFileSerde implements BuiltInSerde {
                 f -> new ProtobufSchema(f.toElement(), List.of(), allPaths)));
       }
       //Supporting for backward-compatibility. Normally, protobufFilesDir setting should be used
-      return Stream.concat(
-              protobufFile.stream(),
-              protobufFiles.stream().flatMap(Collection::stream)
-          )
+      return protobufFiles.stream()
+          .flatMap(Collection::stream)
           .distinct()
           .map(Path::of)
           .collect(Collectors.toMap(path -> path, path -> new ProtobufSchema(readFileAsString(path))));

+ 4 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/builtin/sr/AvroSchemaRegistrySerializer.java

@@ -1,12 +1,13 @@
 package com.provectus.kafka.ui.serdes.builtin.sr;
 
+import com.provectus.kafka.ui.util.jsonschema.JsonAvroConversion;
 import io.confluent.kafka.schemaregistry.ParsedSchema;
 import io.confluent.kafka.schemaregistry.avro.AvroSchema;
-import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
 import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
 import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
 import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
 import io.confluent.kafka.serializers.KafkaAvroSerializer;
+import io.confluent.kafka.serializers.KafkaAvroSerializerConfig;
 import java.util.Map;
 import org.apache.kafka.common.serialization.Serializer;
 
@@ -25,6 +26,7 @@ class AvroSchemaRegistrySerializer extends SchemaRegistrySerializer<Object> {
         Map.of(
             "schema.registry.url", "wontbeused",
             AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
+            KafkaAvroSerializerConfig.AVRO_USE_LOGICAL_TYPE_CONVERTERS_CONFIG, true,
             AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
         ),
         isKey
@@ -35,7 +37,7 @@ class AvroSchemaRegistrySerializer extends SchemaRegistrySerializer<Object> {
   @Override
   protected Object serialize(String value, ParsedSchema schema) {
     try {
-      return AvroSchemaUtils.toObject(value, (AvroSchema) schema);
+      return JsonAvroConversion.convertJsonToAvro(value, ((AvroSchema) schema).rawSchema());
     } catch (Throwable e) {
       throw new RuntimeException("Failed to serialize record for topic " + topic, e);
     }

+ 14 - 5
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/builtin/sr/MessageFormatter.java

@@ -3,9 +3,12 @@ package com.provectus.kafka.ui.serdes.builtin.sr;
 import com.fasterxml.jackson.databind.JsonNode;
 import com.google.protobuf.Message;
 import com.google.protobuf.util.JsonFormat;
+import com.provectus.kafka.ui.util.jsonschema.JsonAvroConversion;
 import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
 import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
 import io.confluent.kafka.serializers.KafkaAvroDeserializer;
+import io.confluent.kafka.serializers.KafkaAvroDeserializerConfig;
 import io.confluent.kafka.serializers.json.KafkaJsonSchemaDeserializer;
 import io.confluent.kafka.serializers.protobuf.KafkaProtobufDeserializer;
 import java.util.Map;
@@ -28,16 +31,22 @@ interface MessageFormatter {
 
     AvroMessageFormatter(SchemaRegistryClient client) {
       this.avroDeserializer = new KafkaAvroDeserializer(client);
+      this.avroDeserializer.configure(
+          Map.of(
+              AbstractKafkaSchemaSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG, "wontbeused",
+              KafkaAvroDeserializerConfig.SPECIFIC_AVRO_READER_CONFIG, false,
+              KafkaAvroDeserializerConfig.SCHEMA_REFLECTION_CONFIG, false,
+              KafkaAvroDeserializerConfig.AVRO_USE_LOGICAL_TYPE_CONVERTERS_CONFIG, true
+          ),
+          false
+      );
     }
 
     @Override
-    @SneakyThrows
     public String format(String topic, byte[] value) {
-      // deserialized will have type, that depends on schema type (record or primitive),
-      // AvroSchemaUtils.toJson(...) method will take it into account
       Object deserialized = avroDeserializer.deserialize(topic, value);
-      byte[] jsonBytes = AvroSchemaUtils.toJson(deserialized);
-      return new String(jsonBytes);
+      var schema = AvroSchemaUtils.getSchema(deserialized);
+      return JsonAvroConversion.convertAvroToJson(deserialized, schema).toString();
     }
   }
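The serializer and formatter now delegate to the project's JsonAvroConversion in both directions (instead of AvroSchemaUtils), which also honors the logical-type converters enabled above. A hedged round-trip sketch using only the two calls introduced in this PR; the record schema is illustrative, and treating convertAvroToJson's return value as a Jackson JsonNode is an assumption based on the .toString() usage above:

import org.apache.avro.Schema;
import com.provectus.kafka.ui.util.jsonschema.JsonAvroConversion;

class AvroJsonRoundTripSketch {
  static String roundTrip() {
    Schema schema = new Schema.Parser().parse(
        "{\"type\":\"record\",\"name\":\"User\","
            + "\"fields\":[{\"name\":\"name\",\"type\":\"string\"}]}");
    Object avro = JsonAvroConversion.convertJsonToAvro("{\"name\":\"alice\"}", schema);
    return JsonAvroConversion.convertAvroToJson(avro, schema).toString();
  }
}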
 

+ 33 - 37
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serdes/builtin/sr/SchemaRegistrySerde.java

@@ -189,39 +189,40 @@ public class SchemaRegistrySerde implements BuiltInSerde {
   public Optional<SchemaDescription> getSchema(String topic, Target type) {
     String subject = schemaSubject(topic, type);
     return getSchemaBySubject(subject)
-        .map(schemaMetadata ->
-            new SchemaDescription(
-                convertSchema(schemaMetadata),
-                Map.of(
-                    "subject", subject,
-                    "schemaId", schemaMetadata.getId(),
-                    "latestVersion", schemaMetadata.getVersion(),
-                    "type", schemaMetadata.getSchemaType() // AVRO / PROTOBUF / JSON
-                )
-            ));
+        .flatMap(schemaMetadata ->
+            //schema can be not-found, when schema contexts configured improperly
+            getSchemaById(schemaMetadata.getId())
+                .map(parsedSchema ->
+                    new SchemaDescription(
+                        convertSchema(schemaMetadata, parsedSchema),
+                        Map.of(
+                            "subject", subject,
+                            "schemaId", schemaMetadata.getId(),
+                            "latestVersion", schemaMetadata.getVersion(),
+                            "type", schemaMetadata.getSchemaType() // AVRO / PROTOBUF / JSON
+                        )
+                    )));
   }
 
   @SneakyThrows
-  private String convertSchema(SchemaMetadata schema) {
+  private String convertSchema(SchemaMetadata schema, ParsedSchema parsedSchema) {
     URI basePath = new URI(schemaRegistryUrls.get(0))
         .resolve(Integer.toString(schema.getId()));
-    ParsedSchema schemaById = schemaRegistryClient.getSchemaById(schema.getId());
     SchemaType schemaType = SchemaType.fromString(schema.getSchemaType())
         .orElseThrow(() -> new IllegalStateException("Unknown schema type: " + schema.getSchemaType()));
-    switch (schemaType) {
-      case PROTOBUF:
-        return new ProtobufSchemaConverter()
-            .convert(basePath, ((ProtobufSchema) schemaById).toDescriptor())
-            .toJson();
-      case AVRO:
-        return new AvroJsonSchemaConverter()
-            .convert(basePath, ((AvroSchema) schemaById).rawSchema())
-            .toJson();
-      case JSON:
-        return schema.getSchema();
-      default:
-        throw new IllegalStateException();
-    }
+    return switch (schemaType) {
+      case PROTOBUF -> new ProtobufSchemaConverter()
+          .convert(basePath, ((ProtobufSchema) parsedSchema).toDescriptor())
+          .toJson();
+      case AVRO -> new AvroJsonSchemaConverter()
+          .convert(basePath, ((AvroSchema) parsedSchema).rawSchema())
+          .toJson();
+      case JSON -> schema.getSchema();
+    };
+  }
+
+  private Optional<ParsedSchema> getSchemaById(int id) {
+    return wrapWith404Handler(() -> schemaRegistryClient.getSchemaById(id));
   }
 
   private Optional<SchemaMetadata> getSchemaBySubject(String subject) {
@@ -253,16 +254,11 @@ public class SchemaRegistrySerde implements BuiltInSerde {
     boolean isKey = type == Target.KEY;
     SchemaType schemaType = SchemaType.fromString(schema.getSchemaType())
         .orElseThrow(() -> new IllegalStateException("Unknown schema type: " + schema.getSchemaType()));
-    switch (schemaType) {
-      case PROTOBUF:
-        return new ProtobufSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
-      case AVRO:
-        return new AvroSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
-      case JSON:
-        return new JsonSchemaSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
-      default:
-        throw new IllegalStateException();
-    }
+    return switch (schemaType) {
+      case PROTOBUF -> new ProtobufSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
+      case AVRO -> new AvroSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
+      case JSON -> new JsonSchemaSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
+    };
   }
 
   @Override
@@ -297,7 +293,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
   }
 
   private SchemaType getMessageFormatBySchemaId(int schemaId) {
-    return wrapWith404Handler(() -> schemaRegistryClient.getSchemaById(schemaId))
+    return getSchemaById(schemaId)
         .map(ParsedSchema::schemaType)
         .flatMap(SchemaType::fromString)
         .orElseThrow(() -> new ValidationException(String.format("Schema for id '%d' not found ", schemaId)));

+ 2 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerService.java

@@ -17,6 +17,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
+import javax.annotation.Nullable;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.admin.ConfigEntry;
@@ -117,7 +118,7 @@ public class BrokerService {
               .stream()
               .map(Node::id)
               .collect(Collectors.toList());
-          if (reqBrokers != null && !reqBrokers.isEmpty()) {
+          if (!reqBrokers.isEmpty()) {
             brokers.retainAll(reqBrokers);
           }
           return admin.describeLogDirs(brokers);

+ 1 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumerGroupService.java

@@ -164,7 +164,7 @@ public class ConsumerGroupService {
       case MESSAGES_BEHIND -> {
 
         Comparator<GroupWithDescr> comparator = Comparator.comparingLong(gwd ->
-            gwd.icg.getMessagesBehind() == null ? 0L : gwd.icg.getMessagesBehind());
+            gwd.icg.getConsumerLag() == null ? 0L : gwd.icg.getConsumerLag());
 
         yield loadDescriptionsByInternalConsumerGroups(ac, groups, comparator, pageNum, perPage, sortOrderDto);
       }

+ 12 - 5
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConfigSanitizer.java

@@ -5,11 +5,13 @@ import static java.util.regex.Pattern.CASE_INSENSITIVE;
 import com.google.common.collect.ImmutableList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.regex.Pattern;
 import java.util.stream.Collectors;
+import javax.annotation.Nullable;
 import org.apache.kafka.common.config.ConfigDef;
 import org.apache.kafka.common.config.SaslConfigs;
 import org.apache.kafka.common.config.SslConfigs;
@@ -17,7 +19,7 @@ import org.springframework.beans.factory.annotation.Value;
 import org.springframework.stereotype.Component;
 
 @Component
-class KafkaConfigSanitizer  {
+class KafkaConfigSanitizer {
 
   private static final String SANITIZED_VALUE = "******";
 
@@ -65,10 +67,8 @@ class KafkaConfigSanitizer  {
         .collect(Collectors.toSet());
   }
 
-  public Object sanitize(String key, Object value) {
-    if (value == null) {
-      return null;
-    }
+  @Nullable
+  public Object sanitize(String key, @Nullable Object value) {
     for (Pattern pattern : sanitizeKeysPatterns) {
       if (pattern.matcher(key).matches()) {
         return SANITIZED_VALUE;
@@ -77,5 +77,12 @@ class KafkaConfigSanitizer  {
     return value;
   }
 
+  public Map<String, Object> sanitizeConnectorConfig(@Nullable Map<String, Object> original) {
+    var result = new HashMap<String, Object>(); // HashMap is used on purpose: it permits null values
+    if (original != null) {
+      original.forEach((k, v) -> result.put(k, sanitize(k, v)));
+    }
+    return result;
+  }
 
 }
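A usage sketch of the new helper. It assumes the sanitizer's default patterns mask keys containing "password" (the connector keys here are illustrative); note that null values pass through unchanged, which is why a HashMap is used internally:

import java.util.HashMap;
import java.util.Map;

class SanitizerSketch {
  static Map<String, Object> sanitized(KafkaConfigSanitizer sanitizer) {
    Map<String, Object> raw = new HashMap<>();
    raw.put("connection.url", "jdbc:postgresql://db:5432/app");
    raw.put("connection.password", "s3cr3t"); // assumed to match a secret-key pattern
    raw.put("tasks.max", null);               // null values are preserved
    return sanitizer.sanitizeConnectorConfig(raw);
    // expected: {connection.url=..., connection.password=******, tasks.max=null}
  }
}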

+ 4 - 15
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java

@@ -24,7 +24,6 @@ import com.provectus.kafka.ui.model.NewConnectorDTO;
 import com.provectus.kafka.ui.model.TaskDTO;
 import com.provectus.kafka.ui.model.connect.InternalConnectInfo;
 import com.provectus.kafka.ui.util.ReactiveFailover;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -176,19 +175,14 @@ public class KafkaConnectService {
                         e -> emptyStatus(connectorName))
                     .map(connectorStatus -> {
                       var status = connectorStatus.getConnector();
-                      final Map<String, Object> obfuscatedConfig = connector.getConfig().entrySet()
-                          .stream()
-                          .collect(Collectors.toMap(
-                              Map.Entry::getKey,
-                              e -> kafkaConfigSanitizer.sanitize(e.getKey(), e.getValue())
-                          ));
-                      ConnectorDTO result = (ConnectorDTO) new ConnectorDTO()
+                      var sanitizedConfig = kafkaConfigSanitizer.sanitizeConnectorConfig(connector.getConfig());
+                      ConnectorDTO result = new ConnectorDTO()
                           .connect(connectName)
                           .status(kafkaConnectMapper.fromClient(status))
                           .type(connector.getType())
                           .tasks(connector.getTasks())
                           .name(connector.getName())
-                          .config(obfuscatedConfig);
+                          .config(sanitizedConfig);
 
                       if (connectorStatus.getTasks() != null) {
                         boolean isAnyTaskFailed = connectorStatus.getTasks().stream()
@@ -217,12 +211,7 @@ public class KafkaConnectService {
                                                       String connectorName) {
     return api(cluster, connectName)
         .mono(c -> c.getConnectorConfig(connectorName))
-        .map(connectorConfig -> {
-          final Map<String, Object> obfuscatedMap = new HashMap<>();
-          connectorConfig.forEach((key, value) ->
-              obfuscatedMap.put(key, kafkaConfigSanitizer.sanitize(key, value)));
-          return obfuscatedMap;
-        });
+        .map(kafkaConfigSanitizer::sanitizeConnectorConfig);
   }
 
   public Mono<ConnectorDTO> setConnectorConfig(KafkaCluster cluster, String connectName,

+ 52 - 7
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java

@@ -14,11 +14,16 @@ import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
 import com.provectus.kafka.ui.model.SeekDirectionDTO;
+import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
+import com.provectus.kafka.ui.model.SmartFilterTestExecutionResultDTO;
 import com.provectus.kafka.ui.model.TopicMessageDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.serde.api.Serde;
 import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
 import com.provectus.kafka.ui.util.SslPropertiesUtil;
+import java.time.Instant;
+import java.time.OffsetDateTime;
+import java.time.ZoneOffset;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
@@ -81,6 +86,40 @@ public class MessagesService {
         .switchIfEmpty(Mono.error(new TopicNotFoundException()));
   }
 
+  public static SmartFilterTestExecutionResultDTO execSmartFilterTest(SmartFilterTestExecutionDTO execData) {
+    Predicate<TopicMessageDTO> predicate;
+    try {
+      predicate = MessageFilters.createMsgFilter(
+          execData.getFilterCode(),
+          MessageFilterTypeDTO.GROOVY_SCRIPT
+      );
+    } catch (Exception e) {
+      log.info("Smart filter '{}' compilation error", execData.getFilterCode(), e);
+      return new SmartFilterTestExecutionResultDTO()
+          .error("Compilation error: " + e.getMessage());
+    }
+    try {
+      var result = predicate.test(
+          new TopicMessageDTO()
+              .key(execData.getKey())
+              .content(execData.getValue())
+              .headers(execData.getHeaders())
+              .offset(execData.getOffset())
+              .partition(execData.getPartition())
+              .timestamp(
+                  Optional.ofNullable(execData.getTimestampMs())
+                      .map(ts -> OffsetDateTime.ofInstant(Instant.ofEpochMilli(ts), ZoneOffset.UTC))
+                      .orElse(null))
+      );
+      return new SmartFilterTestExecutionResultDTO()
+          .result(result);
+    } catch (Exception e) {
+      log.info("Smart filter {} execution error", execData, e);
+      return new SmartFilterTestExecutionResultDTO()
+          .error("Execution error: " + e.getMessage());
+    }
+  }
+
   public Mono<Void> deleteTopicMessages(KafkaCluster cluster, String topicName,
                                         List<Integer> partitionsToInclude) {
     return withExistingTopic(cluster, topicName)
@@ -127,13 +166,7 @@ public class MessagesService {
             msg.getValueSerde().get()
         );
 
-    Properties properties = new Properties();
-    SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties);
-    properties.putAll(cluster.getProperties());
-    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
-    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
-    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
-    try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(properties)) {
+    try (KafkaProducer<byte[], byte[]> producer = createProducer(cluster, Map.of())) {
       ProducerRecord<byte[], byte[]> producerRecord = producerRecordCreator.create(
           topicDescription.name(),
           msg.getPartition(),
@@ -155,6 +188,18 @@ public class MessagesService {
     }
   }
 
+  public static KafkaProducer<byte[], byte[]> createProducer(KafkaCluster cluster,
+                                                             Map<String, Object> additionalProps) {
+    Properties properties = new Properties();
+    SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties);
+    properties.putAll(cluster.getProperties());
+    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
+    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
+    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
+    properties.putAll(additionalProps);
+    return new KafkaProducer<>(properties);
+  }
+
   public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
                                                  ConsumerPosition consumerPosition,
                                                  @Nullable String query,
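
(A sketch of exercising the new smart-filter test helper; the fluent SmartFilterTestExecutionDTO setters are assumed to follow the generated style seen elsewhere in this PR, e.g. ConnectorDTO, and the Groovy snippet is illustrative only.)

    var exec = new SmartFilterTestExecutionDTO()
        .filterCode("value != null") // compiled as MessageFilterTypeDTO.GROOVY_SCRIPT
        .key("k1")
        .value("payload");

    SmartFilterTestExecutionResultDTO res = MessagesService.execSmartFilterTest(exec);
    // res.getResult() carries the predicate verdict; compilation and execution
    // failures are reported via res.getError() instead of thrown exceptions.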

+ 9 - 12
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/TopicsService.java

@@ -168,21 +168,18 @@ public class TopicsService {
             .map(m -> m.values().stream().findFirst().orElse(List.of())));
   }
 
-  private Mono<InternalTopic> createTopic(KafkaCluster c, ReactiveAdminClient adminClient,
-                                          Mono<TopicCreationDTO> topicCreation) {
-    return topicCreation.flatMap(topicData ->
-            adminClient.createTopic(
-                topicData.getName(),
-                topicData.getPartitions(),
-                topicData.getReplicationFactor(),
-                topicData.getConfigs()
-            ).thenReturn(topicData)
-        )
+  private Mono<InternalTopic> createTopic(KafkaCluster c, ReactiveAdminClient adminClient, TopicCreationDTO topicData) {
+    return adminClient.createTopic(
+            topicData.getName(),
+            topicData.getPartitions(),
+            topicData.getReplicationFactor(),
+            topicData.getConfigs())
+        .thenReturn(topicData)
         .onErrorMap(t -> new TopicMetadataException(t.getMessage(), t))
-        .flatMap(topicData -> loadTopicAfterCreation(c, topicData.getName()));
+        .then(loadTopicAfterCreation(c, topicData.getName()));
   }
 
-  public Mono<InternalTopic> createTopic(KafkaCluster cluster, Mono<TopicCreationDTO> topicCreation) {
+  public Mono<InternalTopic> createTopic(KafkaCluster cluster, TopicCreationDTO topicCreation) {
     return adminClientService.get(cluster)
         .flatMap(ac -> createTopic(cluster, ac, topicCreation));
   }
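
(Call-site sketch with hypothetical names: since createTopic now takes the DTO directly, the caller resolves the body Mono first.)

    Mono<TopicCreationDTO> body = serverRequest.bodyToMono(TopicCreationDTO.class); // hypothetical source
    Mono<InternalTopic> created = body.flatMap(dto -> topicsService.createTopic(cluster, dto));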

+ 97 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/audit/AuditRecord.java

@@ -0,0 +1,97 @@
+package com.provectus.kafka.ui.service.audit;
+
+import com.fasterxml.jackson.annotation.JsonInclude;
+import com.fasterxml.jackson.databind.json.JsonMapper;
+import com.provectus.kafka.ui.exception.CustomBaseException;
+import com.provectus.kafka.ui.exception.ValidationException;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.model.rbac.Resource;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import javax.annotation.Nullable;
+import lombok.SneakyThrows;
+import org.springframework.security.access.AccessDeniedException;
+
+record AuditRecord(String timestamp,
+                   String username,
+                   String clusterName,
+                   List<AuditResource> resources,
+                   String operation,
+                   Object operationParams,
+                   OperationResult result) {
+
+  static final JsonMapper MAPPER = new JsonMapper();
+
+  static {
+    MAPPER.setSerializationInclusion(JsonInclude.Include.NON_NULL);
+  }
+
+  @SneakyThrows
+  String toJson() {
+    return MAPPER.writeValueAsString(this);
+  }
+
+  record AuditResource(String accessType, Resource type, @Nullable Object id) {
+
+    static List<AuditResource> getAccessedResources(AccessContext ctx) {
+      List<AuditResource> resources = new ArrayList<>();
+      ctx.getClusterConfigActions()
+          .forEach(a -> resources.add(new AuditResource(a.name(), Resource.CLUSTERCONFIG, null)));
+      ctx.getTopicActions()
+          .forEach(a -> resources.add(new AuditResource(a.name(), Resource.TOPIC, nameId(ctx.getTopic()))));
+      ctx.getConsumerGroupActions()
+          .forEach(a -> resources.add(new AuditResource(a.name(), Resource.CONSUMER, nameId(ctx.getConsumerGroup()))));
+      ctx.getConnectActions()
+          .forEach(a -> {
+            Map<String, String> resourceId = new LinkedHashMap<>();
+            resourceId.put("connect", ctx.getConnect());
+            if (ctx.getConnector() != null) {
+              resourceId.put("connector", ctx.getConnector());
+            }
+            resources.add(new AuditResource(a.name(), Resource.CONNECT, resourceId));
+          });
+      ctx.getSchemaActions()
+          .forEach(a -> resources.add(new AuditResource(a.name(), Resource.SCHEMA, nameId(ctx.getSchema()))));
+      ctx.getKsqlActions()
+          .forEach(a -> resources.add(new AuditResource(a.name(), Resource.KSQL, null)));
+      ctx.getAclActions()
+          .forEach(a -> resources.add(new AuditResource(a.name(), Resource.ACL, null)));
+      ctx.getAuditAction()
+          .forEach(a -> resources.add(new AuditResource(a.name(), Resource.AUDIT, null)));
+      return resources;
+    }
+
+    @Nullable
+    private static Map<String, Object> nameId(@Nullable String name) {
+      return name != null ? Map.of("name", name) : null;
+    }
+  }
+
+  record OperationResult(boolean success, OperationError error) {
+
+    static OperationResult successful() {
+      return new OperationResult(true, null);
+    }
+
+    static OperationResult error(Throwable th) {
+      OperationError err = OperationError.UNRECOGNIZED_ERROR;
+      if (th instanceof AccessDeniedException) {
+        err = OperationError.ACCESS_DENIED;
+      } else if (th instanceof ValidationException) {
+        err = OperationError.VALIDATION_ERROR;
+      } else if (th instanceof CustomBaseException) {
+        err = OperationError.EXECUTION_ERROR;
+      }
+      return new OperationResult(false, err);
+    }
+
+    enum OperationError {
+      ACCESS_DENIED,
+      VALIDATION_ERROR,
+      EXECUTION_ERROR,
+      UNRECOGNIZED_ERROR
+    }
+  }
+}
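
(For illustration, a successful topic VIEW would serialize to a single JSON line roughly like the one below; the values are invented, and NON_NULL inclusion drops a null operationParams and the null error field inside result.)

    {"timestamp":"2023-08-01T10:00:00Z","username":"admin","clusterName":"local","resources":[{"accessType":"VIEW","type":"TOPIC","id":{"name":"orders"}}],"operation":"getTopicDetails","result":{"success":true}}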

+ 209 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/audit/AuditService.java

@@ -0,0 +1,209 @@
+package com.provectus.kafka.ui.service.audit;
+
+import static com.provectus.kafka.ui.service.MessagesService.createProducer;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.provectus.kafka.ui.config.ClustersProperties;
+import com.provectus.kafka.ui.config.auth.AuthenticatedUser;
+import com.provectus.kafka.ui.config.auth.RbacUser;
+import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.service.AdminClientService;
+import com.provectus.kafka.ui.service.ClustersStorage;
+import com.provectus.kafka.ui.service.ReactiveAdminClient;
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
+import javax.annotation.Nullable;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.security.core.context.SecurityContext;
+import org.springframework.stereotype.Service;
+import reactor.core.publisher.Mono;
+import reactor.core.publisher.Signal;
+
+
+@Slf4j
+@Service
+public class AuditService implements Closeable {
+
+  private static final Mono<AuthenticatedUser> NO_AUTH_USER = Mono.just(new AuthenticatedUser("Unknown", Set.of()));
+
+  private static final String DEFAULT_AUDIT_TOPIC_NAME = "__kui-audit-log";
+  private static final int DEFAULT_AUDIT_TOPIC_PARTITIONS = 1;
+  private static final Map<String, String> DEFAULT_AUDIT_TOPIC_CONFIG = Map.of(
+      "retention.ms", String.valueOf(TimeUnit.DAYS.toMillis(7)),
+      "cleanup.policy", "delete"
+  );
+  private static final Map<String, Object> AUDIT_PRODUCER_CONFIG = Map.of(
+      ProducerConfig.COMPRESSION_TYPE_CONFIG, "gzip"
+  );
+
+  private static final Logger AUDIT_LOGGER = LoggerFactory.getLogger("audit");
+
+  private final Map<String, AuditWriter> auditWriters;
+
+  @Autowired
+  public AuditService(AdminClientService adminClientService, ClustersStorage clustersStorage) {
+    Map<String, AuditWriter> auditWriters = new HashMap<>();
+    for (var cluster : clustersStorage.getKafkaClusters()) {
+      ReactiveAdminClient adminClient;
+      try {
+        adminClient = adminClientService.get(cluster).block();
+      } catch (Exception e) {
+        printAuditInitError(cluster, "Error connecting to cluster", e);
+        continue;
+      }
+      createAuditWriter(cluster, adminClient, () -> createProducer(cluster, AUDIT_PRODUCER_CONFIG))
+          .ifPresent(writer -> auditWriters.put(cluster.getName(), writer));
+    }
+    this.auditWriters = auditWriters;
+  }
+
+  @VisibleForTesting
+  AuditService(Map<String, AuditWriter> auditWriters) {
+    this.auditWriters = auditWriters;
+  }
+
+  @VisibleForTesting
+  static Optional<AuditWriter> createAuditWriter(KafkaCluster cluster,
+                                                 ReactiveAdminClient ac,
+                                                 Supplier<KafkaProducer<byte[], byte[]>> producerFactory) {
+    var auditProps = cluster.getOriginalProperties().getAudit();
+    if (auditProps == null) {
+      return Optional.empty();
+    }
+    boolean topicAudit = Optional.ofNullable(auditProps.getTopicAuditEnabled()).orElse(false);
+    boolean consoleAudit = Optional.ofNullable(auditProps.getConsoleAuditEnabled()).orElse(false);
+    if (!topicAudit && !consoleAudit) {
+      return Optional.empty();
+    }
+    String auditTopicName = Optional.ofNullable(auditProps.getTopic()).orElse(DEFAULT_AUDIT_TOPIC_NAME);
+    @Nullable KafkaProducer<byte[], byte[]> producer = null;
+    if (topicAudit && createTopicIfNeeded(cluster, ac, auditTopicName, auditProps)) {
+      producer = producerFactory.get();
+    }
+    log.info("Audit service initialized for cluster '{}'", cluster.getName());
+    return Optional.of(
+        new AuditWriter(
+            cluster.getName(),
+            auditTopicName,
+            producer,
+            consoleAudit ? AUDIT_LOGGER : null
+        )
+    );
+  }
+
+  /**
+   * Returns true if the audit topic was created or already exists, i.e. producing to it can be enabled.
+   */
+  private static boolean createTopicIfNeeded(KafkaCluster cluster,
+                                             ReactiveAdminClient ac,
+                                             String auditTopicName,
+                                             ClustersProperties.AuditProperties auditProps) {
+    boolean topicExists;
+    try {
+      topicExists = ac.listTopics(true).block().contains(auditTopicName);
+    } catch (Exception e) {
+      printAuditInitError(cluster, "Error checking audit topic existence", e);
+      return false;
+    }
+    if (topicExists) {
+      return true;
+    }
+    try {
+      int topicPartitions =
+          Optional.ofNullable(auditProps.getAuditTopicsPartitions())
+              .orElse(DEFAULT_AUDIT_TOPIC_PARTITIONS);
+
+      Map<String, String> topicConfig = new HashMap<>(DEFAULT_AUDIT_TOPIC_CONFIG);
+      Optional.ofNullable(auditProps.getAuditTopicProperties())
+          .ifPresent(topicConfig::putAll);
+
+      log.info("Creating audit topic '{}' for cluster '{}'", auditTopicName, cluster.getName());
+      ac.createTopic(auditTopicName, topicPartitions, null, topicConfig).block();
+      log.info("Audit topic created for cluster '{}'", cluster.getName());
+      return true;
+    } catch (Exception e) {
+      printAuditInitError(cluster, "Error creating topic '%s'".formatted(auditTopicName), e);
+      return false;
+    }
+  }
+
+  private static void printAuditInitError(KafkaCluster cluster, String errorMsg, Exception cause) {
+    log.error("-----------------------------------------------------------------");
+    log.error(
+        "Error initializing Audit Service for cluster '{}'. Audit will be disabled. See error below: ",
+        cluster.getName()
+    );
+    log.error("{}", errorMsg, cause);
+    log.error("-----------------------------------------------------------------");
+  }
+
+  public boolean isAuditTopic(KafkaCluster cluster, String topic) {
+    var writer = auditWriters.get(cluster.getName());
+    return writer != null
+        && topic.equals(writer.targetTopic())
+        && writer.isTopicWritingEnabled();
+  }
+
+  public void audit(AccessContext acxt, Signal<?> sig) {
+    if (sig.isOnComplete()) {
+      extractUser(sig)
+          .doOnNext(u -> sendAuditRecord(acxt, u))
+          .subscribe();
+    } else if (sig.isOnError()) {
+      extractUser(sig)
+          .doOnNext(u -> sendAuditRecord(acxt, u, sig.getThrowable()))
+          .subscribe();
+    }
+  }
+
+  private Mono<AuthenticatedUser> extractUser(Signal<?> sig) {
+    //see ReactiveSecurityContextHolder for impl details
+    Object key = SecurityContext.class;
+    if (sig.getContextView().hasKey(key)) {
+      return sig.getContextView().<Mono<SecurityContext>>get(key)
+          .map(context -> context.getAuthentication().getPrincipal())
+          .cast(RbacUser.class)
+          .map(user -> new AuthenticatedUser(user.name(), user.groups()))
+          .switchIfEmpty(NO_AUTH_USER);
+    } else {
+      return NO_AUTH_USER;
+    }
+  }
+
+  private void sendAuditRecord(AccessContext ctx, AuthenticatedUser user) {
+    sendAuditRecord(ctx, user, null);
+  }
+
+  private void sendAuditRecord(AccessContext ctx, AuthenticatedUser user, @Nullable Throwable th) {
+    try {
+      if (ctx.getCluster() != null) {
+        var writer = auditWriters.get(ctx.getCluster());
+        if (writer != null) {
+          writer.write(ctx, user, th);
+        }
+      } else {
+        // cluster-independent operation
+        AuditWriter.writeAppOperation(AUDIT_LOGGER, ctx, user, th);
+      }
+    } catch (Exception e) {
+      log.warn("Error sending audit record", e);
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    auditWriters.values().forEach(AuditWriter::close);
+  }
+}
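
(The writer map is built once at startup from each cluster's audit properties. As a sketch, these are the same flags the integration test below enables via system properties; the topic overrides are optional and shown at their defaults, and the property paths assume Spring's relaxed binding of ClustersProperties.AuditProperties.)

    System.setProperty("kafka.clusters.0.audit.topicAuditEnabled", "true");
    System.setProperty("kafka.clusters.0.audit.consoleAuditEnabled", "true");
    System.setProperty("kafka.clusters.0.audit.topic", "__kui-audit-log");   // default name
    System.setProperty("kafka.clusters.0.audit.auditTopicsPartitions", "1"); // default count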

+ 78 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/audit/AuditWriter.java

@@ -0,0 +1,78 @@
+package com.provectus.kafka.ui.service.audit;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+import com.provectus.kafka.ui.config.auth.AuthenticatedUser;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.service.audit.AuditRecord.AuditResource;
+import com.provectus.kafka.ui.service.audit.AuditRecord.OperationResult;
+import java.io.Closeable;
+import java.time.Instant;
+import java.time.format.DateTimeFormatter;
+import java.util.Optional;
+import javax.annotation.Nullable;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.slf4j.Logger;
+
+@Slf4j
+record AuditWriter(String clusterName,
+                   String targetTopic,
+                   @Nullable KafkaProducer<byte[], byte[]> producer,
+                   @Nullable Logger consoleLogger) implements Closeable {
+
+  boolean isTopicWritingEnabled() {
+    return producer != null;
+  }
+
+  // application-level (cluster-independent) operation
+  static void writeAppOperation(Logger consoleLogger,
+                                AccessContext ctx,
+                                AuthenticatedUser user,
+                                @Nullable Throwable th) {
+    consoleLogger.info(createRecord(ctx, user, th).toJson());
+  }
+
+  void write(AccessContext ctx, AuthenticatedUser user, @Nullable Throwable th) {
+    write(createRecord(ctx, user, th));
+  }
+
+  private void write(AuditRecord rec) {
+    String json = rec.toJson();
+    if (consoleLogger != null) {
+      consoleLogger.info(json);
+    }
+    if (producer != null) {
+      producer.send(
+          new ProducerRecord<>(targetTopic, null, json.getBytes(UTF_8)),
+          (metadata, ex) -> {
+            if (ex != null) {
+              log.warn("Error sending Audit record to kafka for cluster {}", clusterName, ex);
+            }
+          });
+    }
+  }
+
+  private static AuditRecord createRecord(AccessContext ctx,
+                                          AuthenticatedUser user,
+                                          @Nullable Throwable th) {
+    return new AuditRecord(
+        DateTimeFormatter.ISO_INSTANT.format(Instant.now()),
+        user.principal(),
+        ctx.getCluster(), // can be null if this is an application-level action
+        AuditResource.getAccessedResources(ctx),
+        ctx.getOperationName(),
+        ctx.getOperationParams(),
+        th == null ? OperationResult.successful() : OperationResult.error(th)
+    );
+  }
+
+  @Override
+  public void close() {
+    Optional.ofNullable(producer).ifPresent(KafkaProducer::close);
+  }
+
+}
+
+

+ 33 - 11
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/rbac/AccessControlService.java

@@ -109,7 +109,8 @@ public class AccessControlService {
                   && isConnectorAccessible(context, user) // TODO connector selectors
                   && isSchemaAccessible(context, user)
                   && isKsqlAccessible(context, user)
-                  && isAclAccessible(context, user);
+                  && isAclAccessible(context, user)
+                  && isAuditAccessible(context, user);
 
           if (!accessGranted) {
             throw new AccessDeniedException("Access denied");
@@ -202,19 +203,23 @@ public class AccessControlService {
     return isAccessible(Resource.TOPIC, context.getTopic(), user, context, requiredActions);
   }
 
-  public Mono<Boolean> isTopicAccessible(InternalTopic dto, String clusterName) {
+  public Mono<List<InternalTopic>> filterViewableTopics(List<InternalTopic> topics, String clusterName) {
     if (!rbacEnabled) {
-      return Mono.just(true);
+      return Mono.just(topics);
     }
 
-    AccessContext accessContext = AccessContext
-        .builder()
-        .cluster(clusterName)
-        .topic(dto.getName())
-        .topicActions(TopicAction.VIEW)
-        .build();
-
-    return getUser().map(u -> isTopicAccessible(accessContext, u));
+    return getUser()
+        .map(user -> topics.stream()
+            .filter(topic -> {
+                  var accessContext = AccessContext
+                      .builder()
+                      .cluster(clusterName)
+                      .topic(topic.getName())
+                      .topicActions(TopicAction.VIEW)
+                      .build();
+                  return isTopicAccessible(accessContext, user);
+                }
+            ).toList());
   }
 
   private boolean isConsumerGroupAccessible(AccessContext context, AuthenticatedUser user) {
@@ -382,6 +387,23 @@ public class AccessControlService {
     return isAccessible(Resource.ACL, null, user, context, requiredActions);
   }
 
+  private boolean isAuditAccessible(AccessContext context, AuthenticatedUser user) {
+    if (!rbacEnabled) {
+      return true;
+    }
+
+    if (context.getAuditAction().isEmpty()) {
+      return true;
+    }
+
+    Set<String> requiredActions = context.getAuditAction()
+        .stream()
+        .map(a -> a.toString().toUpperCase())
+        .collect(Collectors.toSet());
+
+    return isAccessible(Resource.AUDIT, null, user, context, requiredActions);
+  }
+
   public Set<ProviderAuthorityExtractor> getOauthExtractors() {
     return oauthExtractors;
   }

+ 16 - 26
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java

@@ -5,6 +5,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.stream.Collectors;
 import org.apache.avro.Schema;
 import reactor.util.function.Tuple2;
@@ -40,6 +41,10 @@ public class AvroJsonSchemaConverter implements JsonSchemaConverter<Schema> {
 
   private FieldSchema convertSchema(Schema schema,
                                     Map<String, FieldSchema> definitions, boolean isRoot) {
+    Optional<FieldSchema> logicalTypeSchema = JsonAvroConversion.LogicalTypeConversion.getJsonSchema(schema);
+    if (logicalTypeSchema.isPresent()) {
+      return logicalTypeSchema.get();
+    }
     if (!schema.isUnion()) {
       JsonType type = convertType(schema);
       switch (type.getType()) {
@@ -66,7 +71,6 @@ public class AvroJsonSchemaConverter implements JsonSchemaConverter<Schema> {
     }
   }
 
-
   // this method formats json-schema field in a way
   // to fit avro-> json encoding rules (https://avro.apache.org/docs/1.11.1/specification/_print/#json-encoding)
   private FieldSchema createUnionSchema(Schema schema, Map<String, FieldSchema> definitions) {
@@ -147,30 +151,16 @@ public class AvroJsonSchemaConverter implements JsonSchemaConverter<Schema> {
   }
 
   private JsonType convertType(Schema schema) {
-    switch (schema.getType()) {
-      case INT:
-      case LONG:
-        return new SimpleJsonType(JsonType.Type.INTEGER);
-      case MAP:
-      case RECORD:
-        return new SimpleJsonType(JsonType.Type.OBJECT);
-      case ENUM:
-        return new EnumJsonType(schema.getEnumSymbols());
-      case BYTES:
-      case STRING:
-        return new SimpleJsonType(JsonType.Type.STRING);
-      case NULL:
-        return new SimpleJsonType(JsonType.Type.NULL);
-      case ARRAY:
-        return new SimpleJsonType(JsonType.Type.ARRAY);
-      case FIXED:
-      case FLOAT:
-      case DOUBLE:
-        return new SimpleJsonType(JsonType.Type.NUMBER);
-      case BOOLEAN:
-        return new SimpleJsonType(JsonType.Type.BOOLEAN);
-      default:
-        return new SimpleJsonType(JsonType.Type.STRING);
-    }
+    return switch (schema.getType()) {
+      case INT, LONG -> new SimpleJsonType(JsonType.Type.INTEGER);
+      case MAP, RECORD -> new SimpleJsonType(JsonType.Type.OBJECT);
+      case ENUM -> new EnumJsonType(schema.getEnumSymbols());
+      case BYTES, STRING -> new SimpleJsonType(JsonType.Type.STRING);
+      case NULL -> new SimpleJsonType(JsonType.Type.NULL);
+      case ARRAY -> new SimpleJsonType(JsonType.Type.ARRAY);
+      case FIXED, FLOAT, DOUBLE -> new SimpleJsonType(JsonType.Type.NUMBER);
+      case BOOLEAN -> new SimpleJsonType(JsonType.Type.BOOLEAN);
+      default -> new SimpleJsonType(JsonType.Type.STRING);
+    };
   }
 }
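
(Effect of the new logical-type hook above: a field declared in Avro as {"type":"string","logicalType":"uuid"} now converts to the JSON-schema fragment {"type":"string","format":"uuid"} instead of a plain string; see LogicalTypeConversion.getJsonSchema in JsonAvroConversion below.)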

+ 542 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/JsonAvroConversion.java

@@ -0,0 +1,542 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.json.JsonMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.BooleanNode;
+import com.fasterxml.jackson.databind.node.DecimalNode;
+import com.fasterxml.jackson.databind.node.DoubleNode;
+import com.fasterxml.jackson.databind.node.FloatNode;
+import com.fasterxml.jackson.databind.node.IntNode;
+import com.fasterxml.jackson.databind.node.JsonNodeType;
+import com.fasterxml.jackson.databind.node.LongNode;
+import com.fasterxml.jackson.databind.node.NullNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.fasterxml.jackson.databind.node.TextNode;
+import com.google.common.collect.Lists;
+import com.provectus.kafka.ui.exception.JsonAvroConversionException;
+import io.confluent.kafka.serializers.AvroData;
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.time.ZoneOffset;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiFunction;
+import java.util.stream.Stream;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericData;
+
+// json <-> avro
+public class JsonAvroConversion {
+
+  private static final JsonMapper MAPPER = new JsonMapper();
+  private static final Schema NULL_SCHEMA = Schema.create(Schema.Type.NULL);
+
+  // converts json into the Object expected as input by KafkaAvroSerializer
+  // (with the AVRO_USE_LOGICAL_TYPE_CONVERTERS flag enabled!)
+  public static Object convertJsonToAvro(String jsonString, Schema avroSchema) {
+    JsonNode rootNode = null;
+    try {
+      rootNode = MAPPER.readTree(jsonString);
+    } catch (JsonProcessingException e) {
+      throw new JsonAvroConversionException("String is not valid JSON");
+    }
+    return convert(rootNode, avroSchema);
+  }
+
+  private static Object convert(JsonNode node, Schema avroSchema) {
+    return switch (avroSchema.getType()) {
+      case RECORD -> {
+        assertJsonType(node, JsonNodeType.OBJECT);
+        var rec = new GenericData.Record(avroSchema);
+        for (Schema.Field field : avroSchema.getFields()) {
+          if (node.has(field.name()) && !node.get(field.name()).isNull()) {
+            rec.put(field.name(), convert(node.get(field.name()), field.schema()));
+          }
+        }
+        yield rec;
+      }
+      case MAP -> {
+        assertJsonType(node, JsonNodeType.OBJECT);
+        var map = new LinkedHashMap<String, Object>();
+        var valueSchema = avroSchema.getValueType();
+        node.fields().forEachRemaining(f -> map.put(f.getKey(), convert(f.getValue(), valueSchema)));
+        yield map;
+      }
+      case ARRAY -> {
+        assertJsonType(node, JsonNodeType.ARRAY);
+        var lst = new ArrayList<>();
+        node.elements().forEachRemaining(e -> lst.add(convert(e, avroSchema.getElementType())));
+        yield lst;
+      }
+      case ENUM -> {
+        assertJsonType(node, JsonNodeType.STRING);
+        String symbol = node.textValue();
+        if (!avroSchema.getEnumSymbols().contains(symbol)) {
+          throw new JsonAvroConversionException("%s is not a part of enum symbols [%s]"
+              .formatted(symbol, avroSchema.getEnumSymbols()));
+        }
+        yield new GenericData.EnumSymbol(avroSchema, symbol);
+      }
+      case UNION -> {
+        // for union types (other than null) the payload should be an object with a single key == the type name
+        // ex: schema = [ "null", "int", "string" ], possible payloads = null, { "string": "str" },  { "int": 123 }
+        if (node.isNull() && avroSchema.getTypes().contains(NULL_SCHEMA)) {
+          yield null;
+        }
+
+        assertJsonType(node, JsonNodeType.OBJECT);
+        var elements = Lists.newArrayList(node.fields());
+        if (elements.size() != 1) {
+          throw new JsonAvroConversionException(
+              "UNION field value should be an object with a single field == the type name");
+        }
+        Map.Entry<String, JsonNode> typeNameToValue = elements.get(0);
+        List<Schema> candidates = new ArrayList<>();
+        for (Schema unionType : avroSchema.getTypes()) {
+          if (typeNameToValue.getKey().equals(unionType.getFullName())) {
+            yield convert(typeNameToValue.getValue(), unionType);
+          }
+          if (typeNameToValue.getKey().equals(unionType.getName())) {
+            candidates.add(unionType);
+          }
+        }
+        if (candidates.size() == 1) {
+          yield convert(typeNameToValue.getValue(), candidates.get(0));
+        }
+        if (candidates.size() > 1) {
+          throw new JsonAvroConversionException(
+              "Can't select type within union for value '%s'. Provide full type name.".formatted(node)
+          );
+        }
+        throw new JsonAvroConversionException(
+            "json value '%s' cannot be converted to any of the union types [%s]"
+                .formatted(node, avroSchema.getTypes()));
+      }
+      case STRING -> {
+        if (isLogicalType(avroSchema)) {
+          yield processLogicalType(node, avroSchema);
+        }
+        assertJsonType(node, JsonNodeType.STRING);
+        yield node.textValue();
+      }
+      case LONG -> {
+        if (isLogicalType(avroSchema)) {
+          yield processLogicalType(node, avroSchema);
+        }
+        assertJsonType(node, JsonNodeType.NUMBER);
+        assertJsonNumberType(node, JsonParser.NumberType.LONG, JsonParser.NumberType.INT);
+        yield node.longValue();
+      }
+      case INT -> {
+        if (isLogicalType(avroSchema)) {
+          yield processLogicalType(node, avroSchema);
+        }
+        assertJsonType(node, JsonNodeType.NUMBER);
+        assertJsonNumberType(node, JsonParser.NumberType.INT);
+        yield node.intValue();
+      }
+      case FLOAT -> {
+        assertJsonType(node, JsonNodeType.NUMBER);
+        assertJsonNumberType(node, JsonParser.NumberType.DOUBLE, JsonParser.NumberType.FLOAT);
+        yield node.floatValue();
+      }
+      case DOUBLE -> {
+        assertJsonType(node, JsonNodeType.NUMBER);
+        assertJsonNumberType(node, JsonParser.NumberType.DOUBLE, JsonParser.NumberType.FLOAT);
+        yield node.doubleValue();
+      }
+      case BOOLEAN -> {
+        assertJsonType(node, JsonNodeType.BOOLEAN);
+        yield node.booleanValue();
+      }
+      case NULL -> {
+        assertJsonType(node, JsonNodeType.NULL);
+        yield null;
+      }
+      case BYTES -> {
+        if (isLogicalType(avroSchema)) {
+          yield processLogicalType(node, avroSchema);
+        }
+        assertJsonType(node, JsonNodeType.STRING);
+        // logic copied from JsonDecoder::readBytes
+        yield ByteBuffer.wrap(node.textValue().getBytes(StandardCharsets.ISO_8859_1));
+      }
+      case FIXED -> {
+        if (isLogicalType(avroSchema)) {
+          yield processLogicalType(node, avroSchema);
+        }
+        assertJsonType(node, JsonNodeType.STRING);
+        byte[] bytes = node.textValue().getBytes(StandardCharsets.ISO_8859_1);
+        if (bytes.length != avroSchema.getFixedSize()) {
+          throw new JsonAvroConversionException(
+              "Fixed field has unexpected size %d (should be %d)"
+                  .formatted(bytes.length, avroSchema.getFixedSize()));
+        }
+        yield new GenericData.Fixed(avroSchema, bytes);
+      }
+    };
+  }
+
+  // converts output of KafkaAvroDeserializer (with the AVRO_USE_LOGICAL_TYPE_CONVERTERS flag enabled!) into json.
+  // Note: conversion should be compatible with AvroJsonSchemaConverter logic!
+  public static JsonNode convertAvroToJson(Object obj, Schema avroSchema) {
+    if (obj == null) {
+      return NullNode.getInstance();
+    }
+    return switch (avroSchema.getType()) {
+      case RECORD -> {
+        var rec = (GenericData.Record) obj;
+        ObjectNode node = MAPPER.createObjectNode();
+        for (Schema.Field field : avroSchema.getFields()) {
+          var fieldVal = rec.get(field.name());
+          if (fieldVal != null) {
+            node.set(field.name(), convertAvroToJson(fieldVal, field.schema()));
+          }
+        }
+        yield node;
+      }
+      case MAP -> {
+        ObjectNode node = MAPPER.createObjectNode();
+        ((Map) obj).forEach((k, v) -> node.set(k.toString(), convertAvroToJson(v, avroSchema.getValueType())));
+        yield node;
+      }
+      case ARRAY -> {
+        var list = (List<Object>) obj;
+        ArrayNode node = MAPPER.createArrayNode();
+        list.forEach(e -> node.add(convertAvroToJson(e, avroSchema.getElementType())));
+        yield node;
+      }
+      case ENUM -> {
+        yield new TextNode(obj.toString());
+      }
+      case UNION -> {
+        ObjectNode node = MAPPER.createObjectNode();
+        int unionIdx = AvroData.getGenericData().resolveUnion(avroSchema, obj);
+        Schema selectedType = avroSchema.getTypes().get(unionIdx);
+        node.set(
+            selectUnionTypeFieldName(avroSchema, selectedType, unionIdx),
+            convertAvroToJson(obj, selectedType)
+        );
+        yield node;
+      }
+      case STRING -> {
+        if (isLogicalType(avroSchema)) {
+          yield processLogicalType(obj, avroSchema);
+        }
+        yield new TextNode(obj.toString());
+      }
+      case LONG -> {
+        if (isLogicalType(avroSchema)) {
+          yield processLogicalType(obj, avroSchema);
+        }
+        yield new LongNode((Long) obj);
+      }
+      case INT -> {
+        if (isLogicalType(avroSchema)) {
+          yield processLogicalType(obj, avroSchema);
+        }
+        yield new IntNode((Integer) obj);
+      }
+      case FLOAT -> new FloatNode((Float) obj);
+      case DOUBLE -> new DoubleNode((Double) obj);
+      case BOOLEAN -> BooleanNode.valueOf((Boolean) obj);
+      case NULL -> NullNode.getInstance();
+      case BYTES -> {
+        if (isLogicalType(avroSchema)) {
+          yield processLogicalType(obj, avroSchema);
+        }
+        ByteBuffer bytes = (ByteBuffer) obj;
+        //see JsonEncoder::writeByteArray
+        yield new TextNode(new String(bytes.array(), StandardCharsets.ISO_8859_1));
+      }
+      case FIXED -> {
+        if (isLogicalType(avroSchema)) {
+          yield processLogicalType(obj, avroSchema);
+        }
+        var fixed = (GenericData.Fixed) obj;
+        yield new TextNode(new String(fixed.bytes(), StandardCharsets.ISO_8859_1));
+      }
+    };
+  }
+
+  // select name for a key field that represents type name of union.
+  // For records selects short name, if it is possible.
+  private static String selectUnionTypeFieldName(Schema unionSchema,
+                                                 Schema chosenType,
+                                                 int chosenTypeIdx) {
+    var types = unionSchema.getTypes();
+    if (types.size() == 2 && types.contains(NULL_SCHEMA)) {
+      return chosenType.getName();
+    }
+    for (int i = 0; i < types.size(); i++) {
+      if (i != chosenTypeIdx && chosenType.getName().equals(types.get(i).getName())) {
+        // there is another type inside union with the same name
+        // so, we have to use fullname
+        return chosenType.getFullName();
+      }
+    }
+    return chosenType.getName();
+  }
+
+  private static Object processLogicalType(JsonNode node, Schema schema) {
+    return findConversion(schema)
+        .map(c -> c.jsonToAvroConversion.apply(node, schema))
+        .orElseThrow(() ->
+            new JsonAvroConversionException("'%s' logical type is not supported"
+                .formatted(schema.getLogicalType().getName())));
+  }
+
+  private static JsonNode processLogicalType(Object obj, Schema schema) {
+    return findConversion(schema)
+        .map(c -> c.avroToJsonConversion.apply(obj, schema))
+        .orElseThrow(() ->
+            new JsonAvroConversionException("'%s' logical type is not supported"
+                .formatted(schema.getLogicalType().getName())));
+  }
+
+  private static Optional<LogicalTypeConversion> findConversion(Schema schema) {
+    String logicalTypeName = schema.getLogicalType().getName();
+    return Stream.of(LogicalTypeConversion.values())
+        .filter(t -> t.name.equalsIgnoreCase(logicalTypeName))
+        .findFirst();
+  }
+
+  private static boolean isLogicalType(Schema schema) {
+    return schema.getLogicalType() != null;
+  }
+
+  private static void assertJsonType(JsonNode node, JsonNodeType... allowedTypes) {
+    if (Stream.of(allowedTypes).noneMatch(t -> node.getNodeType() == t)) {
+      throw new JsonAvroConversionException(
+          "%s node has unexpected type, allowed types %s, actual type %s"
+              .formatted(node, Arrays.toString(allowedTypes), node.getNodeType()));
+    }
+  }
+
+  private static void assertJsonNumberType(JsonNode node, JsonParser.NumberType... allowedTypes) {
+    if (Stream.of(allowedTypes).noneMatch(t -> node.numberType() == t)) {
+      throw new JsonAvroConversionException(
+          "%s node has unexpected numeric type, allowed types %s, actual type %s"
+              .formatted(node, Arrays.toString(allowedTypes), node.numberType()));
+    }
+  }
+
+  enum LogicalTypeConversion {
+
+    UUID("uuid",
+        (node, schema) -> {
+          assertJsonType(node, JsonNodeType.STRING);
+          return java.util.UUID.fromString(node.asText());
+        },
+        (obj, schema) -> {
+          return new TextNode(obj.toString());
+        },
+        new SimpleFieldSchema(
+            new SimpleJsonType(
+                JsonType.Type.STRING,
+                Map.of("format", new TextNode("uuid"))))
+    ),
+
+    DECIMAL("decimal",
+        (node, schema) -> {
+          if (node.isTextual()) {
+            return new BigDecimal(node.asText());
+          } else if (node.isNumber()) {
+            return new BigDecimal(node.numberValue().toString());
+          }
+          throw new JsonAvroConversionException(
+              "node '%s' can't be converted to decimal logical type"
+                  .formatted(node));
+        },
+        (obj, schema) -> {
+          return new DecimalNode((BigDecimal) obj);
+        },
+        new SimpleFieldSchema(new SimpleJsonType(JsonType.Type.NUMBER))
+    ),
+
+    DATE("date",
+        (node, schema) -> {
+          if (node.isInt()) {
+            return LocalDate.ofEpochDay(node.intValue());
+          } else if (node.isTextual()) {
+            return LocalDate.parse(node.asText());
+          } else {
+            throw new JsonAvroConversionException(
+                "node '%s' can't be converted to date logical type"
+                    .formatted(node));
+          }
+        },
+        (obj, schema) -> {
+          return new TextNode(obj.toString());
+        },
+        new SimpleFieldSchema(
+            new SimpleJsonType(
+                JsonType.Type.STRING,
+                Map.of("format", new TextNode("date"))))
+    ),
+
+    TIME_MILLIS("time-millis",
+        (node, schema) -> {
+          if (node.isIntegralNumber()) {
+            return LocalTime.ofNanoOfDay(TimeUnit.MILLISECONDS.toNanos(node.longValue()));
+          } else if (node.isTextual()) {
+            return LocalTime.parse(node.asText());
+          } else {
+            throw new JsonAvroConversionException(
+                "node '%s' can't be converted to time-millis logical type"
+                    .formatted(node));
+          }
+        },
+        (obj, schema) -> {
+          return new TextNode(obj.toString());
+        },
+        new SimpleFieldSchema(
+            new SimpleJsonType(
+                JsonType.Type.STRING,
+                Map.of("format", new TextNode("time"))))
+    ),
+
+    TIME_MICROS("time-micros",
+        (node, schema) -> {
+          if (node.isIntegralNumber()) {
+            return LocalTime.ofNanoOfDay(TimeUnit.MICROSECONDS.toNanos(node.longValue()));
+          } else if (node.isTextual()) {
+            return LocalTime.parse(node.asText());
+          } else {
+            throw new JsonAvroConversionException(
+                "node '%s' can't be converted to time-micros logical type"
+                    .formatted(node));
+          }
+        },
+        (obj, schema) -> {
+          return new TextNode(obj.toString());
+        },
+        new SimpleFieldSchema(
+            new SimpleJsonType(
+                JsonType.Type.STRING,
+                Map.of("format", new TextNode("time"))))
+    ),
+
+    TIMESTAMP_MILLIS("timestamp-millis",
+        (node, schema) -> {
+          if (node.isIntegralNumber()) {
+            return Instant.ofEpochMilli(node.longValue());
+          } else if (node.isTextual()) {
+            return Instant.parse(node.asText());
+          } else {
+            throw new JsonAvroConversionException(
+                "node '%s' can't be converted to timestamp-millis logical type"
+                    .formatted(node));
+          }
+        },
+        (obj, schema) -> {
+          return new TextNode(obj.toString());
+        },
+        new SimpleFieldSchema(
+            new SimpleJsonType(
+                JsonType.Type.STRING,
+                Map.of("format", new TextNode("date-time"))))
+    ),
+
+    TIMESTAMP_MICROS("timestamp-micros",
+        (node, schema) -> {
+          if (node.isIntegralNumber()) {
+            // see TimeConversions.TimestampMicrosConversion for impl
+            long microsFromEpoch = node.longValue();
+            long epochSeconds = microsFromEpoch / (1_000_000L);
+            long nanoAdjustment = (microsFromEpoch % (1_000_000L)) * 1_000L;
+            return Instant.ofEpochSecond(epochSeconds, nanoAdjustment);
+          } else if (node.isTextual()) {
+            return Instant.parse(node.asText());
+          } else {
+            throw new JsonAvroConversionException(
+                "node '%s' can't be converted to timestamp-micros logical type"
+                    .formatted(node));
+          }
+        },
+        (obj, schema) -> {
+          return new TextNode(obj.toString());
+        },
+        new SimpleFieldSchema(
+            new SimpleJsonType(
+                JsonType.Type.STRING,
+                Map.of("format", new TextNode("date-time"))))
+    ),
+
+    LOCAL_TIMESTAMP_MILLIS("local-timestamp-millis",
+        (node, schema) -> {
+          if (node.isTextual()) {
+            return LocalDateTime.parse(node.asText());
+          }
+          // delegates to the TIMESTAMP_MILLIS conversion above for impl
+          Instant instant = (Instant) TIMESTAMP_MILLIS.jsonToAvroConversion.apply(node, schema);
+          return LocalDateTime.ofInstant(instant, ZoneOffset.UTC);
+        },
+        (obj, schema) -> {
+          return new TextNode(obj.toString());
+        },
+        new SimpleFieldSchema(
+            new SimpleJsonType(
+                JsonType.Type.STRING,
+                Map.of("format", new TextNode("date-time"))))
+    ),
+
+    LOCAL_TIMESTAMP_MICROS("local-timestamp-micros",
+        (node, schema) -> {
+          if (node.isTextual()) {
+            return LocalDateTime.parse(node.asText());
+          }
+          Instant instant = (Instant) TIMESTAMP_MICROS.jsonToAvroConversion.apply(node, schema);
+          return LocalDateTime.ofInstant(instant, ZoneOffset.UTC);
+        },
+        (obj, schema) -> {
+          return new TextNode(obj.toString());
+        },
+        new SimpleFieldSchema(
+            new SimpleJsonType(
+                JsonType.Type.STRING,
+                Map.of("format", new TextNode("date-time"))))
+    );
+
+    private final String name;
+    private final BiFunction<JsonNode, Schema, Object> jsonToAvroConversion;
+    private final BiFunction<Object, Schema, JsonNode> avroToJsonConversion;
+    private final FieldSchema jsonSchema;
+
+    LogicalTypeConversion(String name,
+                          BiFunction<JsonNode, Schema, Object> jsonToAvroConversion,
+                          BiFunction<Object, Schema, JsonNode> avroToJsonConversion,
+                          FieldSchema jsonSchema) {
+      this.name = name;
+      this.jsonToAvroConversion = jsonToAvroConversion;
+      this.avroToJsonConversion = avroToJsonConversion;
+      this.jsonSchema = jsonSchema;
+    }
+
+    static Optional<FieldSchema> getJsonSchema(Schema schema) {
+      if (schema.getLogicalType() == null) {
+        return Optional.empty();
+      }
+      String logicalTypeName = schema.getLogicalType().getName();
+      return Stream.of(JsonAvroConversion.LogicalTypeConversion.values())
+          .filter(t -> t.name.equalsIgnoreCase(logicalTypeName))
+          .map(c -> c.jsonSchema)
+          .findFirst();
+    }
+  }
+
+
+}
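
(A round-trip sketch of the two public entry points; schema and values are invented for illustration, and logical types come back as formatted strings, matching the schema mapping in AvroJsonSchemaConverter.)

    import com.fasterxml.jackson.databind.JsonNode;
    import org.apache.avro.Schema;

    Schema schema = new Schema.Parser().parse("""
        {"type":"record","name":"Evt","fields":[
          {"name":"id","type":{"type":"string","logicalType":"uuid"}},
          {"name":"ts","type":{"type":"long","logicalType":"timestamp-millis"}}]}
        """);

    Object avro = JsonAvroConversion.convertJsonToAvro(
        "{\"id\":\"123e4567-e89b-12d3-a456-426614174000\",\"ts\":1683000000000}", schema);
    // record fields are materialized as java.util.UUID and java.time.Instant

    JsonNode back = JsonAvroConversion.convertAvroToJson(avro, schema);
    // -> {"id":"123e4567-e89b-12d3-a456-426614174000","ts":"2023-05-02T04:00:00Z"}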

+ 3 - 0
kafka-ui-api/src/main/resources/application-local.yml

@@ -144,3 +144,6 @@ rbac:
 
         - resource: acl
           actions: all
+
+        - resource: audit
+          actions: all

+ 2 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/AbstractIntegrationTest.java

@@ -74,6 +74,8 @@ public abstract class AbstractIntegrationTest {
       System.setProperty("kafka.clusters.0.masking.0.type", "REPLACE");
       System.setProperty("kafka.clusters.0.masking.0.replacement", "***");
       System.setProperty("kafka.clusters.0.masking.0.topicValuesPattern", "masking-test-.*");
+      System.setProperty("kafka.clusters.0.audit.topicAuditEnabled", "true");
+      System.setProperty("kafka.clusters.0.audit.consoleAuditEnabled", "true");
 
       System.setProperty("kafka.clusters.1.name", SECOND_LOCAL);
       System.setProperty("kafka.clusters.1.readOnly", "true");

+ 185 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/serdes/builtin/ConsumerOffsetsSerdeTest.java

@@ -0,0 +1,185 @@
+package com.provectus.kafka.ui.serdes.builtin;
+
+import static com.provectus.kafka.ui.serdes.builtin.ConsumerOffsetsSerde.TOPIC;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.fasterxml.jackson.databind.json.JsonMapper;
+import com.provectus.kafka.ui.AbstractIntegrationTest;
+import com.provectus.kafka.ui.producer.KafkaTestProducer;
+import com.provectus.kafka.ui.serde.api.DeserializeResult;
+import com.provectus.kafka.ui.serde.api.Serde;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.UUID;
+import lombok.SneakyThrows;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.serialization.BytesDeserializer;
+import org.apache.kafka.common.utils.Bytes;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.testcontainers.shaded.org.awaitility.Awaitility;
+import reactor.util.function.Tuple2;
+import reactor.util.function.Tuples;
+
+class ConsumerOffsetsSerdeTest extends AbstractIntegrationTest {
+
+  private static final int MSGS_TO_GENERATE = 10;
+
+  private static String consumerGroupName;
+  private static String committedTopic;
+
+  @BeforeAll
+  static void createTopicAndCommitItsOffset() {
+    committedTopic = ConsumerOffsetsSerdeTest.class.getSimpleName() + "-" + UUID.randomUUID();
+    consumerGroupName = committedTopic + "-group";
+    createTopic(new NewTopic(committedTopic, 1, (short) 1));
+
+    try (var producer = KafkaTestProducer.forKafka(kafka)) {
+      for (int i = 0; i < MSGS_TO_GENERATE; i++) {
+        producer.send(committedTopic, "i=" + i);
+      }
+    }
+    try (var consumer = createConsumer(consumerGroupName)) {
+      consumer.subscribe(List.of(committedTopic));
+      int polled = 0;
+      while (polled < MSGS_TO_GENERATE) {
+        polled += consumer.poll(Duration.ofMillis(100)).count();
+      }
+      consumer.commitSync();
+    }
+  }
+
+  @AfterAll
+  static void cleanUp() {
+    deleteTopic(committedTopic);
+  }
+
+  @Test
+  void canOnlyDeserializeConsumerOffsetsTopic() {
+    var serde = new ConsumerOffsetsSerde();
+    assertThat(serde.canDeserialize(ConsumerOffsetsSerde.TOPIC, Serde.Target.KEY)).isTrue();
+    assertThat(serde.canDeserialize(ConsumerOffsetsSerde.TOPIC, Serde.Target.VALUE)).isTrue();
+    assertThat(serde.canDeserialize("anyOtherTopic", Serde.Target.KEY)).isFalse();
+    assertThat(serde.canDeserialize("anyOtherTopic", Serde.Target.VALUE)).isFalse();
+  }
+
+  @Test
+  void deserializesMessagesMadeByConsumerActivity() {
+    var serde = new ConsumerOffsetsSerde();
+    var keyDeserializer = serde.deserializer(TOPIC, Serde.Target.KEY);
+    var valueDeserializer = serde.deserializer(TOPIC, Serde.Target.VALUE);
+
+    try (var consumer = createConsumer(consumerGroupName + "-check")) {
+      consumer.subscribe(List.of(ConsumerOffsetsSerde.TOPIC));
+      List<Tuple2<DeserializeResult, DeserializeResult>> polled = new ArrayList<>();
+
+      Awaitility.await()
+          .pollInSameThread()
+          .atMost(Duration.ofMinutes(1))
+          .untilAsserted(() -> {
+            for (var rec : consumer.poll(Duration.ofMillis(200))) {
+              DeserializeResult key = rec.key() != null
+                  ? keyDeserializer.deserialize(null, rec.key().get())
+                  : null;
+              DeserializeResult val = rec.value() != null
+                  ? valueDeserializer.deserialize(null, rec.value().get())
+                  : null;
+              if (key != null && val != null) {
+                polled.add(Tuples.of(key, val));
+              }
+            }
+            assertThat(polled).anyMatch(t -> isCommitMessage(t.getT1(), t.getT2()));
+            assertThat(polled).anyMatch(t -> isGroupMetadataMessage(t.getT1(), t.getT2()));
+          });
+    }
+  }
+
+  // Sample commit record:
+  //
+  // key: {
+  //  "group": "test_Members_3",
+  //  "topic": "test",
+  //  "partition": 0
+  // }
+  //
+  // value:
+  // {
+  //  "offset": 2,
+  //  "leader_epoch": 0,
+  //  "metadata": "",
+  //  "commit_timestamp": 1683112980588
+  // }
+  private boolean isCommitMessage(DeserializeResult key, DeserializeResult value) {
+    var keyJson = toMapFromJson(key);
+    boolean keyIsOk = consumerGroupName.equals(keyJson.get("group"))
+        && committedTopic.equals(keyJson.get("topic"))
+        && ((Integer) 0).equals(keyJson.get("partition"));
+
+    var valueJson = toMapFromJson(value);
+    boolean valueIsOk = valueJson.containsKey("offset")
+        && valueJson.get("offset").equals(MSGS_TO_GENERATE)
+        && valueJson.containsKey("commit_timestamp");
+
+    return keyIsOk && valueIsOk;
+  }
+
+  // Sample group metadata record:
+  //
+  // key: {
+  //  "group": "test_Members_3"
+  // }
+  //
+  // value:
+  // {
+  //  "protocol_type": "consumer",
+  //  "generation": 1,
+  //  "protocol": "range",
+  //  "leader": "consumer-test_Members_3-1-5a37876e-e42f-420e-9c7d-6902889bd5dd",
+  //  "current_state_timestamp": 1683112974561,
+  //  "members": [
+  //    {
+  //      "member_id": "consumer-test_Members_3-1-5a37876e-e42f-420e-9c7d-6902889bd5dd",
+  //      "group_instance_id": null,
+  //      "client_id": "consumer-test_Members_3-1",
+  //      "client_host": "/192.168.16.1",
+  //      "rebalance_timeout": 300000,
+  //      "session_timeout": 45000,
+  //      "subscription": "AAEAAAABAAR0ZXN0/////wAAAAA=",
+  //      "assignment": "AAEAAAABAAR0ZXN0AAAAAQAAAAD/////"
+  //    }
+  //  ]
+  // }
+  private boolean isGroupMetadataMessage(DeserializeResult key, DeserializeResult value) {
+    var keyJson = toMapFromJson(key);
+    boolean keyIsOk = consumerGroupName.equals(keyJson.get("group")) && keyJson.size() == 1;
+
+    var valueJson = toMapFromJson(value);
+    boolean valueIsOk = valueJson.keySet()
+        .containsAll(Set.of("protocol_type", "generation", "leader", "members"));
+
+    return keyIsOk && valueIsOk;
+  }
+
+  @SneakyThrows
+  private Map<String, Object> toMapFromJson(DeserializeResult result) {
+    return new JsonMapper().readValue(result.getResult(), Map.class);
+  }
+
+  private static KafkaConsumer<Bytes, Bytes> createConsumer(String groupId) {
+    Properties props = new Properties();
+    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
+    props.put(ConsumerConfig.CLIENT_ID_CONFIG, groupId);
+    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());
+    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
+    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
+    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+    return new KafkaConsumer<>(props);
+  }
+}
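For orientation, the polling pattern above condensed into a fragment (a sketch, assuming the same createConsumer helper and the key/value serde deserializers built earlier in this test class):

    try (var consumer = createConsumer("offsets-reader")) { // group id is arbitrary for reading
      consumer.subscribe(List.of("__consumer_offsets"));
      for (var rec : consumer.poll(Duration.ofMillis(200))) {
        if (rec.key() != null) {
          // the serde renders the binary key as JSON,
          // e.g. {"group":"test_Members_3","topic":"test","partition":0}
          DeserializeResult key = keyDeserializer.deserialize(null, rec.key().get());
          System.out.println(key.getResult());
        }
      }
    }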

+ 1 - 14
kafka-ui-api/src/test/java/com/provectus/kafka/ui/serdes/builtin/ProtobufFileSerdeTest.java

@@ -47,7 +47,6 @@ class ProtobufFileSerdeTest {
   @BeforeEach
   void setUp() throws Exception {
     Map<Path, ProtobufSchema> files = ProtobufFileSerde.Configuration.loadSchemas(
-        Optional.empty(),
         Optional.empty(),
         Optional.of(protoFilesDir())
     );
@@ -107,15 +106,6 @@ class ProtobufFileSerdeTest {
           .isFalse();
     }
 
-    @Test
-    void canBeAutoConfiguredReturnsTrueIfNoProtoFileHasBeenProvided() {
-      PropertyResolver resolver = mock(PropertyResolver.class);
-      when(resolver.getProperty("protobufFile", String.class))
-          .thenReturn(Optional.of("file.proto"));
-      assertThat(Configuration.canBeAutoConfigured(resolver))
-          .isTrue();
-    }
-
     @Test
     void canBeAutoConfiguredReturnsTrueIfProtoFilesHasBeenProvided() {
       PropertyResolver resolver = mock(PropertyResolver.class);
@@ -193,13 +183,10 @@ class ProtobufFileSerdeTest {
     @Test
     void createConfigureFillsDescriptorMappingsWhenProtoFilesListProvided() throws Exception {
       PropertyResolver resolver = mock(PropertyResolver.class);
-      when(resolver.getProperty("protobufFile", String.class))
-          .thenReturn(Optional.of(
-              ResourceUtils.getFile("classpath:protobuf-serde/sensor.proto").getPath()));
-
       when(resolver.getListProperty("protobufFiles", String.class))
           .thenReturn(Optional.of(
               List.of(
+                  ResourceUtils.getFile("classpath:protobuf-serde/sensor.proto").getPath(),
                   ResourceUtils.getFile("classpath:protobuf-serde/address-book.proto").getPath())));
 
       when(resolver.getProperty("protobufMessageName", String.class))
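The single-file protobufFile property is gone; a minimal sketch of the consolidated configuration, in the same mock style as above (the paths are placeholders):

    PropertyResolver resolver = mock(PropertyResolver.class);
    // every schema file now goes into the "protobufFiles" list property
    when(resolver.getListProperty("protobufFiles", String.class))
        .thenReturn(Optional.of(List.of(
            "/schemas/sensor.proto",           // placeholder path
            "/schemas/address-book.proto")));  // placeholder path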

+ 171 - 5
kafka-ui-api/src/test/java/com/provectus/kafka/ui/serdes/builtin/sr/SchemaRegistrySerdeTest.java

@@ -2,13 +2,12 @@ package com.provectus.kafka.ui.serdes.builtin.sr;
 
 import static org.assertj.core.api.Assertions.assertThat;
 
-import com.fasterxml.jackson.core.JsonProcessingException;
 import com.fasterxml.jackson.databind.json.JsonMapper;
 import com.provectus.kafka.ui.serde.api.DeserializeResult;
 import com.provectus.kafka.ui.serde.api.SchemaDescription;
 import com.provectus.kafka.ui.serde.api.Serde;
+import com.provectus.kafka.ui.util.jsonschema.JsonAvroConversion;
 import io.confluent.kafka.schemaregistry.avro.AvroSchema;
-import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
 import io.confluent.kafka.schemaregistry.client.MockSchemaRegistryClient;
 import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
 import java.io.ByteArrayOutputStream;
@@ -54,7 +53,8 @@ class SchemaRegistrySerdeTest {
 
     SchemaDescription schemaDescription = schemaOptional.get();
     assertThat(schemaDescription.getSchema())
-        .contains("{\"$id\":\"int\",\"$schema\":\"https://json-schema.org/draft/2020-12/schema\",\"type\":\"integer\"}");
+        .contains(
+            "{\"$id\":\"int\",\"$schema\":\"https://json-schema.org/draft/2020-12/schema\",\"type\":\"integer\"}");
     assertThat(schemaDescription.getAdditionalProperties())
         .containsOnlyKeys("subject", "schemaId", "latestVersion", "type")
         .containsEntry("subject", subject)
@@ -189,7 +189,8 @@ class SchemaRegistrySerdeTest {
     assertThat(serde.canSerialize(topic, Serde.Target.VALUE)).isFalse();
   }
 
-  private void assertJsonsEqual(String expected, String actual) throws JsonProcessingException {
+  @SneakyThrows
+  private void assertJsonsEqual(String expected, String actual) {
     var mapper = new JsonMapper();
     assertThat(mapper.readTree(actual)).isEqualTo(mapper.readTree(expected));
   }
@@ -211,9 +212,174 @@ class SchemaRegistrySerdeTest {
     GenericDatumWriter<Object> writer = new GenericDatumWriter<>(schema.rawSchema());
     ByteArrayOutputStream output = new ByteArrayOutputStream();
     Encoder encoder = EncoderFactory.get().binaryEncoder(output, null);
-    writer.write(AvroSchemaUtils.toObject(json, schema), encoder);
+    writer.write(JsonAvroConversion.convertJsonToAvro(json, schema.rawSchema()), encoder);
     encoder.flush();
     return output.toByteArray();
   }
 
+  @Test
+  void avroFieldsRepresentationIsConsistentForSerializationAndDeserialization() throws Exception {
+    AvroSchema schema = new AvroSchema(
+        """
+             {
+               "type": "record",
+               "name": "TestAvroRecord",
+               "fields": [
+                 {
+                   "name": "f_int",
+                   "type": "int"
+                 },
+                 {
+                   "name": "f_long",
+                   "type": "long"
+                 },
+                 {
+                   "name": "f_string",
+                   "type": "string"
+                 },
+                 {
+                   "name": "f_boolean",
+                   "type": "boolean"
+                 },
+                 {
+                   "name": "f_float",
+                   "type": "float"
+                 },
+                 {
+                   "name": "f_double",
+                   "type": "double"
+                 },
+                 {
+                   "name": "f_enum",
+                   "type" : {
+                    "type": "enum",
+                    "name": "Suit",
+                    "symbols" : ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
+                   }
+                 },
+                 {
+                  "name": "f_map",
+                  "type": {
+                     "type": "map",
+                     "values" : "string",
+                     "default": {}
+                   }
+                 },
+                 {
+                  "name": "f_union",
+                  "type": ["null", "string", "int" ]
+                 },
+                 {
+                  "name": "f_optional_to_test_not_filled_case",
+                  "type": [ "null", "string"]
+                 },
+                 {
+                     "name" : "f_fixed",
+                     "type" : { "type" : "fixed" ,"size" : 8, "name": "long_encoded" }
+                   },
+                   {
+                     "name" : "f_bytes",
+                     "type": "bytes"
+                   }
+               ]
+            }"""
+    );
+
+    String jsonPayload = """
+        {
+          "f_int": 123,
+          "f_long": 4294967294,
+          "f_string": "string here",
+          "f_boolean": true,
+          "f_float": 123.1,
+          "f_double": 123456.123456,
+          "f_enum": "SPADES",
+          "f_map": { "k1": "string value" },
+          "f_union": { "int": 123 },
+          "f_fixed": "\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0004Ò",
+          "f_bytes": "\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\t)"
+        }
+        """;
+
+    registryClient.register("test-value", schema);
+    assertSerdeCycle("test", jsonPayload);
+  }
+
+  @Test
+  void avroLogicalTypesRepresentationIsConsistentForSerializationAndDeserialization() throws Exception {
+    AvroSchema schema = new AvroSchema(
+        """
+             {
+               "type": "record",
+               "name": "TestAvroRecord",
+               "fields": [
+                 {
+                   "name": "lt_date",
+                   "type": { "type": "int", "logicalType": "date" }
+                 },
+                 {
+                   "name": "lt_uuid",
+                   "type": { "type": "string", "logicalType": "uuid" }
+                 },
+                 {
+                   "name": "lt_decimal",
+                   "type": { "type": "bytes", "logicalType": "decimal", "precision": 22, "scale":10 }
+                 },
+                 {
+                   "name": "lt_time_millis",
+                   "type": { "type": "int", "logicalType": "time-millis"}
+                 },
+                 {
+                   "name": "lt_time_micros",
+                   "type": { "type": "long", "logicalType": "time-micros"}
+                 },
+                 {
+                   "name": "lt_timestamp_millis",
+                   "type": { "type": "long", "logicalType": "timestamp-millis" }
+                 },
+                 {
+                   "name": "lt_timestamp_micros",
+                   "type": { "type": "long", "logicalType": "timestamp-micros" }
+                 },
+                 {
+                   "name": "lt_local_timestamp_millis",
+                   "type": { "type": "long", "logicalType": "local-timestamp-millis" }
+                 },
+                 {
+                   "name": "lt_local_timestamp_micros",
+                   "type": { "type": "long", "logicalType": "local-timestamp-micros" }
+                 }
+               ]
+            }"""
+    );
+
+    String jsonPayload = """
+        {
+          "lt_date":"1991-08-14",
+          "lt_decimal": 2.1617413862327545E11,
+          "lt_time_millis": "10:15:30.001",
+          "lt_time_micros": "10:15:30.123456",
+          "lt_uuid": "a37b75ca-097c-5d46-6119-f0637922e908",
+          "lt_timestamp_millis": "2007-12-03T10:15:30.123Z",
+          "lt_timestamp_micros": "2007-12-03T10:15:30.123456Z",
+          "lt_local_timestamp_millis": "2017-12-03T10:15:30.123",
+          "lt_local_timestamp_micros": "2017-12-03T10:15:30.123456"
+        }
+        """;
+
+    registryClient.register("test-value", schema);
+    assertSerdeCycle("test", jsonPayload);
+  }
+
+  // 1. serialize the input JSON to binary
+  // 2. deserialize it back from binary
+  // 3. check that the deserialized version equals the input
+  void assertSerdeCycle(String topic, String jsonInput) {
+    byte[] serializedBytes = serde.serializer(topic, Serde.Target.VALUE).serialize(jsonInput);
+    var deserializedJson = serde.deserializer(topic, Serde.Target.VALUE)
+        .deserialize(null, serializedBytes)
+        .getResult();
+    assertJsonsEqual(jsonInput, deserializedJson);
+  }
+
 }
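The same cycle can be exercised directly against the conversion utility (a sketch built only from calls that appear in this diff; see JsonAvroConversionTest further below):

    var intSchema = new AvroSchema("\"int\"").rawSchema();
    Object avroValue = JsonAvroConversion.convertJsonToAvro("123", intSchema);  // -> 123
    var jsonBack = JsonAvroConversion.convertAvroToJson(avroValue, intSchema);  // -> IntNode(123)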

+ 24 - 4
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/KafkaConfigSanitizerTest.java

@@ -3,14 +3,16 @@ package com.provectus.kafka.ui.service;
 import static org.assertj.core.api.Assertions.assertThat;
 
 import java.util.Arrays;
-import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
 import org.junit.jupiter.api.Test;
 
 class KafkaConfigSanitizerTest {
 
   @Test
   void doNothingIfEnabledPropertySetToFalse() {
-    final var sanitizer = new KafkaConfigSanitizer(false, Collections.emptyList());
+    final var sanitizer = new KafkaConfigSanitizer(false, List.of());
     assertThat(sanitizer.sanitize("password", "secret")).isEqualTo("secret");
     assertThat(sanitizer.sanitize("sasl.jaas.config", "secret")).isEqualTo("secret");
     assertThat(sanitizer.sanitize("database.password", "secret")).isEqualTo("secret");
@@ -18,7 +20,7 @@ class KafkaConfigSanitizerTest {
 
   @Test
   void obfuscateCredentials() {
-    final var sanitizer = new KafkaConfigSanitizer(true, Collections.emptyList());
+    final var sanitizer = new KafkaConfigSanitizer(true, List.of());
     assertThat(sanitizer.sanitize("sasl.jaas.config", "secret")).isEqualTo("******");
     assertThat(sanitizer.sanitize("consumer.sasl.jaas.config", "secret")).isEqualTo("******");
     assertThat(sanitizer.sanitize("producer.sasl.jaas.config", "secret")).isEqualTo("******");
@@ -36,7 +38,7 @@ class KafkaConfigSanitizerTest {
 
   @Test
   void notObfuscateNormalConfigs() {
-    final var sanitizer = new KafkaConfigSanitizer(true, Collections.emptyList());
+    final var sanitizer = new KafkaConfigSanitizer(true, List.of());
     assertThat(sanitizer.sanitize("security.protocol", "SASL_SSL")).isEqualTo("SASL_SSL");
     final String[] bootstrapServer = new String[] {"test1:9092", "test2:9092"};
     assertThat(sanitizer.sanitize("bootstrap.servers", bootstrapServer)).isEqualTo(bootstrapServer);
@@ -52,4 +54,22 @@ class KafkaConfigSanitizerTest {
     assertThat(sanitizer.sanitize("database.password", "no longer credential"))
             .isEqualTo("no longer credential");
   }
+
+  @Test
+  void sanitizeConnectorConfigDoNotFailOnNullableValues() {
+    Map<String, Object> originalConfig = new HashMap<>();
+    originalConfig.put("password", "secret");
+    originalConfig.put("asIs", "normal");
+    originalConfig.put("nullVal", null);
+
+    var sanitizedConfig = new KafkaConfigSanitizer(true, List.of())
+        .sanitizeConnectorConfig(originalConfig);
+
+    assertThat(sanitizedConfig)
+        .hasSize(3)
+        .containsEntry("password", "******")
+        .containsEntry("asIs", "normal")
+        .containsEntry("nullVal", null);
+  }
+
 }
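Summarizing the behavior these tests pin down (a usage sketch based only on the assertions above):

    var sanitizer = new KafkaConfigSanitizer(true, List.of());
    sanitizer.sanitize("sasl.jaas.config", "secret");    // -> "******"
    sanitizer.sanitize("security.protocol", "SASL_SSL"); // -> "SASL_SSL" (not a credential)

    Map<String, Object> connectorConfig = new HashMap<>();
    connectorConfig.put("password", "secret");
    connectorConfig.put("nullVal", null);
    sanitizer.sanitizeConnectorConfig(connectorConfig);  // -> {password=******, nullVal=null}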

+ 41 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/MessagesServiceTest.java

@@ -1,5 +1,8 @@
 package com.provectus.kafka.ui.service;
 
+import static com.provectus.kafka.ui.service.MessagesService.execSmartFilterTest;
+import static org.assertj.core.api.Assertions.assertThat;
+
 import com.provectus.kafka.ui.AbstractIntegrationTest;
 import com.provectus.kafka.ui.exception.TopicNotFoundException;
 import com.provectus.kafka.ui.model.ConsumerPosition;
@@ -7,11 +10,13 @@ import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.SeekDirectionDTO;
 import com.provectus.kafka.ui.model.SeekTypeDTO;
+import com.provectus.kafka.ui.model.SmartFilterTestExecutionDTO;
 import com.provectus.kafka.ui.model.TopicMessageDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.producer.KafkaTestProducer;
 import com.provectus.kafka.ui.serdes.builtin.StringSerde;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 import org.apache.kafka.clients.admin.NewTopic;
 import org.junit.jupiter.api.BeforeEach;
@@ -91,4 +96,40 @@ class MessagesServiceTest extends AbstractIntegrationTest {
     }
   }
 
+  @Test
+  void execSmartFilterTestReturnsExecutionResult() {
+    var params = new SmartFilterTestExecutionDTO()
+        .filterCode("key != null && value != null && headers != null && timestampMs != null && offset != null")
+        .key("1234")
+        .value("{ \"some\" : \"value\" } ")
+        .headers(Map.of("h1", "hv1"))
+        .offset(12345L)
+        .timestampMs(System.currentTimeMillis())
+        .partition(1);
+    assertThat(execSmartFilterTest(params).getResult()).isTrue();
+
+    params.setFilterCode("return false");
+    assertThat(execSmartFilterTest(params).getResult()).isFalse();
+  }
+
+  @Test
+  void execSmartFilterTestReturnsErrorOnFilterApplyError() {
+    var result = execSmartFilterTest(
+        new SmartFilterTestExecutionDTO()
+            .filterCode("return 1/0")
+    );
+    assertThat(result.getResult()).isNull();
+    assertThat(result.getError()).containsIgnoringCase("execution error");
+  }
+
+  @Test
+  void execSmartFilterTestReturnsErrorOnFilterCompilationError() {
+    var result = execSmartFilterTest(
+        new SmartFilterTestExecutionDTO()
+            .filterCode("this is invalid groovy syntax = 1")
+    );
+    assertThat(result.getResult()).isNull();
+    assertThat(result.getError()).containsIgnoringCase("Compilation error");
+  }
+
 }
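The bindings available to a filter are the ones set above: key, value, headers, partition, offset, timestampMs. A hypothetical expression over those bindings (assuming the usual Groovy comparison semantics):

    var params = new SmartFilterTestExecutionDTO()
        .filterCode("offset > 100 && headers.containsKey('h1')") // hypothetical filter expression
        .headers(Map.of("h1", "hv1"))
        .offset(12345L);
    assertThat(execSmartFilterTest(params).getResult()).isTrue();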

+ 3 - 1
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SchemaRegistryPaginationTest.java

@@ -9,6 +9,7 @@ import static org.mockito.Mockito.when;
 import com.provectus.kafka.ui.controller.SchemasController;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.SchemaSubjectDTO;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.sr.model.Compatibility;
 import com.provectus.kafka.ui.sr.model.SchemaSubject;
 import com.provectus.kafka.ui.util.AccessControlServiceMock;
@@ -41,7 +42,8 @@ public class SchemaRegistryPaginationTest {
                 new SchemaRegistryService.SubjectWithCompatibilityLevel(
                     new SchemaSubject().subject(a.getArgument(1)), Compatibility.FULL)));
 
-    this.controller = new SchemasController(schemaRegistryService, new AccessControlServiceMock().getMock());
+    this.controller = new SchemasController(schemaRegistryService, new AccessControlServiceMock().getMock(),
+        mock(AuditService.class));
     this.controller.setClustersStorage(clustersStorage);
   }
 

+ 3 - 3
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/TopicsServicePaginationTest.java

@@ -18,6 +18,7 @@ import com.provectus.kafka.ui.model.SortOrderDTO;
 import com.provectus.kafka.ui.model.TopicColumnsToSortDTO;
 import com.provectus.kafka.ui.model.TopicDTO;
 import com.provectus.kafka.ui.service.analyze.TopicAnalysisService;
+import com.provectus.kafka.ui.service.audit.AuditService;
 import com.provectus.kafka.ui.service.rbac.AccessControlService;
 import com.provectus.kafka.ui.util.AccessControlServiceMock;
 import java.util.ArrayList;
@@ -33,7 +34,6 @@ import java.util.stream.IntStream;
 import org.apache.kafka.clients.admin.TopicDescription;
 import org.apache.kafka.common.TopicPartitionInfo;
 import org.junit.jupiter.api.Test;
-import org.springframework.test.util.ReflectionTestUtils;
 import reactor.core.publisher.Mono;
 
 class TopicsServicePaginationTest {
@@ -46,7 +46,7 @@ class TopicsServicePaginationTest {
   private final AccessControlService accessControlService = new AccessControlServiceMock().getMock();
 
   private final TopicsController topicsController = new TopicsController(
-      topicsService, mock(TopicAnalysisService.class), clusterMapper, accessControlService);
+      topicsService, mock(TopicAnalysisService.class), clusterMapper, accessControlService, mock(AuditService.class));
 
   private void init(Map<String, InternalTopic> topicsInCache) {
 
@@ -59,7 +59,7 @@ class TopicsServicePaginationTest {
           List<String> lst = a.getArgument(1);
           return Mono.just(lst.stream().map(topicsInCache::get).collect(Collectors.toList()));
         });
-    ReflectionTestUtils.setField(topicsController, "clustersStorage", clustersStorage);
+    topicsController.setClustersStorage(clustersStorage);
   }
 
   @Test

+ 87 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/audit/AuditIntegrationTest.java

@@ -0,0 +1,87 @@
+package com.provectus.kafka.ui.service.audit;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.fasterxml.jackson.core.JsonProcessingException;
+import com.fasterxml.jackson.databind.json.JsonMapper;
+import com.provectus.kafka.ui.AbstractIntegrationTest;
+import com.provectus.kafka.ui.model.TopicCreationDTO;
+import com.provectus.kafka.ui.model.rbac.Resource;
+import java.time.Duration;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.UUID;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.serialization.BytesDeserializer;
+import org.apache.kafka.common.serialization.StringDeserializer;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.test.web.reactive.server.WebTestClient;
+import org.testcontainers.shaded.org.awaitility.Awaitility;
+
+public class AuditIntegrationTest extends AbstractIntegrationTest {
+
+  @Autowired
+  private WebTestClient webTestClient;
+
+  @Test
+  void auditRecordWrittenIntoKafkaWhenNewTopicCreated() {
+    String newTopicName = "test_audit_" + UUID.randomUUID();
+
+    webTestClient.post()
+        .uri("/api/clusters/{clusterName}/topics", LOCAL)
+        .bodyValue(
+            new TopicCreationDTO()
+                .replicationFactor(1)
+                .partitions(1)
+                .name(newTopicName)
+        )
+        .exchange()
+        .expectStatus()
+        .isOk();
+
+    try (var consumer = createConsumer()) {
+      var jsonMapper = new JsonMapper();
+      consumer.subscribe(List.of("__kui-audit-log"));
+      Awaitility.await()
+          .pollInSameThread()
+          .atMost(Duration.ofSeconds(15))
+          .untilAsserted(() -> {
+            var polled = consumer.poll(Duration.ofSeconds(1));
+            assertThat(polled).anySatisfy(kafkaRecord -> {
+              try {
+                AuditRecord record = jsonMapper.readValue(kafkaRecord.value(), AuditRecord.class);
+                assertThat(record.operation()).isEqualTo("createTopic");
+                assertThat(record.resources()).map(AuditRecord.AuditResource::type).contains(Resource.TOPIC);
+                assertThat(record.result().success()).isTrue();
+                assertThat(record.timestamp()).isNotBlank();
+                assertThat(record.clusterName()).isEqualTo(LOCAL);
+                assertThat(record.operationParams())
+                    .isEqualTo(Map.of(
+                        "name", newTopicName,
+                        "partitions", 1,
+                        "replicationFactor", 1,
+                        "configs", Map.of()
+                    ));
+              } catch (JsonProcessingException e) {
+                Assertions.fail();
+              }
+            });
+          });
+    }
+  }
+
+  private KafkaConsumer<?, String> createConsumer() {
+    Properties props = new Properties();
+    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());
+    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
+    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+    props.put(ConsumerConfig.GROUP_ID_CONFIG, AuditIntegrationTest.class.getName());
+    return new KafkaConsumer<>(props);
+  }
+
+}
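Reconstructed from the assertions above, a record on the __kui-audit-log topic deserializes to JSON of roughly this shape (illustrative values only; the exact field set may differ):

    String sampleAuditRecord = """
        {
          "timestamp": "2023-07-01T00:00:00Z",
          "resources": [ { "type": "TOPIC" } ],
          "operation": "createTopic",
          "operationParams": { "name": "test_audit_<uuid>", "partitions": 1, "replicationFactor": 1, "configs": {} },
          "result": { "success": true },
          "clusterName": "<name of the LOCAL cluster>"
        }""";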

+ 154 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/audit/AuditServiceTest.java

@@ -0,0 +1,154 @@
+package com.provectus.kafka.ui.service.audit;
+
+import static com.provectus.kafka.ui.service.audit.AuditService.createAuditWriter;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.anyInt;
+import static org.mockito.Mockito.anyMap;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import com.provectus.kafka.ui.config.ClustersProperties;
+import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.service.ReactiveAdminClient;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Supplier;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Nested;
+import org.junit.jupiter.api.Test;
+import reactor.core.publisher.Mono;
+import reactor.core.publisher.Signal;
+
+class AuditServiceTest {
+
+  @Test
+  void isAuditTopicChecksIfAuditIsEnabledForCluster() {
+    Map<String, AuditWriter> writers = Map.of(
+        "c1", new AuditWriter("с1", "c1topic", null, null),
+        "c2", new AuditWriter("c2", "c2topic", mock(KafkaProducer.class), null)
+    );
+
+    var auditService = new AuditService(writers);
+    assertThat(auditService.isAuditTopic(KafkaCluster.builder().name("notExist").build(), "some"))
+        .isFalse();
+    assertThat(auditService.isAuditTopic(KafkaCluster.builder().name("c1").build(), "c1topic"))
+        .isFalse();
+    assertThat(auditService.isAuditTopic(KafkaCluster.builder().name("c2").build(), "c2topic"))
+        .isTrue();
+  }
+
+  @Test
+  void auditCallsWriterMethodDependingOnSignal() {
+    var auditWriter = mock(AuditWriter.class);
+    var auditService = new AuditService(Map.of("test", auditWriter));
+
+    var cxt = AccessContext.builder().cluster("test").build();
+
+    auditService.audit(cxt, Signal.complete());
+    verify(auditWriter).write(any(), any(), eq(null));
+
+    var th = new Exception("testError");
+    auditService.audit(cxt, Signal.error(th));
+    verify(auditWriter).write(any(), any(), eq(th));
+  }
+
+  @Nested
+  class CreateAuditWriter {
+
+    private final ReactiveAdminClient adminClientMock = mock(ReactiveAdminClient.class);
+    private final Supplier<KafkaProducer<byte[], byte[]>> producerSupplierMock = mock(Supplier.class);
+
+    private final ClustersProperties.Cluster clustersProperties = new ClustersProperties.Cluster();
+
+    private final KafkaCluster cluster = KafkaCluster
+        .builder()
+        .name("test")
+        .originalProperties(clustersProperties)
+        .build();
+
+    @BeforeEach
+    void init() {
+      when(producerSupplierMock.get())
+          .thenReturn(mock(KafkaProducer.class));
+    }
+
+    @Test
+    void noWriterIfNoAuditPropsSet() {
+      var maybeWriter = createAuditWriter(cluster, adminClientMock, producerSupplierMock);
+      assertThat(maybeWriter).isEmpty();
+    }
+
+    @Test
+    void setsLoggerIfConsoleLoggingEnabled() {
+      var auditProps = new ClustersProperties.AuditProperties();
+      auditProps.setConsoleAuditEnabled(true);
+      clustersProperties.setAudit(auditProps);
+
+      var maybeWriter = createAuditWriter(cluster, adminClientMock, producerSupplierMock);
+      assertThat(maybeWriter).isPresent();
+
+      var writer = maybeWriter.get();
+      assertThat(writer.consoleLogger()).isNotNull();
+    }
+
+    @Nested
+    class WhenTopicAuditEnabled {
+
+      @BeforeEach
+      void setTopicWriteProperties() {
+        var auditProps = new ClustersProperties.AuditProperties();
+        auditProps.setTopicAuditEnabled(true);
+        auditProps.setTopic("test_audit_topic");
+        auditProps.setAuditTopicsPartitions(3);
+        auditProps.setAuditTopicProperties(Map.of("p1", "v1"));
+        clustersProperties.setAudit(auditProps);
+      }
+
+      @Test
+      void createsProducerIfTopicExists() {
+        when(adminClientMock.listTopics(true))
+            .thenReturn(Mono.just(Set.of("test_audit_topic")));
+
+        var maybeWriter = createAuditWriter(cluster, adminClientMock, producerSupplierMock);
+        assertThat(maybeWriter).isPresent();
+
+        // checking that no topic creation request was made
+        verify(adminClientMock, times(0))
+            .createTopic(any(), anyInt(), anyInt(), anyMap());
+
+        var writer = maybeWriter.get();
+        assertThat(writer.producer()).isNotNull();
+        assertThat(writer.targetTopic()).isEqualTo("test_audit_topic");
+      }
+
+      @Test
+      void createsProducerAndTopicIfItIsNotExist() {
+        when(adminClientMock.listTopics(true))
+            .thenReturn(Mono.just(Set.of()));
+
+        when(adminClientMock.createTopic(eq("test_audit_topic"), eq(3), eq(null), anyMap()))
+            .thenReturn(Mono.empty());
+
+        var maybeWriter = createAuditWriter(cluster, adminClientMock, producerSupplierMock);
+        assertThat(maybeWriter).isPresent();
+
+        // verifying the topic was created
+        verify(adminClientMock).createTopic(eq("test_audit_topic"), eq(3), eq(null), anyMap());
+
+        var writer = maybeWriter.get();
+        assertThat(writer.producer()).isNotNull();
+        assertThat(writer.targetTopic()).isEqualTo("test_audit_topic");
+      }
+
+    }
+  }
+
+}
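For reference, the cluster-level switches these tests toggle (a configuration sketch using only the setters shown above; the topic property value is a placeholder):

    var auditProps = new ClustersProperties.AuditProperties();
    auditProps.setConsoleAuditEnabled(true);   // log audit events to the console
    auditProps.setTopicAuditEnabled(true);     // and/or write them to a Kafka topic
    auditProps.setTopic("test_audit_topic");
    auditProps.setAuditTopicsPartitions(3);
    auditProps.setAuditTopicProperties(Map.of("retention.ms", "604800000")); // placeholder property
    clustersProperties.setAudit(auditProps);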

+ 1 - 1
kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/AccessControlServiceMock.java

@@ -16,7 +16,7 @@ public class AccessControlServiceMock {
     when(mock.validateAccess(any())).thenReturn(Mono.empty());
     when(mock.isSchemaAccessible(anyString(), anyString())).thenReturn(Mono.just(true));
 
-    when(mock.isTopicAccessible(any(), anyString())).thenReturn(Mono.just(true));
+    when(mock.filterViewableTopics(any(), any())).then(invocation -> Mono.just(invocation.getArgument(0)));
 
     return mock;
   }

+ 713 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/JsonAvroConversionTest.java

@@ -0,0 +1,713 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import static com.provectus.kafka.ui.util.jsonschema.JsonAvroConversion.convertAvroToJson;
+import static com.provectus.kafka.ui.util.jsonschema.JsonAvroConversion.convertJsonToAvro;
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.json.JsonMapper;
+import com.fasterxml.jackson.databind.node.BooleanNode;
+import com.fasterxml.jackson.databind.node.DoubleNode;
+import com.fasterxml.jackson.databind.node.FloatNode;
+import com.fasterxml.jackson.databind.node.IntNode;
+import com.fasterxml.jackson.databind.node.LongNode;
+import com.fasterxml.jackson.databind.node.TextNode;
+import com.google.common.primitives.Longs;
+import com.provectus.kafka.ui.exception.JsonAvroConversionException;
+import io.confluent.kafka.schemaregistry.avro.AvroSchema;
+import java.math.BigDecimal;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.time.Instant;
+import java.time.LocalDate;
+import java.time.LocalDateTime;
+import java.time.LocalTime;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import lombok.SneakyThrows;
+import org.apache.avro.Schema;
+import org.apache.avro.generic.GenericData;
+import org.junit.jupiter.api.Nested;
+import org.junit.jupiter.api.Test;
+
+class JsonAvroConversionTest {
+
+  // checking conversion from JSON to KafkaAvroSerializer-compatible Avro objects
+  @Nested
+  class FromJsonToAvro {
+
+    @Test
+    void primitiveRoot() {
+      assertThat(convertJsonToAvro("\"str\"", createSchema("\"string\"")))
+          .isEqualTo("str");
+
+      assertThat(convertJsonToAvro("123", createSchema("\"int\"")))
+          .isEqualTo(123);
+
+      assertThat(convertJsonToAvro("123", createSchema("\"long\"")))
+          .isEqualTo(123L);
+
+      assertThat(convertJsonToAvro("123.123", createSchema("\"float\"")))
+          .isEqualTo(123.123F);
+
+      assertThat(convertJsonToAvro("12345.12345", createSchema("\"double\"")))
+          .isEqualTo(12345.12345);
+    }
+
+    @Test
+    void primitiveTypedFields() {
+      var schema = createSchema(
+          """
+               {
+                 "type": "record",
+                 "name": "TestAvroRecord",
+                 "fields": [
+                   {
+                     "name": "f_int",
+                     "type": "int"
+                   },
+                   {
+                     "name": "f_long",
+                     "type": "long"
+                   },
+                   {
+                     "name": "f_string",
+                     "type": "string"
+                   },
+                   {
+                     "name": "f_boolean",
+                     "type": "boolean"
+                   },
+                   {
+                     "name": "f_float",
+                     "type": "float"
+                   },
+                   {
+                     "name": "f_double",
+                     "type": "double"
+                   },
+                   {
+                     "name": "f_enum",
+                     "type" : {
+                      "type": "enum",
+                      "name": "Suit",
+                      "symbols" : ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
+                     }
+                   },
+                   {
+                     "name" : "f_fixed",
+                     "type" : { "type" : "fixed" ,"size" : 8, "name": "long_encoded" }
+                   },
+                   {
+                     "name" : "f_bytes",
+                     "type": "bytes"
+                   }
+                 ]
+              }"""
+      );
+
+      String jsonPayload = """
+          {
+            "f_int": 123,
+            "f_long": 4294967294,
+            "f_string": "string here",
+            "f_boolean": true,
+            "f_float": 123.1,
+            "f_double": 123456.123456,
+            "f_enum": "SPADES",
+            "f_fixed": "\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0004Ò",
+            "f_bytes": "\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\t)"
+          }
+          """;
+
+      var converted = convertJsonToAvro(jsonPayload, schema);
+      assertThat(converted).isInstanceOf(GenericData.Record.class);
+
+      var record = (GenericData.Record) converted;
+      assertThat(record.get("f_int")).isEqualTo(123);
+      assertThat(record.get("f_long")).isEqualTo(4294967294L);
+      assertThat(record.get("f_string")).isEqualTo("string here");
+      assertThat(record.get("f_boolean")).isEqualTo(true);
+      assertThat(record.get("f_float")).isEqualTo(123.1f);
+      assertThat(record.get("f_double")).isEqualTo(123456.123456);
+      assertThat(record.get("f_enum"))
+          .isEqualTo(
+              new GenericData.EnumSymbol(
+                  schema.getField("f_enum").schema(),
+                  "SPADES"
+              )
+          );
+      assertThat(((GenericData.Fixed) record.get("f_fixed")).bytes()).isEqualTo(Longs.toByteArray(1234L));
+      assertThat(((ByteBuffer) record.get("f_bytes")).array()).isEqualTo(Longs.toByteArray(2345L));
+    }
+
+    @Test
+    void unionRoot() {
+      var schema = createSchema("[ \"null\", \"string\", \"int\" ]");
+
+      var converted = convertJsonToAvro("{\"string\":\"string here\"}", schema);
+      assertThat(converted).isEqualTo("string here");
+
+      converted = convertJsonToAvro("{\"int\": 123}", schema);
+      assertThat(converted).isEqualTo(123);
+
+      converted = convertJsonToAvro("null", schema);
+      assertThat(converted).isEqualTo(null);
+    }
+
+    @Test
+    void unionField() {
+      var schema = createSchema(
+          """
+               {
+                 "type": "record",
+                 "namespace": "com.test",
+                 "name": "TestAvroRecord",
+                 "fields": [
+                   {
+                     "name": "f_union",
+                     "type": [ "null", "int", "TestAvroRecord"]
+                   }
+                 ]
+              }"""
+      );
+
+      String jsonPayload = "{ \"f_union\": null }";
+
+      var record = (GenericData.Record) convertJsonToAvro(jsonPayload, schema);
+      assertThat(record.get("f_union")).isNull();
+
+      jsonPayload = "{ \"f_union\": { \"int\": 123 } }";
+      record = (GenericData.Record) convertJsonToAvro(jsonPayload, schema);
+      assertThat(record.get("f_union")).isEqualTo(123);
+
+      // short name can be used since there is no clash with other type names
+      jsonPayload = "{ \"f_union\": { \"TestAvroRecord\": { \"f_union\": { \"int\": 123  } } } }";
+      record = (GenericData.Record) convertJsonToAvro(jsonPayload, schema);
+      assertThat(record.get("f_union")).isInstanceOf(GenericData.Record.class);
+      var innerRec = (GenericData.Record) record.get("f_union");
+      assertThat(innerRec.get("f_union")).isEqualTo(123);
+
+      assertThatThrownBy(() ->
+          convertJsonToAvro("{ \"f_union\": { \"NotExistingType\": 123 } }", schema)
+      ).isInstanceOf(JsonAvroConversionException.class);
+    }
+
+    @Test
+    void unionFieldWithTypeNamesClash() {
+      var schema = createSchema(
+          """
+               {
+                 "type": "record",
+                 "namespace": "com.test",
+                 "name": "TestAvroRecord",
+                 "fields": [
+                   {
+                     "name": "nestedClass",
+                     "type": {
+                       "type": "record",
+                       "namespace": "com.nested",
+                       "name": "TestAvroRecord",
+                       "fields": [
+                         {"name" : "inner_obj_field", "type": "int" }
+                       ]
+                     }
+                   },
+                   {
+                     "name": "f_union",
+                     "type": [ "null", "int", "com.test.TestAvroRecord", "com.nested.TestAvroRecord"]
+                   }
+                 ]
+              }"""
+      );
+      // the short name can't be used since there is a clash with other type names
+      var jsonPayload = "{ \"f_union\": { \"com.test.TestAvroRecord\": { \"f_union\": { \"int\": 123  } } } }";
+      var record = (GenericData.Record) convertJsonToAvro(jsonPayload, schema);
+      assertThat(record.get("f_union")).isInstanceOf(GenericData.Record.class);
+      var innerRec = (GenericData.Record) record.get("f_union");
+      assertThat(innerRec.get("f_union")).isEqualTo(123);
+
+      // the short name can't be used since there is a clash with other type names
+      jsonPayload = "{ \"f_union\": { \"com.nested.TestAvroRecord\": { \"inner_obj_field\":  234 } } }";
+      record = (GenericData.Record) convertJsonToAvro(jsonPayload, schema);
+      assertThat(record.get("f_union")).isInstanceOf(GenericData.Record.class);
+      innerRec = (GenericData.Record) record.get("f_union");
+      assertThat(innerRec.get("inner_obj_field")).isEqualTo(234);
+
+      assertThatThrownBy(() ->
+          convertJsonToAvro("{ \"f_union\": { \"TestAvroRecord\": { \"inner_obj_field\":  234 } } }", schema)
+      ).isInstanceOf(JsonAvroConversionException.class);
+    }
+
+    @Test
+    void mapField() {
+      var schema = createSchema(
+          """
+               {
+                 "type": "record",
+                 "name": "TestAvroRecord",
+                 "fields": [
+                   {
+                     "name": "long_map",
+                     "type": {
+                       "type": "map",
+                       "values" : "long",
+                       "default": {}
+                     }
+                   },
+                   {
+                     "name": "string_map",
+                     "type": {
+                       "type": "map",
+                       "values" : "string",
+                       "default": {}
+                     }
+                   },
+                   {
+                     "name": "self_ref_map",
+                     "type": {
+                       "type": "map",
+                       "values" : "TestAvroRecord",
+                       "default": {}
+                     }
+                   }
+                 ]
+              }"""
+      );
+
+      String jsonPayload = """
+          {
+            "long_map": {
+              "k1": 123,
+              "k2": 456
+            },
+            "string_map": {
+              "k3": "s1",
+              "k4": "s2"
+            },
+            "self_ref_map": {
+              "k5" : {
+                "long_map": { "_k1": 222 },
+                "string_map": { "_k2": "_s1" }
+              }
+            }
+          }
+          """;
+
+      var record = (GenericData.Record) convertJsonToAvro(jsonPayload, schema);
+      assertThat(record.get("long_map"))
+          .isEqualTo(Map.of("k1", 123L, "k2", 456L));
+      assertThat(record.get("string_map"))
+          .isEqualTo(Map.of("k3", "s1", "k4", "s2"));
+      assertThat(record.get("self_ref_map"))
+          .isNotNull();
+
+      Map<String, Object> selfRefMapField = (Map<String, Object>) record.get("self_ref_map");
+      assertThat(selfRefMapField)
+          .hasSize(1)
+          .hasEntrySatisfying("k5", v -> {
+            assertThat(v).isInstanceOf(GenericData.Record.class);
+            var innerRec = (GenericData.Record) v;
+            assertThat(innerRec.get("long_map"))
+                .isEqualTo(Map.of("_k1", 222L));
+            assertThat(innerRec.get("string_map"))
+                .isEqualTo(Map.of("_k2", "_s1"));
+          });
+    }
+
+    @Test
+    void arrayField() {
+      var schema = createSchema(
+          """
+               {
+                 "type": "record",
+                 "name": "TestAvroRecord",
+                 "fields": [
+                   {
+                     "name": "f_array",
+                     "type": {
+                        "type": "array",
+                        "items" : "string",
+                        "default": []
+                      }
+                   }
+                 ]
+              }"""
+      );
+
+      String jsonPayload = """
+          {
+            "f_array": [ "e1", "e2" ]
+          }
+          """;
+
+      var record = (GenericData.Record) convertJsonToAvro(jsonPayload, schema);
+      assertThat(record.get("f_array")).isEqualTo(List.of("e1", "e2"));
+    }
+
+    @Test
+    void logicalTypesField() {
+      var schema = createSchema(
+          """
+               {
+                 "type": "record",
+                 "name": "TestAvroRecord",
+                 "fields": [
+                   {
+                     "name": "lt_date",
+                     "type": { "type": "int", "logicalType": "date" }
+                   },
+                   {
+                     "name": "lt_uuid",
+                     "type": { "type": "string", "logicalType": "uuid" }
+                   },
+                   {
+                     "name": "lt_decimal",
+                     "type": { "type": "bytes", "logicalType": "decimal", "precision": 22, "scale":10 }
+                   },
+                   {
+                     "name": "lt_time_millis",
+                     "type": { "type": "int", "logicalType": "time-millis"}
+                   },
+                   {
+                     "name": "lt_time_micros",
+                     "type": { "type": "long", "logicalType": "time-micros"}
+                   },
+                   {
+                     "name": "lt_timestamp_millis",
+                     "type": { "type": "long", "logicalType": "timestamp-millis" }
+                   },
+                   {
+                     "name": "lt_timestamp_micros",
+                     "type": { "type": "long", "logicalType": "timestamp-micros" }
+                   },
+                   {
+                     "name": "lt_local_timestamp_millis",
+                     "type": { "type": "long", "logicalType": "local-timestamp-millis" }
+                   },
+                   {
+                     "name": "lt_local_timestamp_micros",
+                     "type": { "type": "long", "logicalType": "local-timestamp-micros" }
+                   }
+                 ]
+              }"""
+      );
+
+      String jsonPayload = """
+          {
+            "lt_date":"1991-08-14",
+            "lt_decimal": 2.1617413862327545E11,
+            "lt_time_millis": "10:15:30.001",
+            "lt_time_micros": "10:15:30.123456",
+            "lt_uuid": "a37b75ca-097c-5d46-6119-f0637922e908",
+            "lt_timestamp_millis": "2007-12-03T10:15:30.123Z",
+            "lt_timestamp_micros": "2007-12-13T10:15:30.123456Z",
+            "lt_local_timestamp_millis": "2017-12-03T10:15:30.123",
+            "lt_local_timestamp_micros": "2017-12-13T10:15:30.123456"
+          }
+          """;
+
+      var converted = convertJsonToAvro(jsonPayload, schema);
+      assertThat(converted).isInstanceOf(GenericData.Record.class);
+
+      var record = (GenericData.Record) converted;
+
+      assertThat(record.get("lt_date"))
+          .isEqualTo(LocalDate.of(1991, 8, 14));
+      assertThat(record.get("lt_decimal"))
+          .isEqualTo(new BigDecimal("2.1617413862327545E11"));
+      assertThat(record.get("lt_time_millis"))
+          .isEqualTo(LocalTime.parse("10:15:30.001"));
+      assertThat(record.get("lt_time_micros"))
+          .isEqualTo(LocalTime.parse("10:15:30.123456"));
+      assertThat(record.get("lt_timestamp_millis"))
+          .isEqualTo(Instant.parse("2007-12-03T10:15:30.123Z"));
+      assertThat(record.get("lt_timestamp_micros"))
+          .isEqualTo(Instant.parse("2007-12-13T10:15:30.123456Z"));
+      assertThat(record.get("lt_local_timestamp_millis"))
+          .isEqualTo(LocalDateTime.parse("2017-12-03T10:15:30.123"));
+      assertThat(record.get("lt_local_timestamp_micros"))
+          .isEqualTo(LocalDateTime.parse("2017-12-13T10:15:30.123456"));
+    }
+  }
+
+  // checking conversion of KafkaAvroDeserializer output to JsonNode
+  @Nested
+  class FromAvroToJson {
+
+    @Test
+    void primitiveRoot() {
+      assertThat(convertAvroToJson("str", createSchema("\"string\"")))
+          .isEqualTo(new TextNode("str"));
+
+      assertThat(convertAvroToJson(123, createSchema("\"int\"")))
+          .isEqualTo(new IntNode(123));
+
+      assertThat(convertAvroToJson(123L, createSchema("\"long\"")))
+          .isEqualTo(new LongNode(123));
+
+      assertThat(convertAvroToJson(123.1F, createSchema("\"float\"")))
+          .isEqualTo(new FloatNode(123.1F));
+
+      assertThat(convertAvroToJson(123.1, createSchema("\"double\"")))
+          .isEqualTo(new DoubleNode(123.1));
+
+      assertThat(convertAvroToJson(true, createSchema("\"boolean\"")))
+          .isEqualTo(BooleanNode.valueOf(true));
+
+      assertThat(convertAvroToJson(ByteBuffer.wrap(Longs.toByteArray(123L)), createSchema("\"bytes\"")))
+          .isEqualTo(new TextNode(new String(Longs.toByteArray(123L), StandardCharsets.ISO_8859_1)));
+    }
+
+    @SneakyThrows
+    @Test
+    void primitiveTypedFields() {
+      var schema = createSchema(
+          """
+               {
+                 "type": "record",
+                 "name": "TestAvroRecord",
+                 "fields": [
+                   {
+                     "name": "f_int",
+                     "type": "int"
+                   },
+                   {
+                     "name": "f_long",
+                     "type": "long"
+                   },
+                   {
+                     "name": "f_string",
+                     "type": "string"
+                   },
+                   {
+                     "name": "f_boolean",
+                     "type": "boolean"
+                   },
+                   {
+                     "name": "f_float",
+                     "type": "float"
+                   },
+                   {
+                     "name": "f_double",
+                     "type": "double"
+                   },
+                   {
+                     "name": "f_enum",
+                     "type" : {
+                      "type": "enum",
+                      "name": "Suit",
+                      "symbols" : ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
+                     }
+                   },
+                   {
+                     "name" : "f_fixed",
+                     "type" : { "type" : "fixed" ,"size" : 8, "name": "long_encoded" }
+                   },
+                   {
+                     "name" : "f_bytes",
+                     "type": "bytes"
+                   }
+                 ]
+              }"""
+      );
+
+      byte[] fixedFieldValue = Longs.toByteArray(1234L);
+      byte[] bytesFieldValue = Longs.toByteArray(2345L);
+
+      GenericData.Record inputRecord = new GenericData.Record(schema);
+      inputRecord.put("f_int", 123);
+      inputRecord.put("f_long", 4294967294L);
+      inputRecord.put("f_string", "string here");
+      inputRecord.put("f_boolean", true);
+      inputRecord.put("f_float", 123.1f);
+      inputRecord.put("f_double", 123456.123456);
+      inputRecord.put("f_enum", new GenericData.EnumSymbol(schema.getField("f_enum").schema(), "SPADES"));
+      inputRecord.put("f_fixed", new GenericData.Fixed(schema.getField("f_fixed").schema(), fixedFieldValue));
+      inputRecord.put("f_bytes", ByteBuffer.wrap(bytesFieldValue));
+
+      String expectedJson = """
+          {
+            "f_int": 123,
+            "f_long": 4294967294,
+            "f_string": "string here",
+            "f_boolean": true,
+            "f_float": 123.1,
+            "f_double": 123456.123456,
+            "f_enum": "SPADES",
+            "f_fixed": "\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\u0004Ò",
+            "f_bytes": "\\u0000\\u0000\\u0000\\u0000\\u0000\\u0000\\t)"
+          }
+          """;
+
+      assertJsonsEqual(expectedJson, convertAvroToJson(inputRecord, schema));
+    }
+
+    @Test
+    void logicalTypesField() {
+      var schema = createSchema(
+          """
+               {
+                 "type": "record",
+                 "name": "TestAvroRecord",
+                 "fields": [
+                   {
+                     "name": "lt_date",
+                     "type": { "type": "int", "logicalType": "date" }
+                   },
+                   {
+                     "name": "lt_uuid",
+                     "type": { "type": "string", "logicalType": "uuid" }
+                   },
+                   {
+                     "name": "lt_decimal",
+                     "type": { "type": "bytes", "logicalType": "decimal", "precision": 22, "scale":10 }
+                   },
+                   {
+                     "name": "lt_time_millis",
+                     "type": { "type": "int", "logicalType": "time-millis"}
+                   },
+                   {
+                     "name": "lt_time_micros",
+                     "type": { "type": "long", "logicalType": "time-micros"}
+                   },
+                   {
+                     "name": "lt_timestamp_millis",
+                     "type": { "type": "long", "logicalType": "timestamp-millis" }
+                   },
+                   {
+                     "name": "lt_timestamp_micros",
+                     "type": { "type": "long", "logicalType": "timestamp-micros" }
+                   },
+                   {
+                     "name": "lt_local_timestamp_millis",
+                     "type": { "type": "long", "logicalType": "local-timestamp-millis" }
+                   },
+                   {
+                     "name": "lt_local_timestamp_micros",
+                     "type": { "type": "long", "logicalType": "local-timestamp-micros" }
+                   }
+                 ]
+              }"""
+      );
+
+      GenericData.Record inputRecord = new GenericData.Record(schema);
+      inputRecord.put("lt_date", LocalDate.of(1991, 8, 14));
+      inputRecord.put("lt_uuid", UUID.fromString("a37b75ca-097c-5d46-6119-f0637922e908"));
+      inputRecord.put("lt_decimal", new BigDecimal("2.16"));
+      inputRecord.put("lt_time_millis", LocalTime.parse("10:15:30.001"));
+      inputRecord.put("lt_time_micros", LocalTime.parse("10:15:30.123456"));
+      inputRecord.put("lt_timestamp_millis", Instant.parse("2007-12-03T10:15:30.123Z"));
+      inputRecord.put("lt_timestamp_micros", Instant.parse("2007-12-13T10:15:30.123456Z"));
+      inputRecord.put("lt_local_timestamp_millis", LocalDateTime.parse("2017-12-03T10:15:30.123"));
+      inputRecord.put("lt_local_timestamp_micros", LocalDateTime.parse("2017-12-13T10:15:30.123456"));
+
+      String expectedJson = """
+          {
+            "lt_date":"1991-08-14",
+            "lt_uuid": "a37b75ca-097c-5d46-6119-f0637922e908",
+            "lt_decimal": 2.16,
+            "lt_time_millis": "10:15:30.001",
+            "lt_time_micros": "10:15:30.123456",
+            "lt_timestamp_millis": "2007-12-03T10:15:30.123Z",
+            "lt_timestamp_micros": "2007-12-13T10:15:30.123456Z",
+            "lt_local_timestamp_millis": "2017-12-03T10:15:30.123",
+            "lt_local_timestamp_micros": "2017-12-13T10:15:30.123456"
+          }
+          """;
+
+      assertJsonsEqual(expectedJson, convertAvroToJson(inputRecord, schema));
+    }
+
+    @Test
+    void unionField() {
+      var schema = createSchema(
+          """
+               {
+                 "type": "record",
+                 "namespace": "com.test",
+                 "name": "TestAvroRecord",
+                 "fields": [
+                   {
+                     "name": "f_union",
+                     "type": [ "null", "int", "TestAvroRecord"]
+                   }
+                 ]
+              }"""
+      );
+
+      var r = new GenericData.Record(schema);
+      r.put("f_union", null);
+      assertJsonsEqual(" {}", convertAvroToJson(r, schema));
+
+      r = new GenericData.Record(schema);
+      r.put("f_union", 123);
+      assertJsonsEqual(" { \"f_union\" : { \"int\" : 123 } }", convertAvroToJson(r, schema));
+
+      r = new GenericData.Record(schema);
+      var innerRec = new GenericData.Record(schema);
+      innerRec.put("f_union", 123);
+      r.put("f_union", innerRec);
+      // short type name can be used since there is NO clash with other type names
+      assertJsonsEqual(
+          " { \"f_union\" : { \"TestAvroRecord\" : { \"f_union\" : { \"int\" : 123 } } } }",
+          convertAvroToJson(r, schema)
+      );
+    }
+
+    @Test
+    void unionFieldWithInnerTypesNamesClash() {
+      var schema = createSchema(
+          """
+               {
+                 "type": "record",
+                 "namespace": "com.test",
+                 "name": "TestAvroRecord",
+                 "fields": [
+                   {
+                     "name": "nestedClass",
+                     "type": {
+                       "type": "record",
+                       "namespace": "com.nested",
+                       "name": "TestAvroRecord",
+                       "fields": [
+                         {"name" : "inner_obj_field", "type": "int" }
+                       ]
+                     }
+                   },
+                   {
+                     "name": "f_union",
+                     "type": [ "null", "int", "com.test.TestAvroRecord", "com.nested.TestAvroRecord"]
+                   }
+                 ]
+              }"""
+      );
+
+      var r = new GenericData.Record(schema);
+      var innerRec = new GenericData.Record(schema);
+      innerRec.put("f_union", 123);
+      r.put("f_union", innerRec);
+      // full type name should be set since there is a clash with other type name
+      assertJsonsEqual(
+          " { \"f_union\" : { \"com.test.TestAvroRecord\" : { \"f_union\" : { \"int\" : 123 } } } }",
+          convertAvroToJson(r, schema)
+      );
+    }
+
+  }
+
+  private Schema createSchema(String schema) {
+    return new AvroSchema(schema).rawSchema();
+  }
+
+  @SneakyThrows
+  private void assertJsonsEqual(String expectedJson, JsonNode actual) {
+    var mapper = new JsonMapper();
+    assertThat(actual.toPrettyString())
+        .isEqualTo(mapper.readTree(expectedJson).toPrettyString());
+  }
+
+}
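A compact round trip for a single logical type, combining the two directions tested above (a sketch that assumes root-level logical types are handled the same way as record fields):

    var dateSchema = createSchema("{ \"type\": \"int\", \"logicalType\": \"date\" }");
    Object avro = convertJsonToAvro("\"1991-08-14\"", dateSchema); // -> LocalDate.of(1991, 8, 14)
    var json = convertAvroToJson(avro, dateSchema);                // -> TextNode("1991-08-14")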

+ 68 - 2
kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml

@@ -625,6 +625,25 @@ paths:
               schema:
                 $ref: '#/components/schemas/TopicSerdeSuggestion'
 
+  /api/smartfilters/testexecutions:
+    put:
+      tags:
+        - Messages
+      summary: executeSmartFilterTest
+      operationId: executeSmartFilterTest
+      requestBody:
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/SmartFilterTestExecution'
+      responses:
+        200:
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/SmartFilterTestExecutionResult'
+
 
   /api/clusters/{clusterName}/topics/{topicName}/messages:
     get:
@@ -2558,7 +2577,7 @@ components:
           $ref: "#/components/schemas/ConsumerGroupState"
         coordinator:
           $ref: "#/components/schemas/Broker"
-        messagesBehind:
+        consumerLag:
           type: integer
           format: int64
           description: null if consumer group has no offsets committed
@@ -2584,6 +2603,37 @@ components:
           items:
             $ref: '#/components/schemas/ConsumerGroup'
 
+    SmartFilterTestExecution:
+      type: object
+      required: [filterCode]
+      properties:
+        filterCode:
+          type: string
+        key:
+          type: string
+        value:
+          type: string
+        headers:
+          type: object
+          additionalProperties:
+            type: string
+        partition:
+          type: integer
+        offset:
+          type: integer
+          format: int64
+        timestampMs:
+          type: integer
+          format: int64
+
+    SmartFilterTestExecutionResult:
+      type: object
+      properties:
+        result:
+          type: boolean
+        error:
+          type: string
+
     CreateTopicMessage:
       type: object
       properties:
@@ -2776,7 +2826,7 @@ components:
         endOffset:
           type: integer
           format: int64
-        messagesBehind:
+        consumerLag:
           type: integer
           format: int64
           description: null if consumer group has no offsets committed
@@ -3465,6 +3515,7 @@ components:
         - CONNECT
         - KSQL
         - ACL
+        - AUDIT
 
     KafkaAcl:
       type: object
@@ -3825,3 +3876,18 @@ components:
                       pollingThrottleRate:
                         type: integer
                         format: int64
+                      audit:
+                        type: object
+                        properties:
+                          topic:
+                            type: string
+                          auditTopicsPartitions:
+                            type: integer
+                          topicAuditEnabled:
+                            type: boolean
+                          consoleAuditEnabled:
+                            type: boolean
+                          auditTopicProperties:
+                            type: object
+                            additionalProperties:
+                              type: string
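
In practice the new endpoint takes the filter code plus an optional sample message and replies with a boolean match result or an error string, per the two schemas above. A hedged sketch of a client call using Spring WebClient (the base URL and the filter expression are assumptions for illustration, not part of this change):

import java.util.Map;
import org.springframework.web.reactive.function.client.WebClient;

public class SmartFilterTestExecutionExample {

  public static void main(String[] args) {
    // Assumed local kafka-ui instance; adjust the base URL for your deployment.
    WebClient client = WebClient.create("http://localhost:8080");

    // Mirrors the SmartFilterTestExecution schema: only filterCode is required,
    // the remaining fields describe the sample message the filter is tested against.
    Map<String, Object> request = Map.of(
        "filterCode", "value != null",          // hypothetical filter expression
        "key", "order-1",
        "value", "{\"status\":\"ok\"}",
        "headers", Map.of("trace-id", "abc123"),
        "partition", 0,
        "offset", 42L,
        "timestampMs", System.currentTimeMillis());

    // Expected response shape per SmartFilterTestExecutionResult: {"result": ..., "error": ...}
    var result = client.put()
        .uri("/api/smartfilters/testexecutions")
        .bodyValue(request)
        .retrieve()
        .bodyToMono(Map.class)
        .block();
    System.out.println(result);
  }
}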

+ 1 - 1
kafka-ui-e2e-checks/pom.xml

@@ -267,7 +267,7 @@
                     <plugin>
                         <groupId>org.apache.maven.plugins</groupId>
                         <artifactId>maven-checkstyle-plugin</artifactId>
-                        <version>3.1.2</version>
+                        <version>3.3.0</version>
                         <dependencies>
                             <dependency>
                                 <groupId>com.puppycrawl.tools</groupId>

+ 5 - 0
kafka-ui-e2e-checks/src/main/java/com/provectus/kafka/ui/pages/BasePage.java

@@ -28,6 +28,7 @@ public abstract class BasePage extends WebUtils {
   protected SelenideElement confirmBtn = $x("//button[contains(text(),'Confirm')]");
   protected SelenideElement cancelBtn = $x("//button[contains(text(),'Cancel')]");
   protected SelenideElement backBtn = $x("//button[contains(text(),'Back')]");
+  protected SelenideElement previousBtn = $x("//button[contains(text(),'Previous')]");
   protected SelenideElement nextBtn = $x("//button[contains(text(),'Next')]");
   protected ElementsCollection ddlOptions = $$x("//li[@value]");
   protected ElementsCollection gridItems = $$x("//tr[@class]");
@@ -75,6 +76,10 @@ public abstract class BasePage extends WebUtils {
     clickByJavaScript(backBtn);
   }
 
+  protected void clickPreviousBtn() {
+    clickByJavaScript(previousBtn);
+  }
+
   protected void setJsonInputValue(SelenideElement jsonInput, String jsonConfig) {
     sendKeysByActions(jsonInput, jsonConfig.replace("  ", ""));
     new Actions(WebDriverRunner.getWebDriver())

+ 54 - 2
kafka-ui-e2e-checks/src/main/java/com/provectus/kafka/ui/pages/brokers/BrokersConfigTab.java

@@ -66,6 +66,13 @@ public class BrokersConfigTab extends BasePage {
     return this;
   }
 
+  @Step
+  public BrokersConfigTab clickPreviousButton() {
+    clickPreviousBtn();
+    waitUntilSpinnerDisappear(1);
+    return this;
+  }
+
   private List<BrokersConfigTab.BrokersConfigItem> initGridItems() {
     List<BrokersConfigTab.BrokersConfigItem> gridItemList = new ArrayList<>();
     gridItems.shouldHave(CollectionCondition.sizeGreaterThan(0))
@@ -104,13 +111,58 @@ public class BrokersConfigTab extends BasePage {
     }
 
     @Step
-    public void edit() {
-      element.$x("./td[2]//button").shouldBe(Condition.enabled).click();
+    public BrokersConfigItem setValue(String value) {
+      sendKeysAfterClear(getValueFld(), value);
+      return this;
+    }
+
+    @Step
+    public SelenideElement getValueFld() {
+      return element.$x("./td[2]//input");
+    }
+
+    @Step
+    public SelenideElement getSaveBtn() {
+      return element.$x("./td[2]//button[@aria-label='confirmAction']");
+    }
+
+    @Step
+    public SelenideElement getCancelBtn() {
+      return element.$x("./td[2]//button[@aria-label='cancelAction']");
+    }
+
+    @Step
+    public SelenideElement getEditBtn() {
+      return element.$x("./td[2]//button[@aria-label='editAction']");
+    }
+
+    @Step
+    public BrokersConfigItem clickSaveBtn() {
+      getSaveBtn().shouldBe(Condition.enabled).click();
+      return this;
+    }
+
+    @Step
+    public BrokersConfigItem clickCancelBtn() {
+      getCancelBtn().shouldBe(Condition.enabled).click();
+      return this;
+    }
+
+    @Step
+    public BrokersConfigItem clickEditBtn() {
+      getEditBtn().shouldBe(Condition.enabled).click();
+      return this;
     }
 
     @Step
     public String getSource() {
       return element.$x("./td[3]").getText().trim();
     }
+
+    @Step
+    public BrokersConfigItem clickConfirm() {
+      clickConfirmButton();
+      return this;
+    }
   }
 }
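
Taken together, these accessors replace the old single-purpose edit() with a fluent edit flow. The call sequence the page object is designed for looks like this (the config key is illustrative; the chain mirrors how brokersConfigEditCheck further down exercises it):

// Inside a test that extends BaseTest, after opening the Configs tab:
BrokersConfigTab.BrokersConfigItem item = brokersConfigTab
    .searchConfig("log.retention.ms")   // illustrative config key
    .getConfig("log.retention.ms");
item.clickEditBtn()                     // switch the row into edit mode
    .setValue("604800000")              // type the new value
    .clickSaveBtn()                     // submit it
    .clickConfirm();                    // confirm in the dialog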

+ 15 - 0
kafka-ui-e2e-checks/src/main/java/com/provectus/kafka/ui/utilities/StringUtils.java

@@ -0,0 +1,15 @@
+package com.provectus.kafka.ui.utilities;
+
+import java.util.stream.IntStream;
+import lombok.extern.slf4j.Slf4j;
+
+@Slf4j
+public class StringUtils {
+
+  public static String getMixedCase(String original) {
+    return IntStream.range(0, original.length())
+        .mapToObj(i -> i % 2 == 0 ? Character.toUpperCase(original.charAt(i)) : original.charAt(i))
+        .collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append)
+        .toString();
+  }
+}
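
The helper upper-cases every character at an even index and leaves the rest untouched, giving the case-insensitive search test below a third spelling variant beyond toLowerCase() and toUpperCase(). For example:

StringUtils.getMixedCase("compression.type");
// -> "CoMpReSsIoN.TyPe" (characters with no uppercase form, like '.', pass through unchanged)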

+ 8 - 30
kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/manualsuite/backlog/SmokeBacklog.java

@@ -1,6 +1,5 @@
 package com.provectus.kafka.ui.manualsuite.backlog;
 
-import static com.provectus.kafka.ui.qasesuite.BaseQaseTest.BROKERS_SUITE_ID;
 import static com.provectus.kafka.ui.qasesuite.BaseQaseTest.SCHEMAS_SUITE_ID;
 import static com.provectus.kafka.ui.qasesuite.BaseQaseTest.TOPICS_PROFILE_SUITE_ID;
 import static com.provectus.kafka.ui.qasesuite.BaseQaseTest.TOPICS_SUITE_ID;
@@ -15,80 +14,59 @@ import org.testng.annotations.Test;
 
 public class SmokeBacklog extends BaseManualTest {
 
-  @Automation(state = TO_BE_AUTOMATED)
-  @Suite(id = BROKERS_SUITE_ID)
-  @QaseId(332)
-  @Test
-  public void testCaseA() {
-  }
-
   @Automation(state = TO_BE_AUTOMATED)
   @Suite(id = TOPICS_PROFILE_SUITE_ID)
   @QaseId(335)
   @Test
-  public void testCaseB() {
+  public void testCaseA() {
   }
 
   @Automation(state = TO_BE_AUTOMATED)
   @Suite(id = TOPICS_PROFILE_SUITE_ID)
   @QaseId(336)
   @Test
-  public void testCaseC() {
+  public void testCaseB() {
   }
 
   @Automation(state = TO_BE_AUTOMATED)
   @Suite(id = TOPICS_PROFILE_SUITE_ID)
   @QaseId(343)
   @Test
-  public void testCaseD() {
+  public void testCaseC() {
   }
 
   @Automation(state = TO_BE_AUTOMATED)
   @Suite(id = SCHEMAS_SUITE_ID)
   @QaseId(345)
   @Test
-  public void testCaseE() {
+  public void testCaseD() {
   }
 
   @Automation(state = TO_BE_AUTOMATED)
   @Suite(id = SCHEMAS_SUITE_ID)
   @QaseId(346)
   @Test
-  public void testCaseF() {
+  public void testCaseE() {
   }
 
   @Automation(state = TO_BE_AUTOMATED)
   @Suite(id = TOPICS_PROFILE_SUITE_ID)
   @QaseId(347)
   @Test
-  public void testCaseG() {
-  }
-
-  @Automation(state = TO_BE_AUTOMATED)
-  @Suite(id = BROKERS_SUITE_ID)
-  @QaseId(348)
-  @Test
-  public void testCaseH() {
-  }
-
-  @Automation(state = TO_BE_AUTOMATED)
-  @Suite(id = BROKERS_SUITE_ID)
-  @QaseId(350)
-  @Test
-  public void testCaseI() {
+  public void testCaseF() {
   }
 
   @Automation(state = NOT_AUTOMATED)
   @Suite(id = TOPICS_SUITE_ID)
   @QaseId(50)
   @Test
-  public void testCaseJ() {
+  public void testCaseG() {
   }
 
   @Automation(state = NOT_AUTOMATED)
   @Suite(id = SCHEMAS_SUITE_ID)
   @QaseId(351)
   @Test
-  public void testCaseK() {
+  public void testCaseH() {
   }
 }

+ 108 - 7
kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/smokesuite/brokers/BrokersTest.java

@@ -1,6 +1,7 @@
 package com.provectus.kafka.ui.smokesuite.brokers;
 
 import static com.provectus.kafka.ui.pages.brokers.BrokersDetails.DetailsTab.CONFIGS;
+import static com.provectus.kafka.ui.utilities.StringUtils.getMixedCase;
 import static com.provectus.kafka.ui.variables.Expected.BROKER_SOURCE_INFO_TOOLTIP;
 
 import com.codeborne.selenide.Condition;
@@ -8,9 +9,11 @@ import com.provectus.kafka.ui.BaseTest;
 import com.provectus.kafka.ui.pages.brokers.BrokersConfigTab;
 import io.qameta.allure.Issue;
 import io.qase.api.annotation.QaseId;
+import java.util.List;
 import org.testng.Assert;
 import org.testng.annotations.Ignore;
 import org.testng.annotations.Test;
+import org.testng.asserts.SoftAssert;
 
 public class BrokersTest extends BaseTest {
 
@@ -49,11 +52,11 @@ public class BrokersTest extends BaseTest {
   @Issue("https://github.com/provectus/kafka-ui/issues/3347")
   @QaseId(330)
   @Test
-  public void brokersConfigSearchCheck() {
+  public void brokersConfigFirstPageSearchCheck() {
     navigateToBrokersAndOpenDetails(DEFAULT_BROKER_ID);
     brokersDetails
         .openDetailsTab(CONFIGS);
-    String anyConfigKey = brokersConfigTab
+    String anyConfigKeyFirstPage = brokersConfigTab
         .getAllConfigs().stream()
         .findAny().orElseThrow()
         .getKey();
@@ -61,14 +64,74 @@ public class BrokersTest extends BaseTest {
         .clickNextButton();
     Assert.assertFalse(brokersConfigTab.getAllConfigs().stream()
             .map(BrokersConfigTab.BrokersConfigItem::getKey)
-            .toList().contains(anyConfigKey),
-        String.format("getAllConfigs().contains(%s)", anyConfigKey));
+            .toList().contains(anyConfigKeyFirstPage),
+        String.format("getAllConfigs().contains(%s)", anyConfigKeyFirstPage));
     brokersConfigTab
-        .searchConfig(anyConfigKey);
+        .searchConfig(anyConfigKeyFirstPage);
     Assert.assertTrue(brokersConfigTab.getAllConfigs().stream()
             .map(BrokersConfigTab.BrokersConfigItem::getKey)
-            .toList().contains(anyConfigKey),
-        String.format("getAllConfigs().contains(%s)", anyConfigKey));
+            .toList().contains(anyConfigKeyFirstPage),
+        String.format("getAllConfigs().contains(%s)", anyConfigKeyFirstPage));
+  }
+
+  @Ignore
+  @Issue("https://github.com/provectus/kafka-ui/issues/3347")
+  @QaseId(350)
+  @Test
+  public void brokersConfigSecondPageSearchCheck() {
+    navigateToBrokersAndOpenDetails(DEFAULT_BROKER_ID);
+    brokersDetails
+        .openDetailsTab(CONFIGS);
+    brokersConfigTab
+        .clickNextButton();
+    String anyConfigKeySecondPage = brokersConfigTab
+        .getAllConfigs().stream()
+        .findAny().orElseThrow()
+        .getKey();
+    brokersConfigTab
+        .clickPreviousButton();
+    Assert.assertFalse(brokersConfigTab.getAllConfigs().stream()
+            .map(BrokersConfigTab.BrokersConfigItem::getKey)
+            .toList().contains(anyConfigKeySecondPage),
+        String.format("getAllConfigs().contains(%s)", anyConfigKeySecondPage));
+    brokersConfigTab
+        .searchConfig(anyConfigKeySecondPage);
+    Assert.assertTrue(brokersConfigTab.getAllConfigs().stream()
+            .map(BrokersConfigTab.BrokersConfigItem::getKey)
+            .toList().contains(anyConfigKeySecondPage),
+        String.format("getAllConfigs().contains(%s)", anyConfigKeySecondPage));
+  }
+
+  @Ignore
+  @Issue("https://github.com/provectus/kafka-ui/issues/3347")
+  @QaseId(348)
+  @Test
+  public void brokersConfigCaseInsensitiveSearchCheck() {
+    navigateToBrokersAndOpenDetails(DEFAULT_BROKER_ID);
+    brokersDetails
+        .openDetailsTab(CONFIGS);
+    String anyConfigKeyFirstPage = brokersConfigTab
+        .getAllConfigs().stream()
+        .findAny().orElseThrow()
+        .getKey();
+    brokersConfigTab
+        .clickNextButton();
+    Assert.assertFalse(brokersConfigTab.getAllConfigs().stream()
+            .map(BrokersConfigTab.BrokersConfigItem::getKey)
+            .toList().contains(anyConfigKeyFirstPage),
+        String.format("getAllConfigs().contains(%s)", anyConfigKeyFirstPage));
+    SoftAssert softly = new SoftAssert();
+    List.of(anyConfigKeyFirstPage.toLowerCase(), anyConfigKeyFirstPage.toUpperCase(),
+            getMixedCase(anyConfigKeyFirstPage))
+        .forEach(configCase -> {
+          brokersConfigTab
+              .searchConfig(configCase);
+          softly.assertTrue(brokersConfigTab.getAllConfigs().stream()
+                  .map(BrokersConfigTab.BrokersConfigItem::getKey)
+                  .toList().contains(anyConfigKeyFirstPage),
+              String.format("getAllConfigs().contains(%s)", configCase));
+        });
+    softly.assertAll();
   }
 
   @QaseId(331)
@@ -82,4 +145,42 @@ public class BrokersTest extends BaseTest {
         .getSourceInfoTooltipText();
     Assert.assertEquals(sourceInfoTooltip, BROKER_SOURCE_INFO_TOOLTIP, "brokerSourceInfoTooltip");
   }
+
+  @QaseId(332)
+  @Test
+  public void brokersConfigEditCheck() {
+    navigateToBrokersAndOpenDetails(DEFAULT_BROKER_ID);
+    brokersDetails
+        .openDetailsTab(CONFIGS);
+    String configKey = "log.cleaner.min.compaction.lag.ms";
+    BrokersConfigTab.BrokersConfigItem configItem = brokersConfigTab
+        .searchConfig(configKey)
+        .getConfig(configKey);
+    int defaultValue = Integer.parseInt(configItem.getValue());
+    configItem
+        .clickEditBtn();
+    SoftAssert softly = new SoftAssert();
+    softly.assertTrue(configItem.getSaveBtn().isDisplayed(), "getSaveBtn().isDisplayed()");
+    softly.assertTrue(configItem.getCancelBtn().isDisplayed(), "getCancelBtn().isDisplayed()");
+    softly.assertTrue(configItem.getValueFld().isEnabled(), "getValueFld().isEnabled()");
+    softly.assertAll();
+    int newValue = defaultValue + 1;
+    configItem
+        .setValue(String.valueOf(newValue))
+        .clickCancelBtn();
+    Assert.assertEquals(Integer.parseInt(configItem.getValue()), defaultValue, "getValue()");
+    configItem
+        .clickEditBtn()
+        .setValue(String.valueOf(newValue))
+        .clickSaveBtn()
+        .clickConfirm();
+    configItem = brokersConfigTab
+        .searchConfig(configKey)
+        .getConfig(configKey);
+    softly.assertFalse(configItem.getSaveBtn().isDisplayed(), "getSaveBtn().isDisplayed()");
+    softly.assertFalse(configItem.getCancelBtn().isDisplayed(), "getCancelBtn().isDisplayed()");
+    softly.assertTrue(configItem.getEditBtn().isDisplayed(), "getEditBtn().isDisplayed()");
+    softly.assertEquals(Integer.parseInt(configItem.getValue()), newValue, "getValue()");
+    softly.assertAll();
+  }
 }

+ 3 - 1
kafka-ui-react-app/package.json

@@ -9,7 +9,7 @@
     "@hookform/resolvers": "^2.7.1",
     "@microsoft/fetch-event-source": "^2.0.1",
     "@reduxjs/toolkit": "^1.8.3",
-    "@szhsin/react-menu": "^3.1.1",
+    "@szhsin/react-menu": "^3.5.3",
     "@tanstack/react-query": "^4.0.5",
     "@tanstack/react-table": "^8.5.10",
     "@testing-library/react": "^14.0.0",
@@ -24,6 +24,7 @@
     "json-schema-faker": "^0.5.0-rcv.44",
     "jsonpath-plus": "^7.2.0",
     "lodash": "^4.17.21",
+    "lossless-json": "^2.0.8",
     "pretty-ms": "7.0.1",
     "react": "^18.1.0",
     "react-ace": "^10.1.0",
@@ -71,6 +72,7 @@
     "@testing-library/user-event": "^14.4.3",
     "@types/eventsource": "^1.1.8",
     "@types/lodash": "^4.14.172",
+    "@types/lossless-json": "^1.0.1",
     "@types/node": "^16.4.13",
     "@types/react": "^18.0.9",
     "@types/react-datepicker": "^4.8.0",

+ 26 - 12
kafka-ui-react-app/pnpm-lock.yaml

@@ -10,7 +10,7 @@ specifiers:
   '@reduxjs/toolkit': ^1.8.3
   '@swc/core': ^1.3.36
   '@swc/jest': ^0.2.24
-  '@szhsin/react-menu': ^3.1.1
+  '@szhsin/react-menu': ^3.5.3
   '@tanstack/react-query': ^4.0.5
   '@tanstack/react-table': ^8.5.10
   '@testing-library/dom': ^9.0.0
@@ -19,6 +19,7 @@ specifiers:
   '@testing-library/user-event': ^14.4.3
   '@types/eventsource': ^1.1.8
   '@types/lodash': ^4.14.172
+  '@types/lossless-json': ^1.0.1
   '@types/node': ^16.4.13
   '@types/react': ^18.0.9
   '@types/react-datepicker': ^4.8.0
@@ -55,6 +56,7 @@ specifiers:
   json-schema-faker: ^0.5.0-rcv.44
   jsonpath-plus: ^7.2.0
   lodash: ^4.17.21
+  lossless-json: ^2.0.8
   prettier: ^2.8.4
   pretty-ms: 7.0.1
   react: ^18.1.0
@@ -89,14 +91,14 @@ dependencies:
   '@hookform/resolvers': 2.8.9_react-hook-form@7.43.1
   '@microsoft/fetch-event-source': 2.0.1
   '@reduxjs/toolkit': 1.8.3_ctm756ikdwcjcvyfxxwskzbr6q
-  '@szhsin/react-menu': 3.1.1_ef5jwxihqo6n7gxfmzogljlgcm
+  '@szhsin/react-menu': 3.5.3_ef5jwxihqo6n7gxfmzogljlgcm
   '@tanstack/react-query': 4.0.5_ef5jwxihqo6n7gxfmzogljlgcm
   '@tanstack/react-table': 8.5.10_ef5jwxihqo6n7gxfmzogljlgcm
   '@testing-library/react': 14.0.0_ef5jwxihqo6n7gxfmzogljlgcm
   '@types/testing-library__jest-dom': 5.14.5
   ace-builds: 1.7.1
   ajv: 8.8.2
-  ajv-formats: 2.1.1
+  ajv-formats: 2.1.1_ajv@8.8.2
   classnames: 2.3.1
   fetch-mock: 9.11.0
   jest: 29.5.0_6m7kcbkkzjz4ln6z66tlzx44we
@@ -104,6 +106,7 @@ dependencies:
   json-schema-faker: 0.5.0-rcv.44
   jsonpath-plus: 7.2.0
   lodash: 4.17.21
+  lossless-json: 2.0.8
   pretty-ms: 7.0.1
   react: 18.1.0
   react-ace: 10.1.0_ef5jwxihqo6n7gxfmzogljlgcm
@@ -136,6 +139,7 @@ devDependencies:
   '@testing-library/user-event': 14.4.3_@testing-library+dom@9.0.0
   '@types/eventsource': 1.1.8
   '@types/lodash': 4.14.177
+  '@types/lossless-json': 1.0.1
   '@types/node': 16.11.7
   '@types/react': 18.0.9
   '@types/react-datepicker': 4.10.0_react@18.1.0
@@ -1532,8 +1536,8 @@ packages:
       jsonc-parser: 3.2.0
     dev: true
 
-  /@szhsin/react-menu/3.1.1_ef5jwxihqo6n7gxfmzogljlgcm:
-    resolution: {integrity: sha512-IdHLyH61M+KqjTrvqglKo7JnbC0GIkg4OCtlXBxQPEjx/ecR5g0Iycqm+SG3rObEoniLZEz32iJkefve/LAHMA==}
+  /@szhsin/react-menu/3.5.3_ef5jwxihqo6n7gxfmzogljlgcm:
+    resolution: {integrity: sha512-jxo8oaRwxmVjUzkyOi/ZJiXaZiuFPMIxFzyJdUKfnhBLYiEOVTU9M2CiPuEkirILoareR2GJj2K3y8a81CBPlw==}
     peerDependencies:
       react: '>=16.14.0'
       react-dom: '>=16.14.0'
@@ -1541,7 +1545,7 @@ packages:
       prop-types: 15.8.1
       react: 18.1.0
       react-dom: 18.1.0_react@18.1.0
-      react-transition-state: 1.1.4_ef5jwxihqo6n7gxfmzogljlgcm
+      react-transition-state: 1.1.5_ef5jwxihqo6n7gxfmzogljlgcm
     dev: false
 
   /@tanstack/query-core/4.0.5:
@@ -1770,6 +1774,10 @@ packages:
     resolution: {integrity: sha512-0fDwydE2clKe9MNfvXHBHF9WEahRuj+msTuQqOmAApNORFvhMYZKNGGJdCzuhheVjMps/ti0Ak/iJPACMaevvw==}
     dev: true
 
+  /@types/lossless-json/1.0.1:
+    resolution: {integrity: sha512-zPE8kmpeL5/6L5gtTQHSOkAW/OSYYNTDRt6/2oEgLO1Zd3Rj5WVDoMloTtLJxQJhZGLGbL4pktKSh3NbzdaWdw==}
+    dev: true
+
   /@types/node/16.11.7:
     resolution: {integrity: sha512-QB5D2sqfSjCmTuWcBWyJ+/44bcjO7VbjSbOE0ucoVbAsSNQc4Lt6QkgkVXkTDwkL4z/beecZNDvVX15D4P8Jbw==}
 
@@ -2050,8 +2058,10 @@ packages:
       - supports-color
     dev: true
 
-  /ajv-formats/2.1.1:
+  /ajv-formats/2.1.1_ajv@8.8.2:
     resolution: {integrity: sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==}
+    peerDependencies:
+      ajv: ^8.0.0
     peerDependenciesMeta:
       ajv:
         optional: true
@@ -2734,8 +2744,8 @@ packages:
       ms: 2.1.2
       supports-color: 5.5.0
 
-  /decimal.js/10.3.1:
-    resolution: {integrity: sha512-V0pfhfr8suzyPGOx3nmq4aHqabehUZn6Ch9kyFpV79TGDTWFmHqUqXdabR7QHqxzrYolF4+tVmJhUG4OURg5dQ==}
+  /decimal.js/10.4.3:
+    resolution: {integrity: sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA==}
     dev: true
 
   /dedent/0.7.0:
@@ -4649,7 +4659,7 @@ packages:
       cssom: 0.5.0
       cssstyle: 2.3.0
       data-urls: 3.0.2
-      decimal.js: 10.3.1
+      decimal.js: 10.4.3
       domexception: 4.0.0
       escodegen: 2.0.0
       form-data: 4.0.0
@@ -4841,6 +4851,10 @@ packages:
     dependencies:
       js-tokens: 4.0.0
 
+  /lossless-json/2.0.8:
+    resolution: {integrity: sha512-7/GaZldUc7H5oNZlSk6bF06cRbtA7oF8zWXwbfMZm8yrYC2debx0KvWTBbQIbj6fh08LsXTWg+YtHJshXgYKow==}
+    dev: false
+
   /lru-cache/6.0.0:
     resolution: {integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==}
     engines: {node: '>=10'}
@@ -5562,8 +5576,8 @@ packages:
       react: 18.1.0
     dev: false
 
-  /react-transition-state/1.1.4_ef5jwxihqo6n7gxfmzogljlgcm:
-    resolution: {integrity: sha512-6nQLWWx95gYazCm6OdtD1zGbRiirvVXPrDtHAGsYb4xs9spMM7bA8Vx77KCpjL8PJ8qz1lXFGz2PTboCSvt7iw==}
+  /react-transition-state/1.1.5_ef5jwxihqo6n7gxfmzogljlgcm:
+    resolution: {integrity: sha512-ITY2mZqc2dWG2eitJkYNdcSFW8aKeOlkL2A/vowRrLL8GH3J6Re/SpD/BLvQzrVOTqjsP0b5S9N10vgNNzwMUQ==}
     peerDependencies:
       react: '>=16.8.0'
       react-dom: '>=16.8.0'

+ 5 - 3
kafka-ui-react-app/src/components/App.tsx

@@ -1,4 +1,4 @@
-import React, { Suspense } from 'react';
+import React, { Suspense, useContext } from 'react';
 import { Routes, Route, Navigate } from 'react-router-dom';
 import {
   accessErrorPage,
@@ -18,6 +18,7 @@ import { Toaster } from 'react-hot-toast';
 import GlobalCSS from 'components/globalCss';
 import * as S from 'components/App.styled';
 import ClusterConfigForm from 'widgets/ClusterConfigForm';
+import { ThemeModeContext } from 'components/contexts/ThemeModeContext';
 
 import ConfirmationModal from './common/ConfirmationModal/ConfirmationModal';
 import { ConfirmContextProvider } from './contexts/ConfirmContext';
@@ -30,6 +31,7 @@ const queryClient = new QueryClient({
   defaultOptions: {
     queries: {
       suspense: true,
+      networkMode: 'offlineFirst',
       onError(error) {
         showServerError(error as Response);
       },
@@ -42,7 +44,7 @@ const queryClient = new QueryClient({
   },
 });
 const App: React.FC = () => {
-  const [isDarkMode, setDarkMode] = React.useState<boolean>(false);
+  const { isDarkMode } = useContext(ThemeModeContext);
 
   return (
     <QueryClientProvider client={queryClient}>
@@ -53,7 +55,7 @@ const App: React.FC = () => {
               <ConfirmContextProvider>
                 <GlobalCSS />
                 <S.Layout>
-                  <PageContainer setDarkMode={setDarkMode}>
+                  <PageContainer>
                     <Routes>
                       {['/', '/ui', '/ui/clusters'].map((path) => (
                         <Route

+ 4 - 1
kafka-ui-react-app/src/components/Brokers/Broker/Broker.tsx

@@ -44,7 +44,10 @@ const Broker: React.FC = () => {
       <Metrics.Wrapper>
         <Metrics.Section>
           <Metrics.Indicator label="Segment Size">
-            <BytesFormatted value={brokerDiskUsage?.segmentSize} />
+            <BytesFormatted
+              value={brokerDiskUsage?.segmentSize}
+              precision={2}
+            />
           </Metrics.Indicator>
           <Metrics.Indicator label="Segment Count">
             {brokerDiskUsage?.segmentCount}

Some files were not shown because too many files changed in this diff