
Merge remote-tracking branch 'origin/master' into issues/2752

Roman Zabaluev 2 years ago
parent
commit
a465ff005b
100 files changed with 1656 additions and 1595 deletions
  1. 36 0
      .devcontainer/devcontainer.json
  2. 2 0
      .github/ISSUE_TEMPLATE/bug_report.md
  3. 11 0
      .github/ISSUE_TEMPLATE/config.yml
  4. 0 16
      .github/ISSUE_TEMPLATE/question.md
  5. 3 2
      .github/workflows/aws_publisher.yaml
  6. 1 1
      .github/workflows/backend.yml
  7. 1 1
      .github/workflows/block_merge.yml
  8. 7 5
      .github/workflows/branch-deploy.yml
  9. 2 2
      .github/workflows/branch-remove.yml
  10. 5 3
      .github/workflows/build-public-image.yml
  11. 1 1
      .github/workflows/create-branch-for-helm.yaml
  12. 2 2
      .github/workflows/cve.yaml
  13. 2 2
      .github/workflows/delete-public-image.yml
  14. 1 1
      .github/workflows/documentation.yaml
  15. 88 0
      .github/workflows/e2e-automation.yml
  16. 19 13
      .github/workflows/e2e-checks.yaml
  17. 43 0
      .github/workflows/e2e-manual.yml
  18. 75 0
      .github/workflows/e2e-weekly.yml
  19. 2 2
      .github/workflows/frontend.yaml
  20. 1 1
      .github/workflows/helm.yaml
  21. 4 2
      .github/workflows/master.yaml
  22. 1 1
      .github/workflows/pr-checks.yaml
  23. 1 1
      .github/workflows/release-serde-api.yaml
  24. 3 2
      .github/workflows/release.yaml
  25. 4 2
      .github/workflows/separate_env_public_create.yml
  26. 1 1
      .github/workflows/stale.yaml
  27. 2 2
      .github/workflows/terraform-deploy.yml
  28. 4 2
      CONTRIBUTING.md
  29. 54 133
      README.md
  30. 3 1
      SECURITY.md
  31. 2 2
      charts/kafka-ui/Chart.yaml
  32. 1 34
      charts/kafka-ui/README.md
  33. 5 0
      charts/kafka-ui/templates/_helpers.tpl
  34. 3 3
      charts/kafka-ui/templates/ingress.yaml
  35. 2 0
      charts/kafka-ui/templates/secret.yaml
  36. 3 0
      charts/kafka-ui/values.yaml
  37. 0 43
      docker-compose.md
  38. 27 27
      documentation/compose/e2e-tests.yaml
  39. 1 1
      documentation/compose/jaas/client.properties
  40. 8 10
      documentation/compose/jaas/schema_registry.jaas
  41. 5 5
      documentation/compose/kafka-ssl.yml
  42. 1 0
      documentation/compose/kafka-ui-arm64.yaml
  43. 4 65
      documentation/compose/kafka-ui-jmx-secured.yml
  44. 2 1
      documentation/compose/kafka-ui-sasl.yaml
  45. 10 8
      documentation/compose/kafka-ui-serdes.yaml
  46. 1 0
      documentation/compose/kafka-ui.yaml
  47. 4 0
      documentation/compose/proto/key-types.proto
  48. 2 0
      documentation/compose/proto/values.proto
  49. 0 41
      documentation/guides/AWS_IAM.md
  50. 0 123
      documentation/guides/DataMasking.md
  51. 0 51
      documentation/guides/Protobuf.md
  52. 0 58
      documentation/guides/SASL_SCRAM.md
  53. 0 7
      documentation/guides/SECURE_BROKER.md
  54. 0 71
      documentation/guides/SSO.md
  55. 0 169
      documentation/guides/Serialization.md
  56. 0 22
      documentation/project/ROADMAP.md
  57. 0 8
      documentation/project/contributing/README.md
  58. 0 24
      documentation/project/contributing/building-and-running-without-docker.md
  59. 0 63
      documentation/project/contributing/building.md
  60. 0 42
      documentation/project/contributing/prerequisites.md
  61. 0 8
      documentation/project/contributing/set-up-git.md
  62. 0 28
      documentation/project/contributing/testing.md
  63. 333 0
      etc/checkstyle/checkstyle-e2e.xml
  64. 2 2
      etc/checkstyle/checkstyle.xml
  65. 0 65
      helm_chart.md
  66. 8 2
      kafka-ui-api/Dockerfile
  67. 25 0
      kafka-ui-api/pom.xml
  68. 11 2
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/KafkaUiApplication.java
  69. 0 22
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/KafkaConnectClientsFactory.java
  70. 206 106
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/RetryingKafkaConnectClient.java
  71. 88 21
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java
  72. 1 11
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/Config.java
  73. 1 41
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CorsGlobalConfiguration.java
  74. 33 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/WebclientProperties.java
  75. 3 4
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthProperties.java
  76. 5 2
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthPropertiesConverter.java
  77. 5 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/logout/CognitoLogoutSuccessHandler.java
  78. 1 1
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AccessController.java
  79. 130 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ApplicationConfigController.java
  80. 2 2
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
  81. 10 12
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java
  82. 1 6
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
  83. 64 61
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java
  84. 1 1
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java
  85. 18 41
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java
  86. 13 19
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
  87. 4 4
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java
  88. 28 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/EmptyPollsCounter.java
  89. 9 14
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java
  90. 0 16
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilterStats.java
  91. 82 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java
  92. 79 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/PollingSettings.java
  93. 4 5
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/PollingThrottler.java
  94. 1 1
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ResultSizeLimiter.java
  95. 3 5
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java
  96. 3 1
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java
  97. 19 0
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/FileUploadException.java
  98. 1 1
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/GlobalErrorWebExceptionHandler.java
  99. 2 2
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaCompatibilityException.java
  100. 0 12
      kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaTypeNotSupportedException.java

+ 36 - 0
.devcontainer/devcontainer.json

@@ -0,0 +1,36 @@
+{
+	"name": "Java",
+
+	"image": "mcr.microsoft.com/devcontainers/java:0-17",
+
+	"features": {
+		"ghcr.io/devcontainers/features/java:1": {
+			"version": "none",
+			"installMaven": "true",
+			"installGradle": "false"
+		},
+		"ghcr.io/devcontainers/features/docker-in-docker:2": {}
+	},
+
+	// Use 'forwardPorts' to make a list of ports inside the container available locally.
+	// "forwardPorts": [],
+
+	// Use 'postCreateCommand' to run commands after the container is created.
+	// "postCreateCommand": "java -version",
+
+	"customizations": {
+		"vscode": {
+			"extensions" : [
+				"vscjava.vscode-java-pack",
+				"vscjava.vscode-maven",
+				"vscjava.vscode-java-debug",
+				"EditorConfig.EditorConfig",
+				"ms-azuretools.vscode-docker",
+				"antfu.vite",
+				"ms-kubernetes-tools.vscode-kubernetes-tools",
+                "github.vscode-pull-request-github"
+			]
+		}
+	}
+
+}

+ 2 - 0
.github/ISSUE_TEMPLATE/bug_report.md

@@ -9,6 +9,8 @@ assignees: ''
 
 <!--
 
+We will close the issue without further explanation if you don't follow this template and don't provide the information requested within this template.
+
 Don't forget to check for existing issues/discussions regarding your proposal. We might already have it.
 https://github.com/provectus/kafka-ui/issues
 https://github.com/provectus/kafka-ui/discussions

+ 11 - 0
.github/ISSUE_TEMPLATE/config.yml

@@ -0,0 +1,11 @@
+blank_issues_enabled: false
+contact_links:
+  - name: Official documentation
+    url: https://docs.kafka-ui.provectus.io/
+    about: Before reaching out for support, please refer to our documentation. Read "FAQ" and "Common problems", also try using search there.
+  - name: Community Discord
+    url: https://discord.gg/4DWzD7pGE5
+    about: Chat with other users, get some support or ask questions.
+  - name: GitHub Discussions
+    url: https://github.com/provectus/kafka-ui/discussions
+    about: An alternative place to ask questions or to get some support.

+ 0 - 16
.github/ISSUE_TEMPLATE/question.md

@@ -1,16 +0,0 @@
----
-name: "❓ Question"
-about: Ask a question
-title: ''
-
----
-
-<!--
-
-To ask a question, please either:
-1. Open up a discussion (https://github.com/provectus/kafka-ui/discussions)
-2. Join us on discord (https://discord.gg/4DWzD7pGE5) and ask there.
-
-Don't forget to check/search for existing issues/discussions.
-
--->

+ 3 - 2
.github/workflows/aws_publisher.yaml

@@ -14,6 +14,7 @@ on:
         description: 'If set to true, the request to update AWS Server product version will be raised'
         required: true
         default: false
+        type: boolean
 
 jobs:
   build-ami:
@@ -30,7 +31,7 @@ jobs:
           echo "Packer will be triggered in this dir $WORK_DIR"
 
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
           aws-access-key-id: ${{ secrets.AWS_AMI_PUBLISH_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_AMI_PUBLISH_KEY_SECRET }}
@@ -57,7 +58,7 @@ jobs:
 
       # add fresh AMI to AWS Marketplace
       - name: Publish Artifact at Marketplace
-        if: ${{ github.event.inputs.PublishOnMarketplace == true }}
+        if: ${{ github.event.inputs.PublishOnMarketplace == 'true' }}
         env:
           PRODUCT_ID: ${{ secrets.AWS_SERVER_PRODUCT_ID }}
           RELEASE_VERSION: "${{ github.event.inputs.KafkaUIReleaseVersion }}"

+ 1 - 1
.github/workflows/backend.yml

@@ -1,4 +1,4 @@
-name: backend
+name: Backend build and test
 on:
   push:
     branches:

+ 1 - 1
.github/workflows/block_merge.yml

@@ -6,7 +6,7 @@ jobs:
   block_merge:
     runs-on: ubuntu-latest
     steps:
-      - uses: mheap/github-action-required-labels@v2
+      - uses: mheap/github-action-required-labels@v4
         with:
           mode: exactly
           count: 0

+ 7 - 5
.github/workflows/branch-deploy.yml

@@ -1,4 +1,4 @@
-name: DeployFromBranch
+name: Feature testing init
 on:
   workflow_dispatch:
 
@@ -10,6 +10,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
       - name: get branch name
         id: extract_branch
         run: |
@@ -43,7 +45,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -53,7 +55,7 @@ jobs:
         uses: aws-actions/amazon-ecr-login@v1
       - name: Build and push
         id: docker_build_and_push
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v4
         with:
           builder: ${{ steps.buildx.outputs.name }}
           context: kafka-ui-api
@@ -84,7 +86,7 @@ jobs:
 
       - name: make comment with private deployment link
         if: ${{ github.event.label.name == 'status/feature_testing' }}
-        uses: peter-evans/create-or-update-comment@v2
+        uses: peter-evans/create-or-update-comment@v3
         with:
           issue-number: ${{ github.event.pull_request.number }}
           body: |
@@ -92,7 +94,7 @@ jobs:
 
       - name: make comment with public deployment link
         if: ${{ github.event.label.name == 'status/feature_testing_public' }}
-        uses: peter-evans/create-or-update-comment@v2
+        uses: peter-evans/create-or-update-comment@v3
         with:
           issue-number: ${{ github.event.pull_request.number }}
           body: |

+ 2 - 2
.github/workflows/branch-remove.yml

@@ -1,4 +1,4 @@
-name: RemoveCustomDeployment
+name: Feature testing destroy
 on:
   workflow_dispatch:
   pull_request:
@@ -21,7 +21,7 @@ jobs:
           git add ../kafka-ui-from-branch/
           git commit -m "removed env:${{ needs.build.outputs.deploy }}" && git push || true
       - name: make comment with deployment link
-        uses: peter-evans/create-or-update-comment@v2
+        uses: peter-evans/create-or-update-comment@v3
         with:
           issue-number: ${{ github.event.pull_request.number }}
           body: |

+ 5 - 3
.github/workflows/build-public-image.yml

@@ -9,6 +9,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
       - name: get branch name
         id: extract_branch
         run: |
@@ -40,7 +42,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -52,7 +54,7 @@ jobs:
           registry-type: 'public'
       - name: Build and push
         id: docker_build_and_push
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v4
         with:
           builder: ${{ steps.buildx.outputs.name }}
           context: kafka-ui-api
@@ -63,7 +65,7 @@ jobs:
           cache-from: type=local,src=/tmp/.buildx-cache
           cache-to: type=local,dest=/tmp/.buildx-cache
       - name: make comment with private deployment link
-        uses: peter-evans/create-or-update-comment@v2
+        uses: peter-evans/create-or-update-comment@v3
         with:
           issue-number: ${{ github.event.pull_request.number }}
           body: |

+ 1 - 1
.github/workflows/create-branch-for-helm.yaml

@@ -1,4 +1,4 @@
-name: prepare-helm-release
+name: Prepare helm release
 on:
   repository_dispatch:
     types: [prepare-helm-release]

+ 2 - 2
.github/workflows/cve.yaml

@@ -40,7 +40,7 @@ jobs:
             ${{ runner.os }}-buildx-
 
       - name: Build docker image
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v4
         with:
           builder: ${{ steps.buildx.outputs.name }}
           context: kafka-ui-api
@@ -55,7 +55,7 @@ jobs:
           cache-to: type=local,dest=/tmp/.buildx-cache
 
       - name: Run CVE checks
-        uses: aquasecurity/trivy-action@0.8.0
+        uses: aquasecurity/trivy-action@0.10.0
         with:
           image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
           format: "table"

+ 2 - 2
.github/workflows/delete-public-image.yml

@@ -15,7 +15,7 @@ jobs:
           tag='${{ github.event.pull_request.number }}'
           echo "tag=${tag}" >> $GITHUB_OUTPUT
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -33,7 +33,7 @@ jobs:
                 --image-ids imageTag=${{ steps.extract_branch.outputs.tag }} \
                 --region us-east-1
       - name: make comment with private deployment link
-        uses: peter-evans/create-or-update-comment@v2
+        uses: peter-evans/create-or-update-comment@v3
         with:
           issue-number: ${{ github.event.pull_request.number }}
           body: |

+ 1 - 1
.github/workflows/documentation.yaml

@@ -1,4 +1,4 @@
-name: Documentation
+name: Documentation URLs linter
 on:
   pull_request:
     types:

+ 88 - 0
.github/workflows/e2e-automation.yml

@@ -0,0 +1,88 @@
+name: E2E Automation suite
+on:
+  workflow_dispatch:
+    inputs:
+      test_suite:
+        description: 'Select test suite to run'
+        default: 'regression'
+        required: true
+        type: choice
+        options:
+          - regression
+          - sanity
+          - smoke
+      qase_token:
+        description: 'Set Qase token to enable integration'
+        required: false
+        type: string
+
+jobs:
+  build-and-test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.sha }}
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v2
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: eu-central-1
+      - name: Set up environment
+        id: set_env_values
+        run: |
+          cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
+      - name: Pull with Docker
+        id: pull_chrome
+        run: |
+          docker pull selenoid/vnc_chrome:103.0
+      - name: Set up JDK
+        uses: actions/setup-java@v3
+        with:
+          java-version: '17'
+          distribution: 'zulu'
+          cache: 'maven'
+      - name: Build with Maven
+        id: build_app
+        run: |
+          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
+          ./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
+      - name: Compose with Docker
+        id: compose_app
+        # use the following command until #819 will be fixed
+        run: |
+          docker-compose -f kafka-ui-e2e-checks/docker/selenoid-git.yaml up -d
+          docker-compose -f ./documentation/compose/e2e-tests.yaml up -d
+      - name: Run test suite
+        run: |
+          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
+          ./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ github.event.inputs.qase_token }} -Dsurefire.suiteXmlFiles='src/test/resources/${{ github.event.inputs.test_suite }}.xml' -Dsuite=${{ github.event.inputs.test_suite }} -f 'kafka-ui-e2e-checks' test -Pprod
+      - name: Generate Allure report
+        uses: simple-elf/allure-report-action@master
+        if: always()
+        id: allure-report
+        with:
+          allure_results: ./kafka-ui-e2e-checks/allure-results
+          gh_pages: allure-results
+          allure_report: allure-report
+          subfolder: allure-results
+          report_url: "http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com"
+      - uses: jakejarvis/s3-sync-action@master
+        if: always()
+        env:
+          AWS_S3_BUCKET: 'kafkaui-allure-reports'
+          AWS_REGION: 'eu-central-1'
+          SOURCE_DIR: 'allure-history/allure-results'
+      - name: Deploy report to Amazon S3
+        if: always()
+        uses: Sibz/github-status-action@v1.1.6
+        with:
+          authToken: ${{secrets.GITHUB_TOKEN}}
+          context: "Click Details button to open Allure report"
+          state: "success"
+          sha: ${{ github.sha }}
+          target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}
+      - name: Dump Docker logs on failure
+        if: failure()
+        uses: jwalton/gh-docker-logs@v2.2.1

+ 19 - 13
.github/workflows/e2e-checks.yaml

@@ -1,7 +1,7 @@
-name: e2e-checks
+name: E2E PR health check
 on:
   pull_request_target:
-    types: ["opened", "edited", "reopened", "synchronize"]
+    types: [ "opened", "edited", "reopened", "synchronize" ]
     paths:
       - "kafka-ui-api/**"
       - "kafka-ui-contract/**"
@@ -15,14 +15,20 @@ jobs:
       - uses: actions/checkout@v3
         with:
           ref: ${{ github.event.pull_request.head.sha }}
-      - name: Set the values
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v2
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: eu-central-1
+      - name: Set up environment
         id: set_env_values
         run: |
           cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
-      - name: pull docker
+      - name: Pull with Docker
         id: pull_chrome
         run: |
-          docker pull selenium/standalone-chrome:103.0
+          docker pull selenoid/vnc_chrome:103.0
       - name: Set up JDK
         uses: actions/setup-java@v3
         with:
@@ -33,16 +39,17 @@ jobs:
         id: build_app
         run: |
           ./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
-          ./mvnw -B -V -ntp clean package -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
-      - name: compose app
+          ./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
+      - name: Compose with Docker
         id: compose_app
         # use the following command until #819 will be fixed
         run: |
+          docker-compose -f kafka-ui-e2e-checks/docker/selenoid-git.yaml up -d
           docker-compose -f ./documentation/compose/e2e-tests.yaml up -d
-      - name: e2e run
+      - name: Run test suite
         run: |
           ./mvnw -B -ntp versions:set -DnewVersion=${{ github.event.pull_request.head.sha }}
-          ./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ secrets.QASEIO_API_TOKEN }} -pl '!kafka-ui-api' test -Pprod
+          ./mvnw -B -V -ntp -Dsurefire.suiteXmlFiles='src/test/resources/smoke.xml' -f 'kafka-ui-e2e-checks' test -Pprod
       - name: Generate allure report
         uses: simple-elf/allure-report-action@master
         if: always()
@@ -52,20 +59,19 @@ jobs:
           gh_pages: allure-results
           allure_report: allure-report
           subfolder: allure-results
+          report_url: "http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com"
       - uses: jakejarvis/s3-sync-action@master
         if: always()
         env:
           AWS_S3_BUCKET: 'kafkaui-allure-reports'
-          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
-          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
           AWS_REGION: 'eu-central-1'
           SOURCE_DIR: 'allure-history/allure-results'
-      - name: Post the link to allure report
+      - name: Deploy report to Amazon S3
         if: always()
         uses: Sibz/github-status-action@v1.1.6
         with:
           authToken: ${{secrets.GITHUB_TOKEN}}
-          context: "Test report"
+          context: "Click Details button to open Allure report"
           state: "success"
           sha: ${{ github.event.pull_request.head.sha  || github.sha }}
           target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}

+ 43 - 0
.github/workflows/e2e-manual.yml

@@ -0,0 +1,43 @@
+name: E2E Manual suite
+on:
+  workflow_dispatch:
+    inputs:
+      test_suite:
+        description: 'Select test suite to run'
+        default: 'manual'
+        required: true
+        type: choice
+        options:
+          - manual
+          - qase
+      qase_token:
+        description: 'Set Qase token to enable integration'
+        required: true
+        type: string
+
+jobs:
+  build-and-test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.sha }}
+      - name: Set up environment
+        id: set_env_values
+        run: |
+          cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
+      - name: Set up JDK
+        uses: actions/setup-java@v3
+        with:
+          java-version: '17'
+          distribution: 'zulu'
+          cache: 'maven'
+      - name: Build with Maven
+        id: build_app
+        run: |
+          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
+          ./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
+      - name: Run test suite
+        run: |
+          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
+          ./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ github.event.inputs.qase_token }} -Dsurefire.suiteXmlFiles='src/test/resources/${{ github.event.inputs.test_suite }}.xml' -Dsuite=${{ github.event.inputs.test_suite }} -f 'kafka-ui-e2e-checks' test -Pprod

+ 75 - 0
.github/workflows/e2e-weekly.yml

@@ -0,0 +1,75 @@
+name: E2E Weekly suite
+on:
+  schedule:
+    - cron: '0 1 * * 1'
+
+jobs:
+  build-and-test:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.sha }}
+      - name: Configure AWS credentials
+        uses: aws-actions/configure-aws-credentials@v2
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: eu-central-1
+      - name: Set up environment
+        id: set_env_values
+        run: |
+          cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
+      - name: Pull with Docker
+        id: pull_chrome
+        run: |
+          docker pull selenoid/vnc_chrome:103.0
+      - name: Set up JDK
+        uses: actions/setup-java@v3
+        with:
+          java-version: '17'
+          distribution: 'zulu'
+          cache: 'maven'
+      - name: Build with Maven
+        id: build_app
+        run: |
+          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
+          ./mvnw -B -V -ntp clean install -Pprod -Dmaven.test.skip=true ${{ github.event.inputs.extraMavenOptions }}
+      - name: Compose with Docker
+        id: compose_app
+        # use the following command until #819 will be fixed
+        run: |
+          docker-compose -f kafka-ui-e2e-checks/docker/selenoid-git.yaml up -d
+          docker-compose -f ./documentation/compose/e2e-tests.yaml up -d
+      - name: Run test suite
+        run: |
+          ./mvnw -B -ntp versions:set -DnewVersion=${{ github.sha }}
+          ./mvnw -B -V -ntp -DQASEIO_API_TOKEN=${{ secrets.QASEIO_API_TOKEN }} -Dsurefire.suiteXmlFiles='src/test/resources/sanity.xml' -Dsuite=weekly -f 'kafka-ui-e2e-checks' test -Pprod
+      - name: Generate Allure report
+        uses: simple-elf/allure-report-action@master
+        if: always()
+        id: allure-report
+        with:
+          allure_results: ./kafka-ui-e2e-checks/allure-results
+          gh_pages: allure-results
+          allure_report: allure-report
+          subfolder: allure-results
+          report_url: "http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com"
+      - uses: jakejarvis/s3-sync-action@master
+        if: always()
+        env:
+          AWS_S3_BUCKET: 'kafkaui-allure-reports'
+          AWS_REGION: 'eu-central-1'
+          SOURCE_DIR: 'allure-history/allure-results'
+      - name: Deploy report to Amazon S3
+        if: always()
+        uses: Sibz/github-status-action@v1.1.6
+        with:
+          authToken: ${{secrets.GITHUB_TOKEN}}
+          context: "Click Details button to open Allure report"
+          state: "success"
+          sha: ${{ github.sha }}
+          target_url: http://kafkaui-allure-reports.s3-website.eu-central-1.amazonaws.com/${{ github.run_number }}
+      - name: Dump Docker logs on failure
+        if: failure()
+        uses: jwalton/gh-docker-logs@v2.2.1

+ 2 - 2
.github/workflows/frontend.yaml

@@ -1,4 +1,4 @@
-name: frontend
+name: Frontend build and test
 on:
   push:
     branches:
@@ -24,7 +24,7 @@ jobs:
         with:
           version: 7.4.0
       - name: Install node
-        uses: actions/setup-node@v3.5.1
+        uses: actions/setup-node@v3.6.0
         with:
           node-version: "16.15.0"
           cache: "pnpm"

+ 1 - 1
.github/workflows/helm.yaml

@@ -1,4 +1,4 @@
-name: Helm
+name: Helm linter
 on:
  pull_request:
   types: ["opened", "edited", "reopened", "synchronize"]

+ 4 - 2
.github/workflows/master.yaml

@@ -1,4 +1,4 @@
-name: Master
+name: Master branch build & deploy
 on:
   workflow_dispatch:
   push:
@@ -9,6 +9,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
 
       - name: Set up JDK
         uses: actions/setup-java@v3
@@ -51,7 +53,7 @@ jobs:
 
       - name: Build and push
         id: docker_build_and_push
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v4
         with:
           builder: ${{ steps.buildx.outputs.name }}
           context: kafka-ui-api

+ 1 - 1
.github/workflows/pr-checks.yaml

@@ -7,7 +7,7 @@ jobs:
   task-check:
     runs-on: ubuntu-latest
     steps:
-      - uses: kentaro-m/task-completed-checker-action@v0.1.0
+      - uses: kentaro-m/task-completed-checker-action@v0.1.1
         with:
           repo-token: "${{ secrets.GITHUB_TOKEN }}"
       - uses: dekinderfiets/pr-description-enforcer@0.0.1

+ 1 - 1
.github/workflows/release-serde-api.yaml

@@ -1,4 +1,4 @@
-name: Release-serde-api
+name: Release serde api
 on: workflow_dispatch
 
 jobs:

+ 3 - 2
.github/workflows/release.yaml

@@ -12,6 +12,7 @@ jobs:
       - uses: actions/checkout@v3
         with:
           fetch-depth: 0
+          ref: ${{ github.event.pull_request.head.sha }}
 
       - run: |
           git config user.name github-actions
@@ -33,7 +34,7 @@ jobs:
           echo "version=${VERSION}" >> $GITHUB_OUTPUT
 
       - name: Upload files to a GitHub release
-        uses: svenstaro/upload-release-action@2.3.0
+        uses: svenstaro/upload-release-action@2.5.0
         with:
           repo_token: ${{ secrets.GITHUB_TOKEN }}
           file: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar
@@ -71,7 +72,7 @@ jobs:
 
       - name: Build and push
         id: docker_build_and_push
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v4
         with:
           builder: ${{ steps.buildx.outputs.name }}
           context: kafka-ui-api

+ 4 - 2
.github/workflows/separate_env_public_create.yml

@@ -12,6 +12,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v3
+        with:
+          ref: ${{ github.event.pull_request.head.sha }}
       - name: get branch name
         id: extract_branch
         run: |
@@ -45,7 +47,7 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-buildx-
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -55,7 +57,7 @@ jobs:
         uses: aws-actions/amazon-ecr-login@v1
       - name: Build and push
         id: docker_build_and_push
-        uses: docker/build-push-action@v3
+        uses: docker/build-push-action@v4
         with:
           builder: ${{ steps.buildx.outputs.name }}
           context: kafka-ui-api

+ 1 - 1
.github/workflows/stale.yaml

@@ -7,7 +7,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v6
+      - uses: actions/stale@v8
         with:
           days-before-issue-stale: 7
           days-before-issue-close: 3

+ 2 - 2
.github/workflows/terraform-deploy.yml

@@ -1,4 +1,4 @@
-name: terraform_deploy
+name: Terraform deploy
 on:
   workflow_dispatch:
     inputs:
@@ -26,7 +26,7 @@ jobs:
           echo "Terraform will be triggered in this dir $TF_DIR"
 
       - name: Configure AWS credentials for Kafka-UI account
-        uses: aws-actions/configure-aws-credentials@v1
+        uses: aws-actions/configure-aws-credentials@v2
         with:
           aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
           aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

+ 4 - 2
CONTRIBUTING.md

@@ -1,3 +1,5 @@
+This guide is an exact copy of the same documented located [in our official docs](https://docs.kafka-ui.provectus.io/development/contributing). If there are any differences between the documents, the one located in our official docs should prevail.
+
 This guide aims to walk you through the process of working on issues and Pull Requests (PRs).
 
 Bear in mind that you will not be able to complete some steps on your own if you do not have a “write” permission. Feel free to reach out to the maintainers to help you unlock these activities.
@@ -20,7 +22,7 @@ You also need to consider labels. You can sort the issues by scope labels, such
 ## Grabbing the issue
 
 There is a bunch of criteria that make an issue feasible for development. <br/>
-The implementation of any features and/or their enhancements should be reasonable, must be backed by justified requirements (demanded by the community, [roadmap](documentation/project/ROADMAP.md) plans, etc.). The final decision is left for the maintainers' discretion.
+The implementation of any features and/or their enhancements should be reasonable, must be backed by justified requirements (demanded by the community, [roadmap](https://docs.kafka-ui.provectus.io/project/roadmap) plans, etc.). The final decision is left for the maintainers' discretion.
 
 All bugs should be confirmed as such (i.e. the behavior is unintended).
 
@@ -39,7 +41,7 @@ To keep the status of the issue clear to everyone, please keep the card's status
 
 ## Setting up a local development environment
 
-Please refer to [this guide](documentation/project/contributing/README.md).
+Please refer to [this guide](https://docs.kafka-ui.provectus.io/development/contributing).
 
 # Pull Requests
 

+ 54 - 133
README.md

@@ -1,21 +1,31 @@
 ![UI for Apache Kafka logo](documentation/images/kafka-ui-logo.png) UI for Apache Kafka&nbsp;
 ------------------
 #### Versatile, fast and lightweight web UI for managing Apache Kafka® clusters. Built by developers, for developers.
+<br/>
 
 [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://github.com/provectus/kafka-ui/blob/master/LICENSE)
 ![UI for Apache Kafka Price Free](documentation/images/free-open-source.svg)
 [![Release version](https://img.shields.io/github/v/release/provectus/kafka-ui)](https://github.com/provectus/kafka-ui/releases)
 [![Chat with us](https://img.shields.io/discord/897805035122077716)](https://discord.gg/4DWzD7pGE5)
+[![Docker pulls](https://img.shields.io/docker/pulls/provectuslabs/kafka-ui)](https://hub.docker.com/r/provectuslabs/kafka-ui)
 
-### DISCLAIMER
-<em>UI for Apache Kafka is a free tool built and supported by the open-source community. Curated by Provectus, it will remain free and open-source, without any paid features or subscription plans to be added in the future.
-Looking for the help of Kafka experts? Provectus can help you design, build, deploy, and manage Apache Kafka clusters and streaming applications. Discover [Professional Services for Apache Kafka](https://provectus.com/professional-services-apache-kafka/), to unlock the full potential of Kafka in your enterprise! </em>
-
+<p align="center">
+    <a href="https://docs.kafka-ui.provectus.io/">DOCS</a> • 
+    <a href="https://docs.kafka-ui.provectus.io/configuration/quick-start">QUICK START</a> • 
+    <a href="https://discord.gg/4DWzD7pGE5">COMMUNITY DISCORD</a>
+    <br/>
+    <a href="https://aws.amazon.com/marketplace/pp/prodview-ogtt5hfhzkq6a">AWS Marketplace</a>  •
+    <a href="https://www.producthunt.com/products/ui-for-apache-kafka/reviews/new">ProductHunt</a>
+</p>
 
 #### UI for Apache Kafka is a free, open-source web UI to monitor and manage Apache Kafka clusters.
 
 UI for Apache Kafka is a simple tool that makes your data flows observable, helps find and troubleshoot issues faster and deliver optimal performance. Its lightweight dashboard makes it easy to track key metrics of your Kafka clusters - Brokers, Topics, Partitions, Production, and Consumption.
 
+### DISCLAIMER
+<em>UI for Apache Kafka is a free tool built and supported by the open-source community. Curated by Provectus, it will remain free and open-source, without any paid features or subscription plans to be added in the future.
+Looking for the help of Kafka experts? Provectus can help you design, build, deploy, and manage Apache Kafka clusters and streaming applications. Discover [Professional Services for Apache Kafka](https://provectus.com/professional-services-apache-kafka/), to unlock the full potential of Kafka in your enterprise! </em>
+
 Set up UI for Apache Kafka with just a couple of easy commands to visualize your Kafka data in a comprehensible way. You can run the tool locally or in
 the cloud.
 
@@ -29,10 +39,10 @@ the cloud.
 * **View Consumer Groups** — view per-partition parked offsets, combined and per-partition lag
 * **Browse Messages** — browse messages with JSON, plain text, and Avro encoding
 * **Dynamic Topic Configuration** — create and configure new topics with dynamic configuration
-* **Configurable Authentification** — secure your installation with optional Github/Gitlab/Google OAuth 2.0
-* **Custom serialization/deserialization plugins** - use a ready-to-go serde for your data like AWS Glue or Smile, or code your own!
-* **Role based access control** - [manage permissions](https://github.com/provectus/kafka-ui/wiki/RBAC-(role-based-access-control)) to access the UI with granular precision
-* **Data masking** - [obfuscate](https://github.com/provectus/kafka-ui/blob/master/documentation/guides/DataMasking.md) sensitive data in topic messages
+* **Configurable Authentification** — [secure](https://docs.kafka-ui.provectus.io/configuration/authentication) your installation with optional Github/Gitlab/Google OAuth 2.0
+* **Custom serialization/deserialization plugins** - [use](https://docs.kafka-ui.provectus.io/configuration/serialization-serde) a ready-to-go serde for your data like AWS Glue or Smile, or code your own!
+* **Role based access control** - [manage permissions](https://docs.kafka-ui.provectus.io/configuration/rbac-role-based-access-control) to access the UI with granular precision
+* **Data masking** - [obfuscate](https://docs.kafka-ui.provectus.io/configuration/data-masking) sensitive data in topic messages
 
 # The Interface
 UI for Apache Kafka wraps major functions of Apache Kafka with an intuitive user interface.
@@ -60,157 +70,68 @@ There are 3 supported types of schemas: Avro®, JSON Schema, and Protobuf schema
 
 ![Create Schema Registry](documentation/images/Create_schema.gif)
 
-Before producing avro-encoded messages, you have to add an avro schema for the topic in Schema Registry. Now all these steps are easy to do
+Before producing avro/protobuf encoded messages, you have to add a schema for the topic in Schema Registry. Now all these steps are easy to do
 with a few clicks in a user-friendly interface.
 
 ![Avro Schema Topic](documentation/images/Schema_Topic.gif)
 
 # Getting Started
 
-To run UI for Apache Kafka, you can use a pre-built Docker image or build it locally.
-
-## Configuration
-
-We have plenty of [docker-compose files](documentation/compose/DOCKER_COMPOSE.md) as examples. They're built for various configuration stacks.
-
-# Guides
-
-- [SSO configuration](documentation/guides/SSO.md)
-- [AWS IAM configuration](documentation/guides/AWS_IAM.md)
-- [Docker-compose files](documentation/compose/DOCKER_COMPOSE.md)
-- [Connection to a secure broker](documentation/guides/SECURE_BROKER.md)
-- [Configure seriliazation/deserialization plugins or code your own](documentation/guides/Serialization.md)
+To run UI for Apache Kafka, you can use either a pre-built Docker image or build it (or a jar file) yourself.
 
-### Configuration File
-Example of how to configure clusters in the [application-local.yml](https://github.com/provectus/kafka-ui/blob/master/kafka-ui-api/src/main/resources/application-local.yml) configuration file:
+## Quick start (Demo run)
 
-
-```sh
-kafka:
-  clusters:
-    -
-      name: local
-      bootstrapServers: localhost:29091
-      schemaRegistry: http://localhost:8085
-      schemaRegistryAuth:
-        username: username
-        password: password
-#     schemaNameTemplate: "%s-value"
-      metrics:
-        port: 9997
-        type: JMX
-    -
+```
+docker run -it -p 8080:8080 -e DYNAMIC_CONFIG_ENABLED=true provectuslabs/kafka-ui
 ```
 
-* `name`: cluster name
-* `bootstrapServers`: where to connect
-* `schemaRegistry`: schemaRegistry's address
-* `schemaRegistryAuth.username`: schemaRegistry's basic authentication username
-* `schemaRegistryAuth.password`: schemaRegistry's basic authentication password
-* `schemaNameTemplate`: how keys are saved to schemaRegistry
-* `metrics.port`: open JMX port of a broker
-* `metrics.type`: Type of metrics, either JMX or PROMETHEUS. Defaulted to JMX.
-* `readOnly`: enable read only mode
-
-Configure as many clusters as you need by adding their configs below separated with `-`.
-
-## Running a Docker Image
-The official Docker image for UI for Apache Kafka is hosted here: [hub.docker.com/r/provectuslabs/kafka-ui](https://hub.docker.com/r/provectuslabs/kafka-ui).
+Then access the web UI at [http://localhost:8080](http://localhost:8080)
 
-Launch Docker container in the background:
-```sh
+The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/configuration/quick-start#persistent-start)
 
-docker run -p 8080:8080 \
-	-e KAFKA_CLUSTERS_0_NAME=local \
-	-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092 \
-	-d provectuslabs/kafka-ui:latest
+## Persistent installation
 
 ```
-Then access the web UI at [http://localhost:8080](http://localhost:8080).
-Further configuration with environment variables - [see environment variables](#env_variables)
-
-### Docker Compose
-
-If you prefer to use `docker-compose` please refer to the [documentation](docker-compose.md).
-
-### Helm chart
-Helm chart could be found under [charts/kafka-ui](https://github.com/provectus/kafka-ui/tree/master/charts/kafka-ui) directory
+services:
+  kafka-ui:
+    container_name: kafka-ui
+    image: provectuslabs/kafka-ui:latest
+    ports:
+      - 8080:8080
+    environment:
+      DYNAMIC_CONFIG_ENABLED: true
+    volumes:
+      - ~/kui/config.yml:/etc/kafkaui/dynamic_config.yaml
+```
 
-Quick-start instruction [here](helm_chart.md)
+Please refer to our [configuration](https://docs.kafka-ui.provectus.io/configuration/quick-start) page to proceed with further app configuration.
 
-## Building With Docker
+## Some useful configuration related links
 
-### Prerequisites
+[Web UI Cluster Configuration Wizard](https://docs.kafka-ui.provectus.io/configuration/configuration-wizard)
 
-Check [prerequisites.md](documentation/project/contributing/prerequisites.md)
+[Configuration file explanation](https://docs.kafka-ui.provectus.io/configuration/configuration-file)
 
-### Building and Running
+[Docker Compose examples](https://docs.kafka-ui.provectus.io/configuration/compose-examples)
 
-Check [building.md](documentation/project/contributing/building.md)
+[Misc configuration properties](https://docs.kafka-ui.provectus.io/configuration/misc-configuration-properties)
 
-## Building Without Docker
+## Helm charts
 
-### Prerequisites
+[Quick start](https://docs.kafka-ui.provectus.io/configuration/helm-charts/quick-start)
 
-[Prerequisites](documentation/project/contributing/prerequisites.md) will mostly remain the same with the exception of docker.
+## Building from sources
 
-### Running without Building
+[Quick start](https://docs.kafka-ui.provectus.io/development/building/prerequisites) with building
 
-[How to run quickly without building](documentation/project/contributing/building-and-running-without-docker.md#run_without_docker_quickly)
+## Liveliness and readiness probes
+Liveliness and readiness endpoint is at `/actuator/health`.<br/>
+Info endpoint (build info) is located at `/actuator/info`.
 
-### Building and Running
+# Configuration options
 
-[How to build and run](documentation/project/contributing/building-and-running-without-docker.md#build_and_run_without_docker)
+All of the environment variables/config properties could be found [here](https://docs.kafka-ui.provectus.io/configuration/misc-configuration-properties).
 
-## Liveliness and readiness probes
-Liveliness and readiness endpoint is at `/actuator/health`.
-Info endpoint (build info) is located at `/actuator/info`.
+# Contributing
 
-## <a name="env_variables"></a> Environment Variables
-
-Alternatively, each variable of the .yml file can be set with an environment variable.
-For example, if you want to use an environment variable to set the `name` parameter, you can write it like this: `KAFKA_CLUSTERS_2_NAME`
-
-|Name               	|Description
-|-----------------------|-------------------------------
-|`SERVER_SERVLET_CONTEXT_PATH` | URI basePath
-|`LOGGING_LEVEL_ROOT`        	| Setting log level (trace, debug, info, warn, error). Default: info
-|`LOGGING_LEVEL_COM_PROVECTUS` |Setting log level (trace, debug, info, warn, error). Default: debug
-|`SERVER_PORT` |Port for the embedded server. Default: `8080`
-|`KAFKA_ADMIN-CLIENT-TIMEOUT` | Kafka API timeout in ms. Default: `30000`
-|`KAFKA_CLUSTERS_0_NAME` | Cluster name
-|`KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS` 	|Address where to connect
-|`KAFKA_CLUSTERS_0_KSQLDBSERVER` 	| KSQL DB server address
-|`KAFKA_CLUSTERS_0_KSQLDBSERVERAUTH_USERNAME` 	| KSQL DB server's basic authentication username
-|`KAFKA_CLUSTERS_0_KSQLDBSERVERAUTH_PASSWORD` 	| KSQL DB server's basic authentication password
-|`KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTORELOCATION`   	|Path to the JKS keystore to communicate to KSQL DB
-|`KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTOREPASSWORD`   	|Password of the JKS keystore for KSQL DB
-|`KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_TRUSTSTORELOCATION`   	|Path to the JKS truststore to communicate to KSQL DB
-|`KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_TRUSTSTOREPASSWORD`   	|Password of the JKS truststore for KSQL DB
-|`KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL` 	|Security protocol to connect to the brokers. For SSL connection use "SSL", for plaintext connection don't set this environment variable
-|`KAFKA_CLUSTERS_0_SCHEMAREGISTRY`   	|SchemaRegistry's address
-|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME`   	|SchemaRegistry's basic authentication username
-|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD`   	|SchemaRegistry's basic authentication password
-|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_KEYSTORELOCATION`   	|Path to the JKS keystore to communicate to SchemaRegistry
-|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_KEYSTOREPASSWORD`   	|Password of the JKS keystore for SchemaRegistry
-|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_TRUSTSTORELOCATION`   	|Path to the JKS truststore to communicate to SchemaRegistry
-|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_TRUSTSTOREPASSWORD`   	|Password of the JKS truststore for SchemaRegistry
-|`KAFKA_CLUSTERS_0_SCHEMANAMETEMPLATE` |How keys are saved to schemaRegistry
-|`KAFKA_CLUSTERS_0_METRICS_PORT`        	 |Open metrics port of a broker
-|`KAFKA_CLUSTERS_0_METRICS_TYPE`        	 |Type of metrics retriever to use. Valid values are JMX (default) or PROMETHEUS. If Prometheus, then metrics are read from prometheus-jmx-exporter instead of jmx
-|`KAFKA_CLUSTERS_0_READONLY`        	|Enable read-only mode. Default: false
-|`KAFKA_CLUSTERS_0_DISABLELOGDIRSCOLLECTION`        	|Disable collecting segments information. It should be true for confluent cloud. Default: false
-|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME` |Given name for the Kafka Connect cluster
-|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS` |Address of the Kafka Connect service endpoint
-|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_USERNAME`| Kafka Connect cluster's basic authentication username
-|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_PASSWORD`| Kafka Connect cluster's basic authentication password
-|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTORELOCATION`| Path to the JKS keystore to communicate to Kafka Connect
-|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTOREPASSWORD`| Password of the JKS keystore for Kafka Connect
-|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_TRUSTSTORELOCATION`| Path to the JKS truststore to communicate to Kafka Connect
-|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_TRUSTSTOREPASSWORD`| Password of the JKS truststore for Kafka Connect
-|`KAFKA_CLUSTERS_0_METRICS_SSL`          |Enable SSL for Metrics? `true` or `false`. For advanced setup, see `kafka-ui-jmx-secured.yml`
-|`KAFKA_CLUSTERS_0_METRICS_USERNAME` |Username for Metrics authentication
-|`KAFKA_CLUSTERS_0_METRICS_PASSWORD` |Password for Metrics authentication
-|`KAFKA_CLUSTERS_0_POLLING_THROTTLE_RATE` |Max traffic rate (bytes/sec) that kafka-ui allowed to reach when polling messages from the cluster. Default: 0 (not limited)
-|`TOPIC_RECREATE_DELAY_SECONDS` |Time delay between topic deletion and topic creation attempts for topic recreate functionality. Default: 1
-|`TOPIC_RECREATE_MAXRETRIES`  |Number of attempts of topic creation after topic deletion for topic recreate functionality. Default: 15
+Please refer to [contributing guide](https://docs.kafka-ui.provectus.io/development/contributing), we'll guide you from there.

+ 3 - 1
SECURITY.md

@@ -6,7 +6,9 @@ Following versions of the project are currently being supported with security up
 
 | Version | Supported          |
 | ------- | ------------------ |
-| 0.4.x   | :white_check_mark: |
+| 0.6.x   | :white_check_mark: |
+| 0.5.x   | :x:                |
+| 0.4.x   | :x:                |
 | 0.3.x   | :x:                |
 | 0.2.x   | :x:                |
 | 0.1.x   | :x:                |

+ 2 - 2
charts/kafka-ui/Chart.yaml

@@ -2,6 +2,6 @@ apiVersion: v2
 name: kafka-ui
 description: A Helm chart for kafka-UI
 type: application
-version: 0.5.1
-appVersion: v0.5.0
+version: 0.6.2
+appVersion: v0.6.2
 icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png

+ 1 - 34
charts/kafka-ui/README.md

@@ -1,34 +1 @@
-# Kafka-UI Helm Chart
-
-## Configuration
-
-Most of the Helm charts parameters are common, follow table describe unique parameters related to application configuration.
-
-### Kafka-UI parameters
-
-| Parameter                                | Description                                                                                                                                    | Default |
-| ---------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------- |
-| `existingConfigMap`                      | Name of the existing ConfigMap with Kafka-UI environment variables                                                                             | `nil`   |
-| `existingSecret`                         | Name of the existing Secret with Kafka-UI environment variables                                                                                | `nil`   |
-| `envs.secret`                            | Set of the sensitive environment variables to pass to Kafka-UI                                                                                 | `{}`    |
-| `envs.config`                            | Set of the environment variables to pass to Kafka-UI                                                                                           | `{}`    |
-| `yamlApplicationConfigConfigMap`         | Map with name and keyName keys, name refers to the existing ConfigMap, keyName refers to the ConfigMap key with Kafka-UI config in Yaml format | `{}`    |
-| `yamlApplicationConfig`                  | Kafka-UI config in Yaml format                                                                                                                 | `{}`    |
-| `networkPolicy.enabled`                  | Enable network policies                                                                                                                        | `false` |
-| `networkPolicy.egressRules.customRules`  | Custom network egress policy rules                                                                                                             | `[]`    |
-| `networkPolicy.ingressRules.customRules` | Custom network ingress policy rules                                                                                                            | `[]`    |
-| `podLabels`                              | Extra labels for Kafka-UI pod                                                                                                                  | `{}`    |
-
-
-## Example
-
-To install Kafka-UI need to execute follow:
-``` bash
-helm repo add kafka-ui https://provectus.github.io/kafka-ui
-helm install kafka-ui kafka-ui/kafka-ui --set envs.config.KAFKA_CLUSTERS_0_NAME=local --set envs.config.KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
-```
-To connect to Kafka-UI web application need to execute:
-``` bash
-kubectl port-forward svc/kafka-ui 8080:80
-```
-Open the `http://127.0.0.1:8080` on the browser to access Kafka-UI.
+Please refer to our [documentation](https://docs.kafka-ui.provectus.io/configuration/helm-charts) to get some info on our helm charts.

+ 5 - 0
charts/kafka-ui/templates/_helpers.tpl

@@ -68,6 +68,11 @@ This allows us to check if the registry of the image is specified or not.
 */}}
 {{- define "kafka-ui.imageName" -}}
 {{- $registryName := .Values.image.registry -}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+     {{- $registryName = .Values.global.imageRegistry -}}
+    {{- end -}}
+{{- end -}}
 {{- $repository := .Values.image.repository -}}
 {{- $tag := .Values.image.tag | default .Chart.AppVersion -}}
 {{- if $registryName }}
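
The `_helpers.tpl` change above lets a shared `global.imageRegistry` value take precedence over the chart-level `image.registry`. A minimal values sketch that would exercise it (the mirror hostname is illustrative, not from this commit):

```yaml
# Hypothetical values.yaml override for kafka-ui.imageName.
global:
  imageRegistry: registry.example.com   # overrides image.registry when set

image:
  registry: docker.io                   # ignored while global.imageRegistry is set
  repository: provectuslabs/kafka-ui
  # tag falls back to .Chart.AppVersion when omitted
```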

+ 3 - 3
charts/kafka-ui/templates/ingress.yaml

@@ -35,7 +35,7 @@ spec:
 {{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") $isHigher1p19 -}}
           {{- range .Values.ingress.precedingPaths }}
           - path: {{ .path }}
-            pathType: Prefix
+            pathType: {{ .Values.ingress.pathType }}
             backend:
               service:
                 name: {{ .serviceName }}
@@ -47,13 +47,13 @@ spec:
                 name: {{ $fullName }}
                 port:
                   number: {{ $svcPort }}
-            pathType: Prefix
+            pathType: {{ .Values.ingress.pathType }}
 {{- if .Values.ingress.path }}
             path: {{ .Values.ingress.path }}
 {{- end }}
           {{- range .Values.ingress.succeedingPaths }}
           - path: {{ .path }}
-            pathType: Prefix
+            pathType: {{ .Values.ingress.pathType }}
             backend:
               service:
                 name: {{ .serviceName }}

+ 2 - 0
charts/kafka-ui/templates/secret.yaml

@@ -1,3 +1,4 @@
+{{- if .Values.envs.secret -}}
 apiVersion: v1
 kind: Secret
 metadata:
@@ -9,3 +10,4 @@ data:
   {{- range $key, $val := .Values.envs.secret }}
   {{ $key }}: {{ $val | b64enc | quote }}
   {{- end -}}
+{{- end}}
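
With the guard added above, the Secret manifest is rendered only when `envs.secret` is non-empty. A minimal values sketch that would produce it (the variable value is a placeholder):

```yaml
# Hypothetical values.yaml fragment; if envs.secret stays empty ({}),
# the Secret object is not rendered at all.
envs:
  secret:
    KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD: "example-password"
```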

+ 3 - 0
charts/kafka-ui/values.yaml

@@ -111,6 +111,9 @@ ingress:
   # The path for the Ingress
   path: "/"
 
+  # The path type for the Ingress
+  pathType: "Prefix"  
+
   # The hostname for the Ingress
   host: ""
 

+ 0 - 43
docker-compose.md

@@ -1,43 +0,0 @@
-# Quick Start with docker-compose
-
-Environment variables documentation - [see usage](README.md#env_variables).<br/>
-We have plenty of example files with more complex configurations. Please check them out in ``docker`` directory.
-
-* Add a new service in docker-compose.yml
-
-```yaml
-version: '2'
-services:
-  kafka-ui:
-    image: provectuslabs/kafka-ui
-    container_name: kafka-ui
-    ports:
-      - "8080:8080"
-    restart: always
-    environment:
-      - KAFKA_CLUSTERS_0_NAME=local
-      - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
-```
-
-* If you prefer UI for Apache Kafka in read only mode
-   
-```yaml
-version: '2'
-services:
-  kafka-ui:
-    image: provectuslabs/kafka-ui
-    container_name: kafka-ui
-    ports:
-      - "8080:8080"
-    restart: always
-    environment:
-      - KAFKA_CLUSTERS_0_NAME=local
-      - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
-      - KAFKA_CLUSTERS_0_READONLY=true
-```
-  
-* Start the UI for Apache Kafka process
-
-```bash
-docker-compose up -d kafka-ui
-```

+ 27 - 27
documentation/compose/e2e-tests.yaml

@@ -11,14 +11,14 @@ services:
       test: wget --no-verbose --tries=1 --spider  http://localhost:8080/actuator/health
       interval: 30s
       timeout: 10s
-      retries: 10  
+      retries: 10
     depends_on:
-        kafka0:
-          condition: service_healthy
-        schemaregistry0:
-          condition: service_healthy
-        kafka-connect0:
-          condition: service_healthy
+      kafka0:
+        condition: service_healthy
+      schemaregistry0:
+        condition: service_healthy
+      kafka-connect0:
+        condition: service_healthy
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
@@ -33,10 +33,10 @@ services:
     hostname: kafka0
     container_name: kafka0
     healthcheck:
-     test: unset JMX_PORT && KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9999" && kafka-broker-api-versions --bootstrap-server=localhost:9092
-     interval: 30s
-     timeout: 10s
-     retries: 10
+      test: unset JMX_PORT && KAFKA_JMX_OPTS="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9999" && kafka-broker-api-versions --bootstrap-server=localhost:9092
+      interval: 30s
+      timeout: 10s
+      retries: 10
     ports:
       - "9092:9092"
       - "9997:9997"
@@ -68,12 +68,12 @@ services:
       - 8085:8085
     depends_on:
       kafka0:
-          condition: service_healthy
+        condition: service_healthy
     healthcheck:
-     test: ["CMD", "timeout", "1", "curl", "--silent", "--fail", "http://schemaregistry0:8085/subjects"]
-     interval: 30s
-     timeout: 10s
-     retries: 10
+      test: [ "CMD", "timeout", "1", "curl", "--silent", "--fail", "http://schemaregistry0:8085/subjects" ]
+      interval: 30s
+      timeout: 10s
+      retries: 10
     environment:
       SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
       SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
@@ -93,11 +93,11 @@ services:
       - 8083:8083
     depends_on:
       kafka0:
-          condition: service_healthy
+        condition: service_healthy
       schemaregistry0:
-          condition: service_healthy
+        condition: service_healthy
     healthcheck:
-      test: ["CMD", "nc", "127.0.0.1", "8083"]
+      test: [ "CMD", "nc", "127.0.0.1", "8083" ]
       interval: 30s
       timeout: 10s
       retries: 10
@@ -118,8 +118,8 @@ services:
       CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
       CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
       CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
-#      AWS_ACCESS_KEY_ID: ""
-#      AWS_SECRET_ACCESS_KEY: ""
+  #      AWS_ACCESS_KEY_ID: ""
+  #      AWS_SECRET_ACCESS_KEY: ""
 
   kafka-init-topics:
     image: confluentinc/cp-kafka:7.2.1
@@ -127,7 +127,7 @@ services:
       - ./message.json:/data/message.json
     depends_on:
       kafka0:
-          condition: service_healthy
+        condition: service_healthy
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
                cub kafka-ready -b kafka0:29092 1 30 && \
                kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
@@ -142,10 +142,10 @@ services:
     ports:
       - 5432:5432
     healthcheck:
-      test: ["CMD-SHELL", "pg_isready -U dev_user"]
+      test: [ "CMD-SHELL", "pg_isready -U dev_user" ]
       interval: 10s
       timeout: 5s
-      retries: 5  
+      retries: 5
     environment:
       POSTGRES_USER: 'dev_user'
       POSTGRES_PASSWORD: '12345'
@@ -154,7 +154,7 @@ services:
     image: ellerbrock/alpine-bash-curl-ssl
     depends_on:
       postgres-db:
-          condition: service_healthy
+        condition: service_healthy
       kafka-connect0:
         condition: service_healthy
     volumes:
@@ -164,7 +164,7 @@ services:
   ksqldb:
     image: confluentinc/ksqldb-server:0.18.0
     healthcheck:
-      test: ["CMD", "timeout", "1", "curl", "--silent", "--fail", "http://localhost:8088/info"]
+      test: [ "CMD", "timeout", "1", "curl", "--silent", "--fail", "http://localhost:8088/info" ]
       interval: 30s
       timeout: 10s
       retries: 10
@@ -174,7 +174,7 @@ services:
       kafka-connect0:
         condition: service_healthy
       schemaregistry0:
-         condition: service_healthy
+        condition: service_healthy
     ports:
       - 8088:8088
     environment:

+ 1 - 1
documentation/compose/jaas/client.properties

@@ -11,4 +11,4 @@ KafkaClient {
     user_admin="admin-secret";
 };
 
-Client {};
+Client {};

+ 8 - 10
documentation/compose/jaas/schema_registry.jaas

@@ -15,27 +15,25 @@ services:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092 # SSL LISTENER!
-      KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
-      KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_PASSWORD: secret
-      KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
-      KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_PASSWORD: secret
       KAFKA_CLUSTERS_0_PROPERTIES_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: '' # DISABLE COMMON NAME VERIFICATION
+
       KAFKA_CLUSTERS_0_SCHEMAREGISTRY: https://schemaregistry0:8085
       KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_KEYSTORELOCATION: /kafka.keystore.jks
       KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_KEYSTOREPASSWORD: "secret"
-      KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_TRUSTSTORELOCATION: /kafka.truststore.jks
-      KAFKA_CLUSTERS_0_SCHEMAREGISTRYSSL_TRUSTSTOREPASSWORD: "secret"
+
       KAFKA_CLUSTERS_0_KSQLDBSERVER: https://ksqldb0:8088
       KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTORELOCATION: /kafka.keystore.jks
       KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_KEYSTOREPASSWORD: "secret"
-      KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_TRUSTSTORELOCATION: /kafka.truststore.jks
-      KAFKA_CLUSTERS_0_KSQLDBSERVERSSL_TRUSTSTOREPASSWORD: "secret"
+
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: local
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: https://kafka-connect0:8083
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTORELOCATION: /kafka.keystore.jks
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_KEYSTOREPASSWORD: "secret"
-      KAFKA_CLUSTERS_0_KAFKACONNECT_0_TRUSTSTORELOCATION: /kafka.truststore.jks
-      KAFKA_CLUSTERS_0_KAFKACONNECT_0_TRUSTSTOREPASSWORD: "secret"
+
+      KAFKA_CLUSTERS_0_SSL_TRUSTSTORELOCATION: /kafka.truststore.jks
+      KAFKA_CLUSTERS_0_SSL_TRUSTSTOREPASSWORD: "secret"
+      DYNAMIC_CONFIG_ENABLED: 'true'  # not necessary for ssl, added for tests
+
     volumes:
       - ./ssl/kafka.truststore.jks:/kafka.truststore.jks
       - ./ssl/kafka.keystore.jks:/kafka.keystore.jks

+ 5 - 5
documentation/compose/kafka-ssl.yml

@@ -11,11 +11,11 @@ services:
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
-      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092 # SSL LISTENER!
-      KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
-      KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_PASSWORD: secret
       KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
-      KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_PASSWORD: secret
+      KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_PASSWORD: "secret"
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092 # SSL LISTENER!
+      KAFKA_CLUSTERS_0_SSL_TRUSTSTORELOCATION: /kafka.truststore.jks
+      KAFKA_CLUSTERS_0_SSL_TRUSTSTOREPASSWORD: "secret"
       KAFKA_CLUSTERS_0_PROPERTIES_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: '' # DISABLE COMMON NAME VERIFICATION
     volumes:
       - ./ssl/kafka.truststore.jks:/kafka.truststore.jks
@@ -60,4 +60,4 @@ services:
       - ./ssl/creds:/etc/kafka/secrets/creds
       - ./ssl/kafka.truststore.jks:/etc/kafka/secrets/kafka.truststore.jks
       - ./ssl/kafka.keystore.jks:/etc/kafka/secrets/kafka.keystore.jks
-    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

+ 1 - 0
documentation/compose/kafka-ui-arm64.yaml

@@ -19,6 +19,7 @@ services:
       KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schema-registry0:8085
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
+      DYNAMIC_CONFIG_ENABLED: 'true'  # not necessary, added for tests
 
   kafka0:
     image: confluentinc/cp-kafka:7.2.1.arm64

+ 4 - 65
documentation/compose/kafka-ui-jmx-secured.yml

@@ -7,11 +7,8 @@ services:
     image: provectuslabs/kafka-ui:latest
     ports:
       - 8080:8080
-      - 5005:5005
     depends_on:
       - kafka0
-      - schemaregistry0
-      - kafka-connect0
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
@@ -19,15 +16,12 @@ services:
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
       KAFKA_CLUSTERS_0_METRICS_PORT: 9997
-      KAFKA_CLUSTERS_0_METRICS_SSL: 'true'
       KAFKA_CLUSTERS_0_METRICS_USERNAME: root
       KAFKA_CLUSTERS_0_METRICS_PASSWORD: password
-      JAVA_OPTS: >-
-        -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=*:5005
-        -Djavax.net.ssl.trustStore=/jmx/clienttruststore
-        -Djavax.net.ssl.trustStorePassword=12345678
-        -Djavax.net.ssl.keyStore=/jmx/clientkeystore
-        -Djavax.net.ssl.keyStorePassword=12345678
+      KAFKA_CLUSTERS_0_METRICS_KEYSTORE_LOCATION: /jmx/clientkeystore
+      KAFKA_CLUSTERS_0_METRICS_KEYSTORE_PASSWORD: '12345678'
+      KAFKA_CLUSTERS_0_SSL_TRUSTSTORE_LOCATION: /jmx/clienttruststore
+      KAFKA_CLUSTERS_0_SSL_TRUSTSTORE_PASSWORD: '12345678'
     volumes:
       - ./jmx/clienttruststore:/jmx/clienttruststore
       - ./jmx/clientkeystore:/jmx/clientkeystore
@@ -70,8 +64,6 @@ services:
         -Dcom.sun.management.jmxremote.access.file=/jmx/jmxremote.access
         -Dcom.sun.management.jmxremote.rmi.port=9997
         -Djava.rmi.server.hostname=kafka0
-        -Djava.rmi.server.logCalls=true
-#        -Djavax.net.debug=ssl:handshake
     volumes:
       - ./jmx/serverkeystore:/jmx/serverkeystore
       - ./jmx/servertruststore:/jmx/servertruststore
@@ -79,56 +71,3 @@ services:
       - ./jmx/jmxremote.access:/jmx/jmxremote.access
       - ./scripts/update_run.sh:/tmp/update_run.sh
     command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
-
-  schemaregistry0:
-    image: confluentinc/cp-schema-registry:7.2.1
-    ports:
-      - 8085:8085
-    depends_on:
-      - kafka0
-    environment:
-      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
-      SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
-      SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
-      SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
-
-      SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
-      SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
-      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
-
-  kafka-connect0:
-    image: confluentinc/cp-kafka-connect:7.2.1
-    ports:
-      - 8083:8083
-    depends_on:
-      - kafka0
-      - schemaregistry0
-    environment:
-      CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
-      CONNECT_GROUP_ID: compose-connect-group
-      CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
-      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
-      CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
-      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
-      CONNECT_STATUS_STORAGE_TOPIC: _connect_status
-      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
-      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
-      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
-      CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
-      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
-      CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
-      CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
-      CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
-      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
-
-  kafka-init-topics:
-    image: confluentinc/cp-kafka:7.2.1
-    volumes:
-      - ./message.json:/data/message.json
-    depends_on:
-      - kafka0
-    command: "bash -c 'echo Waiting for Kafka to be ready... && \
-               cub kafka-ready -b kafka0:29092 1 30 && \
-               kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
-               kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
-               kafka-console-producer --bootstrap-server kafka0:29092 --topic second.users < /data/message.json'"

+ 2 - 1
documentation/compose/kafka-ui-sasl.yaml

@@ -15,6 +15,7 @@ services:
       KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_PLAINTEXT
       KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM: PLAIN
       KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";'
+      DYNAMIC_CONFIG_ENABLED: 'true' # not necessary for sasl auth, added for tests
 
   kafka:
     image: confluentinc/cp-kafka:7.2.1
@@ -48,4 +49,4 @@ services:
     volumes:
       - ./scripts/update_run.sh:/tmp/update_run.sh
       - ./jaas:/etc/kafka/jaas
-    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

+ 10 - 8
documentation/compose/kafka-ui-serdes.yaml

@@ -14,13 +14,16 @@ services:
             kafka.clusters.0.name: SerdeExampleCluster
             kafka.clusters.0.bootstrapServers: kafka0:29092
             kafka.clusters.0.schemaRegistry: http://schemaregistry0:8085
-            # optional auth and ssl properties for SR
+
+            # optional SSL settings for cluster (will be used by SchemaRegistry serde, if set)
+            #kafka.clusters.0.ssl.keystoreLocation: /kafka.keystore.jks
+            #kafka.clusters.0.ssl.keystorePassword: "secret"
+            #kafka.clusters.0.ssl.truststoreLocation: /kafka.truststore.jks
+            #kafka.clusters.0.ssl.truststorePassword: "secret"
+
+            # optional auth properties for SR
             #kafka.clusters.0.schemaRegistryAuth.username: "use"
             #kafka.clusters.0.schemaRegistryAuth.password: "pswrd"
-            #kafka.clusters.0.schemaRegistrySSL.keystoreLocation: /kafka.keystore.jks
-            #kafka.clusters.0.schemaRegistrySSL.keystorePassword: "secret"
-            #kafka.clusters.0.schemaRegistrySSL.truststoreLocation: /kafka.truststore.jks
-            #kafka.clusters.0.schemaRegistrySSL.truststorePassword: "secret"
 
             kafka.clusters.0.defaultKeySerde: Int32  #optional
             kafka.clusters.0.defaultValueSerde: String #optional
@@ -28,8 +31,7 @@ services:
             kafka.clusters.0.serde.0.name: ProtobufFile
             kafka.clusters.0.serde.0.topicKeysPattern: "topic1"
             kafka.clusters.0.serde.0.topicValuesPattern: "topic1"
-            kafka.clusters.0.serde.0.properties.protobufFiles.0: /protofiles/key-types.proto
-            kafka.clusters.0.serde.0.properties.protobufFiles.1: /protofiles/values.proto
+            kafka.clusters.0.serde.0.properties.protobufFilesDir: /protofiles/
             kafka.clusters.0.serde.0.properties.protobufMessageNameForKey: test.MyKey # default type for keys
             kafka.clusters.0.serde.0.properties.protobufMessageName: test.MyValue # default type for values
             kafka.clusters.0.serde.0.properties.protobufMessageNameForKeyByTopic.topic1: test.MySpecificTopicKey # keys type for topic "topic1"
@@ -52,7 +54,7 @@ services:
             kafka.clusters.0.serde.4.properties.keySchemaNameTemplate: "%s-key"
             kafka.clusters.0.serde.4.properties.schemaNameTemplate: "%s-value"
             #kafka.clusters.0.serde.4.topicValuesPattern: "sr2-topic.*"
-            # optional auth and ssl properties for SR:
+            # optional auth and ssl properties for SR (overrides cluster-level):
             #kafka.clusters.0.serde.4.properties.username: "user"
             #kafka.clusters.0.serde.4.properties.password: "passw"
             #kafka.clusters.0.serde.4.properties.keystoreLocation:  /kafka.keystore.jks
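
The ProtobufFile serde now points at a directory via `protobufFilesDir` instead of enumerating every file with `protobufFiles.N`. A minimal sketch of the same settings in application.yml form (assuming `/protofiles/` contains `key-types.proto` and `values.proto`):

```yaml
kafka:
  clusters:
    - name: SerdeExampleCluster
      serdes:
        - name: ProtobufFile
          properties:
            protobufFilesDir: /protofiles/           # directory with the .proto files
            protobufMessageNameForKey: test.MyKey    # default type for keys
            protobufMessageName: test.MyValue        # default type for values
```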

+ 1 - 0
documentation/compose/kafka-ui.yaml

@@ -24,6 +24,7 @@ services:
       KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
       KAFKA_CLUSTERS_1_METRICS_PORT: 9998
       KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
+      DYNAMIC_CONFIG_ENABLED: 'true'
 
   kafka0:
     image: confluentinc/cp-kafka:7.2.1

+ 4 - 0
documentation/compose/proto/key-types.proto

@@ -1,11 +1,15 @@
 syntax = "proto3";
 package test;
 
+import "google/protobuf/wrappers.proto";
+
 message MyKey {
     string myKeyF1 = 1;
+    google.protobuf.UInt64Value uint_64_wrapper = 2;
 }
 
 message MySpecificTopicKey {
     string special_field1 = 1;
     string special_field2 = 2;
+    google.protobuf.FloatValue float_wrapper = 3;
 }

+ 2 - 0
documentation/compose/proto/values.proto

@@ -9,4 +9,6 @@ message MySpecificTopicValue {
 message MyValue {
   int32 version = 1;
   string payload = 2;
+  map<int32, string> intToStringMap = 3;
+  map<string, MyValue> strToObjMap  = 4;
 }

+ 0 - 41
documentation/guides/AWS_IAM.md

@@ -1,41 +0,0 @@
-# How to configure AWS IAM Authentication
-
-UI for Apache Kafka comes with built-in [aws-msk-iam-auth](https://github.com/aws/aws-msk-iam-auth) library.
-
-You can pass SASL configs in the properties section for each cluster.
-
-More details can be found here: [aws-msk-iam-auth](https://github.com/aws/aws-msk-iam-auth)
- 
-## Examples: 
-
-Please replace 
-* <KAFKA_URL> with broker list
-* <PROFILE_NAME> with your aws profile
-
-
-### Running From Docker Image
-
-```sh
-docker run -p 8080:8080 \
-    -e KAFKA_CLUSTERS_0_NAME=local \
-    -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=<KAFKA_URL> \
-    -e KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL \
-    -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=AWS_MSK_IAM \
-    -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_CLIENT_CALLBACK_HANDLER_CLASS=software.amazon.msk.auth.iam.IAMClientCallbackHandler \
-    -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG=software.amazon.msk.auth.iam.IAMLoginModule required awsProfileName="<PROFILE_NAME>"; \
-    -d provectuslabs/kafka-ui:latest 
-```
-
-### Configuring by application.yaml
-
-```yaml
-kafka:
-  clusters:
-    - name: local
-      bootstrapServers: <KAFKA_URL>
-      properties:
-        security.protocol: SASL_SSL
-        sasl.mechanism: AWS_MSK_IAM
-        sasl.client.callback.handler.class: software.amazon.msk.auth.iam.IAMClientCallbackHandler
-        sasl.jaas.config: software.amazon.msk.auth.iam.IAMLoginModule required awsProfileName="<PROFILE_NAME>";
-```

+ 0 - 123
documentation/guides/DataMasking.md

@@ -1,123 +0,0 @@
-# Topics data masking
-
-You can configure kafka-ui to mask sensitive data shown on the Messages page.
-
-Several masking policies are supported:
-
-### REMOVE
-For JSON objects, removes the target fields; otherwise, returns the string "null".
-```yaml
-- type: REMOVE
-  fields: [ "id", "name" ]
-  ...
-```
-
-Apply examples:
-```
-{ "id": 1234, "name": { "first": "James" }, "age": 30 } 
- ->
-{ "age": 30 } 
-```
-```
-non-json string -> null
-```
-
-### REPLACE
-For JSON objects, replaces the target fields' values with the specified replacement string (by default `***DATA_MASKED***`). Note: if a target field's value is an object, the replacement is applied to all of its fields recursively (see example).
-
-```yaml
-- type: REPLACE
-  fields: [ "id", "name" ]
-  replacement: "***"  #optional, "***DATA_MASKED***" by default
-  ...
-```
-
-Apply examples:
-```
-{ "id": 1234, "name": { "first": "James", "last": "Bond" }, "age": 30 } 
- ->
-{ "id": "***", "name": { "first": "***", "last": "***" }, "age": 30 } 
-```
-```
-non-json string -> ***
-```
-
-### MASK
-Masks the target fields' values with the specified masking characters, recursively (spaces and line separators are kept as-is).
-The `pattern` array specifies which symbols are used to replace upper-case characters, lower-case characters, digits, and other symbols, respectively.
-
-```yaml
-- type: MASK
-  fields: [ "id", "name" ]
-  pattern: ["A", "a", "N", "_"]   # optional, default is ["X", "x", "n", "-"]
-  ...
-```
-
-Apply examples:
-```
-{ "id": 1234, "name": { "first": "James", "last": "Bond!" }, "age": 30 } 
- ->
-{ "id": "NNNN", "name": { "first": "Aaaaa", "last": "Aaaa_" }, "age": 30 } 
-```
-```
-Some string! -> Aaaa aaaaaa_
-```
-
-----
-
-For each policy, if `fields` is not specified, the policy is applied to all of the object's fields, or to the whole string if it is not a JSON object.
-
-You can specify which masks are applied to a topic's keys/values. Multiple policies are applied if a topic matches more than one policy's patterns.
-
-Yaml configuration example:
-```yaml
-kafka:
-  clusters:
-    - name: ClusterName
-      # Other Cluster configuration omitted ... 
-      masking:
-        - type: REMOVE
-          fields: [ "id" ]
-          topicKeysPattern: "events-with-ids-.*"
-          topicValuesPattern: "events-with-ids-.*"
-          
-        - type: REPLACE
-          fields: [ "companyName", "organizationName" ]
-          replacement: "***MASKED_ORG_NAME***"   #optional
-          topicValuesPattern: "org-events-.*"
-        
-        - type: MASK
-          fields: [ "name", "surname" ]
-          pattern: ["A", "a", "N", "_"]  #optional
-          topicValuesPattern: "user-states"
-
-        - type: MASK
-          topicValuesPattern: "very-secured-topic"
-```
-
-Same configuration in env-vars fashion:
-```
-...
-KAFKA_CLUSTERS_0_MASKING_0_TYPE: REMOVE
-KAFKA_CLUSTERS_0_MASKING_0_FIELDS_0: "id"
-KAFKA_CLUSTERS_0_MASKING_0_TOPICKEYSPATTERN: "events-with-ids-.*"
-KAFKA_CLUSTERS_0_MASKING_0_TOPICVALUESPATTERN: "events-with-ids-.*"
-
-KAFKA_CLUSTERS_0_MASKING_1_TYPE: REPLACE
-KAFKA_CLUSTERS_0_MASKING_1_FIELDS_0: "companyName"
-KAFKA_CLUSTERS_0_MASKING_1_FIELDS_1: "organizationName"
-KAFKA_CLUSTERS_0_MASKING_1_REPLACEMENT: "***MASKED_ORG_NAME***"
-KAFKA_CLUSTERS_0_MASKING_1_TOPICVALUESPATTERN: "org-events-.*"
-
-KAFKA_CLUSTERS_0_MASKING_2_TYPE: MASK
-KAFKA_CLUSTERS_0_MASKING_2_FIELDS_0: "name"
-KAFKA_CLUSTERS_0_MASKING_2_FIELDS_1: "surname"
-KAFKA_CLUSTERS_0_MASKING_2_PATTERN_0: 'A'
-KAFKA_CLUSTERS_0_MASKING_2_PATTERN_1: 'a'
-KAFKA_CLUSTERS_0_MASKING_2_PATTERN_2: 'N'
-KAFKA_CLUSTERS_0_MASKING_2_PATTERN_3: '_'
-KAFKA_CLUSTERS_0_MASKING_2_TOPICVALUESPATTERN: "user-states"
-
-KAFKA_CLUSTERS_0_MASKING_3_TYPE: MASK
-KAFKA_CLUSTERS_0_MASKING_3_TOPICVALUESPATTERN: "very-secured-topic"
-```

+ 0 - 51
documentation/guides/Protobuf.md

@@ -1,51 +0,0 @@
-# Kafkaui Protobuf Support
-
-### This document is deprecated, please see examples in [Serialization document](Serialization.md).
-
-Kafkaui supports deserializing protobuf messages in two ways:
-1. Using Confluent Schema Registry's [protobuf support](https://docs.confluent.io/platform/current/schema-registry/serdes-develop/serdes-protobuf.html).
-2. Supplying a protobuf file as well as a configuration that maps topic names to protobuf types.
-
-## Configuring Kafkaui with a Protobuf File
-
-To configure Kafkaui to deserialize protobuf messages using a supplied protobuf schema add the following to the config:
-```yaml
-kafka:
-  clusters:
-    - # Cluster configuration omitted.
-      # protobufFile is the path to the protobuf schema. (deprecated: please use "protobufFiles")
-      protobufFile: path/to/my.proto
-      # protobufFiles is the path to one or more protobuf schemas.
-      protobufFiles: 
-        - /path/to/my.proto
-        - /path/to/another.proto
-      # protobufMessageName is the default protobuf type that is used to deserialize
-      # the message's value if the topic is not found in protobufMessageNameByTopic.
-      protobufMessageName: my.DefaultValType
-      # protobufMessageNameByTopic is a mapping of topic names to protobuf types.
-      # This mapping is required and is used to deserialize the Kafka message's value.
-      protobufMessageNameByTopic:
-        topic1: my.Type1
-        topic2: my.Type2
-      # protobufMessageNameForKey is the default protobuf type that is used to deserialize
-      # the message's key if the topic is not found in protobufMessageNameForKeyByTopic.
-      protobufMessageNameForKey: my.DefaultKeyType
-      # protobufMessageNameForKeyByTopic is a mapping of topic names to protobuf types.
-      # This mapping is optional and is used to deserialize the Kafka message's key.
-      # If a protobuf type is not found for a topic's key, the key is deserialized as a string,
-      # unless protobufMessageNameForKey is specified.
-      protobufMessageNameForKeyByTopic:
-        topic1: my.KeyType1
-```
-
-Same config with flattened config (for docker-compose):
-
-```text
-kafka.clusters.0.protobufFiles.0: /path/to/my.proto
-kafka.clusters.0.protobufFiles.1: /path/to/another.proto
-kafka.clusters.0.protobufMessageName: my.DefaultValType
-kafka.clusters.0.protobufMessageNameByTopic.topic1: my.Type1
-kafka.clusters.0.protobufMessageNameByTopic.topic2: my.Type2
-kafka.clusters.0.protobufMessageNameForKey: my.DefaultKeyType
-kafka.clusters.0.protobufMessageNameForKeyByTopic.topic1: my.KeyType1
-```

+ 0 - 58
documentation/guides/SASL_SCRAM.md

@@ -1,58 +0,0 @@
-# How to configure SASL SCRAM Authentication
-
-You can pass SASL configs in the properties section for each cluster.
- 
-## Examples: 
-
-Please replace 
-- <KAFKA_NAME> with cluster name
-- <KAFKA_URL> with broker list
-- <KAFKA_USERNAME> with username
-- <KAFKA_PASSWORD> with password
-
-### Running From Docker Image
-
-```sh
-docker run -p 8080:8080 \
-    -e KAFKA_CLUSTERS_0_NAME=<KAFKA_NAME> \
-    -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=<KAFKA_URL> \
-    -e KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL \
-    -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=SCRAM-SHA-512 \     
-    -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG=org.apache.kafka.common.security.scram.ScramLoginModule required username="<KAFKA_USERNAME>" password="<KAFKA_PASSWORD>"; \
-    -d provectuslabs/kafka-ui:latest 
-```
-
-### Running From Docker-compose file
-
-```yaml
-
-version: '3.4'
-services:
-  
-  kafka-ui:
-    image: provectuslabs/kafka-ui
-    container_name: kafka-ui
-    ports:
-      - "888:8080"
-    restart: always
-    environment:
-      - KAFKA_CLUSTERS_0_NAME=<KAFKA_NAME>
-      - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=<KAFKA_URL>
-      - KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL
-      - KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=SCRAM-SHA-512
-      - KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG=org.apache.kafka.common.security.scram.ScramLoginModule required username="<KAFKA_USERNAME>" password="<KAFKA_PASSWORD>";
-      - KAFKA_CLUSTERS_0_PROPERTIES_PROTOCOL=SASL
-```
-
-### Configuring by application.yaml
-
-```yaml
-kafka:
-  clusters:
-    - name: local
-      bootstrapServers: <KAFKA_URL>
-      properties:
-        security.protocol: SASL_SSL
-        sasl.mechanism: SCRAM-SHA-512        
-        sasl.jaas.config: org.apache.kafka.common.security.scram.ScramLoginModule required username="<KAFKA_USERNAME>" password="<KAFKA_PASSWORD>";
-```

+ 0 - 7
documentation/guides/SECURE_BROKER.md

@@ -1,7 +0,0 @@
-## Connecting to a Secure Broker
-
-The app supports TLS (SSL) and SASL connections for [encryption and authentication](http://kafka.apache.org/090/documentation.html#security). <br/>
-
-### Running From Docker-compose file
-
-See [this](/documentation/compose/kafka-ssl.yml) docker-compose file reference for SSL-enabled Kafka.

+ 0 - 71
documentation/guides/SSO.md

@@ -1,71 +0,0 @@
-# How to configure SSO
-SSO additionally requires TLS to be configured for the application. In this example we will use a self-signed certificate; if you use CA-signed certificates, please skip step 1.
-## Step 1
-In this step we will generate a self-signed PKCS12 keypair.
-``` bash
-mkdir cert
-keytool -genkeypair -alias ui-for-apache-kafka -keyalg RSA -keysize 2048 \
-  -storetype PKCS12 -keystore cert/ui-for-apache-kafka.p12 -validity 3650
-```
-## Step 2
-Create a new application in any SSO provider; we will continue with [Auth0](https://auth0.com).
-
-<img src="https://github.com/provectus/kafka-ui/raw/images/images/sso-new-app.png" width="70%"/>
-
-After that, you need to provide the callback URLs; in our case we will use `https://127.0.0.1:8080/login/oauth2/code/auth0`
-
-<img src="https://github.com/provectus/kafka-ui/raw/images/images/sso-configuration.png" width="70%"/>
-
-These are the main parameters required for enabling SSO
-
-<img src="https://github.com/provectus/kafka-ui/raw/images/images/sso-parameters.png" width="70%"/>
-
-## Step 3
-To launch UI for Apache Kafka with TLS and SSO enabled, run the following:
-``` bash
-docker run -p 8080:8080 -v `pwd`/cert:/opt/cert -e AUTH_TYPE=LOGIN_FORM \
-  -e SECURITY_BASIC_ENABLED=true \
-  -e SERVER_SSL_KEY_STORE_TYPE=PKCS12 \
-  -e SERVER_SSL_KEY_STORE=/opt/cert/ui-for-apache-kafka.p12 \
-  -e SERVER_SSL_KEY_STORE_PASSWORD=123456 \
-  -e SERVER_SSL_KEY_ALIAS=ui-for-apache-kafka \
-  -e SERVER_SSL_ENABLED=true \
-  -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
-  -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTSECRET=YXfRjmodifiedTujnkVr7zuW9ECCAK4TcnCio-i \
-  -e SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI=https://dev-a63ggcut.auth0.com/ \
-  -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_SCOPE=openid \
-  -e TRUST_STORE=/opt/cert/ui-for-apache-kafka.p12 \
-  -e TRUST_STORE_PASSWORD=123456 \
-provectuslabs/kafka-ui:latest
-```
-If a trusted CA-signed SSL certificate is used and SSL termination happens outside the application, we can pass only the SSO-related environment variables:
-``` bash
-docker run -p 8080:8080 -v `pwd`/cert:/opt/cert -e AUTH_TYPE=OAUTH2 \
-  -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
-  -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTSECRET=YXfRjmodifiedTujnkVr7zuW9ECCAK4TcnCio-i \
-  -e SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI=https://dev-a63ggcut.auth0.com/ \
-  -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_SCOPE=openid \
-provectuslabs/kafka-ui:latest
-```
-
-## Step 4 (Load Balancer HTTP) (optional)
-If you're using load balancer/proxy and use HTTP between the proxy and the app, you might want to set `server_forward-headers-strategy` to `native` as well (`SERVER_FORWARDHEADERSSTRATEGY=native`), for more info refer to [this issue](https://github.com/provectus/kafka-ui/issues/1017).
-
-## Step 5 (Azure) (optional)
-For Azure AD (Office365) OAUTH2 you'll want to add additional environment variables:
-
-```bash
-docker run -p 8080:8080 \
-        -e KAFKA_CLUSTERS_0_NAME="${cluster_name}"\
-        -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS="${kafka_listeners}" \
-        -e KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS="${kafka_connect_servers}"
-        -e AUTH_TYPE=OAUTH2 \
-        -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
-        -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTSECRET=YXfRjmodifiedTujnkVr7zuW9ECCAK4TcnCio-i \
-        -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_SCOPE="https://graph.microsoft.com/User.Read" \
-        -e SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI="https://login.microsoftonline.com/{tenant-id}/v2.0" \
-        -d provectuslabs/kafka-ui:latest"
-```
-
-Note that the scope is created by default when the application registration is done in the Azure portal.
-You'll need to update the application registration manifest to include `"accessTokenAcceptedVersion": 2`

+ 0 - 169
documentation/guides/Serialization.md

@@ -1,169 +0,0 @@
-## Serialization and deserialization and custom plugins
-
-Kafka-ui supports multiple ways to serialize/deserialize data.
-
-
-### Int32, Int64, UInt32, UInt64
-Big-endian 4/8 bytes representation of signed/unsigned integers.
-
-### Base64
-Base64 (RFC 4648) binary data representation. Can be useful when the actual data is not important, but exactly the same (byte-wise) key/value should be sent.
-
-### String 
-Treats binary data as a string in specified encoding. Default encoding is UTF-8.
-
-Class name: `com.provectus.kafka.ui.serdes.builtin.StringSerde`
-
-Sample configuration (if you want to overwrite default configuration):
-```yaml
-kafka:
-  clusters:
-    - name: Cluster1
-      # Other Cluster configuration omitted ... 
-      serdes:
-          # registering String serde with custom config
-        - name: AsciiString
-          className: com.provectus.kafka.ui.serdes.builtin.StringSerde
-          properties:
-            encoding: "ASCII"
-        
-          # overriding build-it String serde config   
-        - name: String 
-          properties:
-            encoding: "UTF-16"
-```
-
-### Protobuf
-
-Class name: `com.provectus.kafka.ui.serdes.builtin.ProtobufFileSerde`
-
-Sample configuration:
-```yaml
-kafka:
-  clusters:
-    - name: Cluster1
-      # Other Cluster configuration omitted ... 
-      serdes:
-        - name: ProtobufFile
-          properties:
-            # path to the protobuf schema files
-            protobufFiles:
-              - path/to/my.proto
-              - path/to/another.proto
-            # default protobuf type that is used for KEY serialization/deserialization
-            # optional
-            protobufMessageNameForKey: my.Type1
-            # mapping of topic names to protobuf types, that will be used for KEYS  serialization/deserialization
-            # optional
-            protobufMessageNameForKeyByTopic:
-              topic1: my.KeyType1
-              topic2: my.KeyType2
-            # default protobuf type that is used for VALUE serialization/deserialization
-            # optional, if not set - first type in file will be used as default
-            protobufMessageName: my.Type1
-            # mapping of topic names to protobuf types, that will be used for VALUES  serialization/deserialization
-            # optional
-            protobufMessageNameByTopic:
-              topic1: my.Type1
-              "topic.2": my.Type2
-```
-Docker-compose sample for Protobuf serialization is [here](../compose/kafka-ui-serdes.yaml).
-
-Legacy configuration for protobuf is [here](Protobuf.md).
-
-### SchemaRegistry
-The SchemaRegistry serde is automatically configured if schema registry properties are set at the cluster level.
-But you can add new SchemaRegistry-typed serdes that connect to another schema-registry instance.
-
-Class name: `com.provectus.kafka.ui.serdes.builtin.sr.SchemaRegistrySerde`
-
-Sample configuration:
-```yaml
-kafka:
-  clusters:
-    - name: Cluster1
-      # this url will be used by "SchemaRegistry" by default
-      schemaRegistry: http://main-schema-registry:8081
-      serdes:
-        - name: AnotherSchemaRegistry
-          className: com.provectus.kafka.ui.serdes.builtin.sr.SchemaRegistrySerde
-          properties:
-            url:  http://another-schema-registry:8081
-            # auth properties, optional
-            username: nameForAuth
-            password: P@ssW0RdForAuth
-        
-          # and also add another SchemaRegistry serde
-        - name: ThirdSchemaRegistry
-          className: com.provectus.kafka.ui.serdes.builtin.sr.SchemaRegistrySerde
-          properties:
-            url:  http://another-yet-schema-registry:8081
-```
-
-## Setting serdes for specific topics
-You can specify a preferred serde for topic keys/values. This serde will be chosen by default in the UI on a topic's view/produce pages.
-To do so, set the `topicKeysPattern`/`topicValuesPattern` properties for the selected serde. Kafka-ui will choose the first serde that matches the specified pattern.
-
-Sample configuration:
-```yaml
-kafka:
-  clusters:
-    - name: Cluster1
-      serdes:
-        - name: String
-          topicKeysPattern: click-events|imp-events
-        
-        - name: Int64
-          topicKeysPattern: ".*-events"
-        
-        - name: SchemaRegistry
-          topicValuesPattern: click-events|imp-events
-```
-
-
-## Default serdes
-You can specify which serde is chosen in the UI by default if no other serde is selected via the `topicKeysPattern`/`topicValuesPattern` settings.
-
-Sample configuration:
-```yaml
-kafka:
-  clusters:
-    - name: Cluster1
-      defaultKeySerde: Int32
-      defaultValueSerde: String
-      serdes:
-        - name: Int32
-          topicKeysPattern: click-events|imp-events
-```
-
-## Fallback
-If the selected serde couldn't be applied (an exception was thrown), then the fallback serde (String serde with UTF-8 encoding) will be applied. Such messages will be specially highlighted in the UI.
-
-## Custom pluggable serde registration
-You can implement your own serde and register it in kafka-ui application.
-To do so:
-1. Add `kafka-ui-serde-api` dependency (should be downloadable via maven central)
-2. Implement `com.provectus.kafka.ui.serde.api.Serde` interface. See javadoc for implementation requirements.
-3. Pack your serde into an uber jar, or provide a directory with a no-dependency jar and its dependency jars
-
-
-Example pluggable serdes :
-https://github.com/provectus/kafkaui-smile-serde
-https://github.com/provectus/kafkaui-glue-sr-serde
-
-Sample configuration:
-```yaml
-kafka:
-  clusters:
-    - name: Cluster1
-      serdes:
-        - name: MyCustomSerde
-          className: my.lovely.org.KafkaUiSerde
-          filePath: /var/lib/kui-serde/my-kui-serde.jar
-          
-        - name: MyCustomSerde2
-          className: my.lovely.org.KafkaUiSerde2
-          filePath: /var/lib/kui-serde2
-          properties:
-            prop1: v1
-```

+ 0 - 22
documentation/project/ROADMAP.md

@@ -1,22 +0,0 @@
-Kafka-UI Project Roadmap
-====================
-
-The roadmap exists in the form of a GitHub project board and is located [here](https://github.com/provectus/kafka-ui/projects/8).
-
-### How to use this document
-
-The roadmap provides a list of features we decided to prioritize in project development. It should serve as a reference point for understanding the project's goals.
-
-We do prioritize them based on the feedback from the community, our own vision and other conditions and circumstances. 
-
-The roadmap sets the general way of development. The roadmap is mostly about long-term features. All the features could be re-prioritized, rescheduled or canceled.
-
-If there's no feature `X`, that **doesn't** mean we're **not** going to implement it. Feel free to raise an issue for consideration. <br/>
-If a feature you want to see live is not present on the roadmap, but there's an issue for the feature, feel free to vote for it using reactions in the issue.
-
-
-### How to contribute
-
-Since the roadmap consists mostly of big long-term features, implementing them might not be easy for a beginner or an outside collaborator.
-
-A good starting point is checking the [CONTRIBUTING.md](https://github.com/provectus/kafka-ui/blob/master/CONTRIBUTING.md) document.

+ 0 - 8
documentation/project/contributing/README.md

@@ -1,8 +0,0 @@
-# Contributing guidelines
-
-### Set up the local environment for development
-
-* [Prerequisites](prerequisites.md)
-<!--* [Setting up git](set-up-git.md)-->
-* [Building the app](building.md)
-* [Writing tests](testing.md)

+ 0 - 24
documentation/project/contributing/building-and-running-without-docker.md

@@ -1,24 +0,0 @@
-# Build & Run Without Docker
-
-Once you have installed the prerequisites and cloned the repository, run the following steps in your project directory:
-
-## <a name="run_without_docker_quickly"></a> Running Without Docker Quickly
-
-- [Download the latest kafka-ui jar file](https://github.com/provectus/kafka-ui/releases)
-#### <a name="run_kafkaui_jar_file"></a> Execute the jar
-```sh
-java -Dspring.config.additional-location=<path-to-application-local.yml> -jar <path-to-kafka-ui-jar>
-```
-- Example of how to configure clusters in the [application-local.yml](https://github.com/provectus/kafka-ui/blob/master/kafka-ui-api/src/main/resources/application-local.yml) configuration file.
-
-## <a name="build_and_run_without_docker"></a> Building And Running Without Docker
-
-> **_NOTE:_**  If you want to get kafka-ui up and running locally quickly without building the jar file manually, then just follow [Running Without Docker Quickly](#run_without_docker_quickly)
-
-> Comment out `docker-maven-plugin` plugin in `kafka-ui-api` pom.xml
-
-- [Command to build the jar](./building.md#cmd_to_build_kafkaui_without_docker)
-
-> Once your build is successful, the jar file named kafka-ui-api-0.0.1-SNAPSHOT.jar is generated inside `kafka-ui-api/target`.
-
-- [Execute the jar](#run_kafkaui_jar_file)

+ 0 - 63
documentation/project/contributing/building.md

@@ -1,63 +0,0 @@
-# Build & Run
-
-Once you have installed the prerequisites and cloned the repository, run the following steps in your project directory:
-
-## Step 1 : Build
-> **_NOTE:_** If you are a macOS M1 user, please keep the following things in mind:
-
-> Make sure you have ARM supported java installed
-
-> Skip the maven tests as they might not be successful
-
-- Build a docker image with the app
-```sh
-./mvnw clean install -Pprod
-```
-- If you need to build the frontend `kafka-ui-react-app`, go here:
-     - [kafka-ui-react-app-build-documentation](../../../kafka-ui-react-app/README.md)
-
-<a name="cmd_to_build_kafkaui_without_docker"></a>
-- In case you want to build `kafka-ui-api` by skipping the tests
-```sh
-./mvnw clean install -Dmaven.test.skip=true -Pprod
-```
-
-- To build only the `kafka-ui-api` you can use this command:
-```sh
-./mvnw -f kafka-ui-api/pom.xml clean install -Pprod -DskipUIBuild=true
-```
-
-If this step is successful, it should create a docker image named `provectuslabs/kafka-ui` with the `latest` tag on your local machine (except on macOS M1).
-
-## Step 2 : Run
-#### Using Docker Compose
-> **_NOTE:_** If you are a macOS M1 user, you can use the arm64-supported docker compose script `./documentation/compose/kafka-ui-arm64.yaml`
- - Start the `kafka-ui` app using docker image built in step 1 along with Kafka clusters:
-```sh
-docker-compose -f ./documentation/compose/kafka-ui.yaml up -d
-```
-
-#### Using Spring Boot Run
- - If you want to start only kafka clusters (to run the `kafka-ui` app via `spring-boot:run`):
-```sh
-docker-compose -f ./documentation/compose/kafka-clusters-only.yaml up -d
-```
-- Then start the app.
-```sh
-./mvnw spring-boot:run -Pprod
-
-# or
-
-./mvnw spring-boot:run -Pprod -Dspring.config.location=file:///path/to/conf.yaml
-```
-
-#### Running in kubernetes
-- Using Helm Charts
-```sh
-helm repo add kafka-ui https://provectus.github.io/kafka-ui
-helm install kafka-ui kafka-ui/kafka-ui
-```
-To read more please follow to [chart documentation](../../../charts/kafka-ui/README.md).
-
-## Step 3 : Access Kafka-UI
- - To see the `kafka-ui` app running, navigate to http://localhost:8080.

+ 0 - 42
documentation/project/contributing/prerequisites.md

@@ -1,42 +0,0 @@
-### Prerequisites
-
-This page explains how to get the software you need to use a Linux or macOS
-machine for local development.
-
-Before you begin contributing you must have:
-
-* A GitHub account
-* `Java` 17 or newer
-* `Git`
-* `Docker`
-
-### Installing prerequisites on macOS
-
-1. Install [brew](https://brew.sh/).
-2. Install brew cask:
-```sh
-brew cask
-```
-3. Install Eclipse Temurin 17 via Homebrew cask:
-```sh
-brew tap homebrew/cask-versions
-brew install temurin17
-```
-4. Verify Installation
-```sh
-java -version
-```
-Note: If OpenJDK 17 is not set as your default Java, consider including it in your `$PATH` after installation
-```sh
-export PATH="$(/usr/libexec/java_home -v 17)/bin:$PATH"
-export JAVA_HOME="$(/usr/libexec/java_home -v 17)"
-```
-
-## Tips
-
-Consider allocating at least 4 GB of memory to Docker.
-Otherwise, some apps within a stack (e.g. `kafka-ui.yaml`) might crash.
-
-## Where to go next
-
-In the next section, you'll [learn how to Build and Run kafka-ui](building.md).

+ 0 - 8
documentation/project/contributing/set-up-git.md

@@ -1,8 +0,0 @@
-### Nothing special here yet.
-<!--
-TODO:
-
-1. Cloning
-2. Credentials set up (git user.name & email)
-3. Signing off (DCO)
--->

+ 0 - 28
documentation/project/contributing/testing.md

@@ -1,28 +0,0 @@
-# Testing
-
-
-
-## Test suites
-
-
-## Writing new tests
-
-
-### Writing tests for new features
-
-
-### Writing tests for bug fixes
-
-
-### Writing new integration tests
-
-
-
-## Running tests
-
-### Unit Tests
-
-
-### Integration Tests
-
-

+ 333 - 0
etc/checkstyle/checkstyle-e2e.xml

@@ -0,0 +1,333 @@
+<?xml version="1.0"?>
+<!DOCTYPE module PUBLIC
+        "-//Checkstyle//DTD Checkstyle Configuration 1.3//EN"
+        "https://checkstyle.org/dtds/configuration_1_3.dtd">
+
+<!--
+    Checkstyle configuration that checks the Google coding conventions from Google Java Style
+    that can be found at https://google.github.io/styleguide/javaguide.html
+
+    Checkstyle is very configurable. Be sure to read the documentation at
+    http://checkstyle.org (or in your downloaded distribution).
+
+    To completely disable a check, just comment it out or delete it from the file.
+    To suppress certain violations please review suppression filters.
+
+    Authors: Max Vetrenko, Ruslan Diachenko, Roman Ivanov.
+ -->
+
+<module name = "Checker">
+    <property name="charset" value="UTF-8"/>
+
+    <property name="severity" value="warning"/>
+
+    <property name="fileExtensions" value="java, properties, xml"/>
+    <!-- Excludes all 'module-info.java' files              -->
+    <!-- See https://checkstyle.org/config_filefilters.html -->
+    <module name="BeforeExecutionExclusionFileFilter">
+        <property name="fileNamePattern" value="module\-info\.java$"/>
+    </module>
+    <!-- https://checkstyle.org/config_filters.html#SuppressionFilter -->
+    <module name="SuppressionFilter">
+        <property name="file" value="${org.checkstyle.google.suppressionfilter.config}"
+                  default="checkstyle-suppressions.xml" />
+        <property name="optional" value="true"/>
+    </module>
+
+    <!-- Checks for whitespace                               -->
+    <!-- See http://checkstyle.org/config_whitespace.html -->
+    <module name="FileTabCharacter">
+        <property name="eachLine" value="true"/>
+    </module>
+
+    <module name="LineLength">
+        <property name="fileExtensions" value="java"/>
+        <property name="max" value="120"/>
+        <property name="ignorePattern" value="^package.*|^import.*|a href|href|http://|https://|ftp://"/>
+    </module>
+
+    <module name="TreeWalker">
+        <module name="OuterTypeFilename"/>
+        <module name="IllegalTokenText">
+            <property name="tokens" value="STRING_LITERAL, CHAR_LITERAL"/>
+            <property name="format"
+                      value="\\u00(09|0(a|A)|0(c|C)|0(d|D)|22|27|5(C|c))|\\(0(10|11|12|14|15|42|47)|134)"/>
+            <property name="message"
+                      value="Consider using special escape sequence instead of octal value or Unicode escaped value."/>
+        </module>
+        <module name="AvoidEscapedUnicodeCharacters">
+            <property name="allowEscapesForControlCharacters" value="true"/>
+            <property name="allowByTailComment" value="true"/>
+            <property name="allowNonPrintableEscapes" value="true"/>
+        </module>
+        <module name="AvoidStarImport"/>
+        <module name="OneTopLevelClass"/>
+        <module name="NoLineWrap">
+            <property name="tokens" value="PACKAGE_DEF, IMPORT, STATIC_IMPORT"/>
+        </module>
+        <module name="EmptyBlock">
+            <property name="option" value="TEXT"/>
+            <property name="tokens"
+                      value="LITERAL_TRY, LITERAL_FINALLY, LITERAL_IF, LITERAL_ELSE, LITERAL_SWITCH"/>
+        </module>
+        <module name="NeedBraces">
+            <property name="tokens"
+                      value="LITERAL_DO, LITERAL_ELSE, LITERAL_FOR, LITERAL_IF, LITERAL_WHILE"/>
+        </module>
+        <module name="LeftCurly">
+            <property name="tokens"
+                      value="ANNOTATION_DEF, CLASS_DEF, CTOR_DEF, ENUM_CONSTANT_DEF, ENUM_DEF,
+                    INTERFACE_DEF, LAMBDA, LITERAL_CASE, LITERAL_CATCH, LITERAL_DEFAULT,
+                    LITERAL_DO, LITERAL_ELSE, LITERAL_FINALLY, LITERAL_FOR, LITERAL_IF,
+                    LITERAL_SWITCH, LITERAL_SYNCHRONIZED, LITERAL_TRY, LITERAL_WHILE, METHOD_DEF,
+                    OBJBLOCK, STATIC_INIT"/>
+        </module>
+        <module name="RightCurly">
+            <property name="id" value="RightCurlySame"/>
+            <property name="tokens"
+                      value="LITERAL_TRY, LITERAL_CATCH, LITERAL_FINALLY, LITERAL_IF, LITERAL_ELSE,
+                    LITERAL_DO"/>
+        </module>
+        <module name="RightCurly">
+            <property name="id" value="RightCurlyAlone"/>
+            <property name="option" value="alone"/>
+            <property name="tokens"
+                      value="CLASS_DEF, METHOD_DEF, CTOR_DEF, LITERAL_FOR, LITERAL_WHILE, STATIC_INIT,
+                    INSTANCE_INIT, ANNOTATION_DEF, ENUM_DEF"/>
+        </module>
+        <module name="SuppressionXpathSingleFilter">
+            <!-- suppresion is required till https://github.com/checkstyle/checkstyle/issues/7541 -->
+            <property name="id" value="RightCurlyAlone"/>
+            <property name="query" value="//RCURLY[parent::SLIST[count(./*)=1]
+                                                 or preceding-sibling::*[last()][self::LCURLY]]"/>
+        </module>
+        <module name="WhitespaceAfter">
+            <property name="tokens"
+                      value="COMMA, SEMI, TYPECAST, LITERAL_IF, LITERAL_ELSE,
+                    LITERAL_WHILE, LITERAL_DO, LITERAL_FOR, DO_WHILE"/>
+        </module>
+        <module name="WhitespaceAround">
+            <property name="allowEmptyConstructors" value="true"/>
+            <property name="allowEmptyLambdas" value="true"/>
+            <property name="allowEmptyMethods" value="true"/>
+            <property name="allowEmptyTypes" value="true"/>
+            <property name="allowEmptyLoops" value="true"/>
+            <property name="tokens"
+                      value="ASSIGN, BAND, BAND_ASSIGN, BOR, BOR_ASSIGN, BSR, BSR_ASSIGN, BXOR,
+                    BXOR_ASSIGN, COLON, DIV, DIV_ASSIGN, DO_WHILE, EQUAL, GE, GT, LAMBDA, LAND,
+                    LCURLY, LE, LITERAL_CATCH, LITERAL_DO, LITERAL_ELSE, LITERAL_FINALLY,
+                    LITERAL_FOR, LITERAL_IF, LITERAL_RETURN, LITERAL_SWITCH, LITERAL_SYNCHRONIZED,
+                     LITERAL_TRY, LITERAL_WHILE, LOR, LT, MINUS, MINUS_ASSIGN, MOD, MOD_ASSIGN,
+                     NOT_EQUAL, PLUS, PLUS_ASSIGN, QUESTION, RCURLY, SL, SLIST, SL_ASSIGN, SR,
+                     SR_ASSIGN, STAR, STAR_ASSIGN, LITERAL_ASSERT, TYPE_EXTENSION_AND"/>
+            <message key="ws.notFollowed"
+                     value="WhitespaceAround: ''{0}'' is not followed by whitespace. Empty blocks may only be represented as '{}' when not part of a multi-block statement (4.1.3)"/>
+            <message key="ws.notPreceded"
+                     value="WhitespaceAround: ''{0}'' is not preceded with whitespace."/>
+        </module>
+        <module name="OneStatementPerLine"/>
+<!--        <module name="MultipleVariableDeclarations"/>-->
+        <module name="ArrayTypeStyle"/>
+        <module name="MissingSwitchDefault"/>
+        <module name="FallThrough"/>
+        <module name="UpperEll"/>
+        <module name="ModifierOrder"/>
+        <module name="EmptyLineSeparator">
+            <property name="tokens"
+                      value="PACKAGE_DEF, IMPORT, STATIC_IMPORT, CLASS_DEF, INTERFACE_DEF, ENUM_DEF,
+                    STATIC_INIT, INSTANCE_INIT, METHOD_DEF, CTOR_DEF, VARIABLE_DEF"/>
+            <property name="allowNoEmptyLineBetweenFields" value="true"/>
+        </module>
+        <module name="SeparatorWrap">
+            <property name="id" value="SeparatorWrapDot"/>
+            <property name="tokens" value="DOT"/>
+            <property name="option" value="nl"/>
+        </module>
+        <module name="SeparatorWrap">
+            <property name="id" value="SeparatorWrapComma"/>
+            <property name="tokens" value="COMMA"/>
+            <property name="option" value="EOL"/>
+        </module>
+        <module name="SeparatorWrap">
+            <!-- ELLIPSIS is EOL until https://github.com/google/styleguide/issues/258 -->
+            <property name="id" value="SeparatorWrapEllipsis"/>
+            <property name="tokens" value="ELLIPSIS"/>
+            <property name="option" value="EOL"/>
+        </module>
+        <module name="SeparatorWrap">
+            <!-- ARRAY_DECLARATOR is EOL until https://github.com/google/styleguide/issues/259 -->
+            <property name="id" value="SeparatorWrapArrayDeclarator"/>
+            <property name="tokens" value="ARRAY_DECLARATOR"/>
+            <property name="option" value="EOL"/>
+        </module>
+        <module name="SeparatorWrap">
+            <property name="id" value="SeparatorWrapMethodRef"/>
+            <property name="tokens" value="METHOD_REF"/>
+            <property name="option" value="nl"/>
+        </module>
+        <module name="PackageName">
+            <property name="format" value="^[a-z]+(\.[a-z][a-z0-9]*)*$"/>
+            <message key="name.invalidPattern"
+                     value="Package name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="TypeName">
+            <property name="tokens" value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, ANNOTATION_DEF"/>
+            <message key="name.invalidPattern"
+                     value="Type name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="MemberName">
+            <property name="format" value="^[a-z][a-z0-9][a-zA-Z0-9]*$"/>
+            <message key="name.invalidPattern"
+                     value="Member name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="ParameterName">
+            <property name="format" value="^[a-z]([a-z0-9][a-zA-Z0-9]*)?$"/>
+            <message key="name.invalidPattern"
+                     value="Parameter name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="LambdaParameterName">
+            <property name="format" value="^[a-z]([a-z0-9][a-zA-Z0-9]*)?$"/>
+            <message key="name.invalidPattern"
+                     value="Lambda parameter name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="CatchParameterName">
+            <property name="format" value="^[a-z]([a-z0-9][a-zA-Z0-9]*)?$"/>
+            <message key="name.invalidPattern"
+                     value="Catch parameter name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="LocalVariableName">
+            <property name="format" value="^[a-z]([a-z0-9][a-zA-Z0-9]*)?$"/>
+            <message key="name.invalidPattern"
+                     value="Local variable name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="ClassTypeParameterName">
+            <property name="format" value="(^[A-Z][0-9]?)$|([A-Z][a-zA-Z0-9]*[T]$)"/>
+            <message key="name.invalidPattern"
+                     value="Class type name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="MethodTypeParameterName">
+            <property name="format" value="(^[A-Z][0-9]?)$|([A-Z][a-zA-Z0-9]*[T]$)"/>
+            <message key="name.invalidPattern"
+                     value="Method type name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="InterfaceTypeParameterName">
+            <property name="format" value="(^[A-Z][0-9]?)$|([A-Z][a-zA-Z0-9]*[T]$)"/>
+            <message key="name.invalidPattern"
+                     value="Interface type name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="NoFinalizer"/>
+        <module name="GenericWhitespace">
+            <message key="ws.followed"
+                     value="GenericWhitespace ''{0}'' is followed by whitespace."/>
+            <message key="ws.preceded"
+                     value="GenericWhitespace ''{0}'' is preceded with whitespace."/>
+            <message key="ws.illegalFollow"
+                     value="GenericWhitespace ''{0}'' should followed by whitespace."/>
+            <message key="ws.notPreceded"
+                     value="GenericWhitespace ''{0}'' is not preceded with whitespace."/>
+        </module>
+        <module name="Indentation">
+            <property name="basicOffset" value="2"/>
+            <property name="braceAdjustment" value="0"/>
+            <property name="caseIndent" value="2"/>
+            <property name="throwsIndent" value="4"/>
+            <property name="lineWrappingIndentation" value="4"/>
+            <property name="arrayInitIndent" value="2"/>
+        </module>
+        <module name="AbbreviationAsWordInName">
+            <property name="ignoreFinal" value="false"/>
+            <property name="allowedAbbreviationLength" value="1"/>
+            <property name="tokens"
+                      value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, ANNOTATION_DEF, ANNOTATION_FIELD_DEF,
+                    PARAMETER_DEF, VARIABLE_DEF, METHOD_DEF"/>
+        </module>
+        <module name="OverloadMethodsDeclarationOrder"/>
+<!--        <module name="VariableDeclarationUsageDistance"/>-->
+        <module name="CustomImportOrder">
+            <property name="sortImportsInGroupAlphabetically" value="true"/>
+            <property name="separateLineBetweenGroups" value="true"/>
+            <property name="customImportOrderRules" value="STATIC###THIRD_PARTY_PACKAGE"/>
+            <property name="tokens" value="IMPORT, STATIC_IMPORT, PACKAGE_DEF"/>
+        </module>
+        <module name="MethodParamPad">
+            <property name="tokens"
+                      value="CTOR_DEF, LITERAL_NEW, METHOD_CALL, METHOD_DEF,
+                    SUPER_CTOR_CALL, ENUM_CONSTANT_DEF"/>
+        </module>
+        <module name="NoWhitespaceBefore">
+            <property name="tokens"
+                      value="COMMA, SEMI, POST_INC, POST_DEC, DOT, ELLIPSIS,
+                    LABELED_STAT, METHOD_REF"/>
+            <property name="allowLineBreaks" value="true"/>
+        </module>
+        <module name="ParenPad">
+            <property name="tokens"
+                      value="ANNOTATION, ANNOTATION_FIELD_DEF, CTOR_CALL, CTOR_DEF, DOT, ENUM_CONSTANT_DEF,
+                    EXPR, LITERAL_CATCH, LITERAL_DO, LITERAL_FOR, LITERAL_IF, LITERAL_NEW,
+                    LITERAL_SWITCH, LITERAL_SYNCHRONIZED, LITERAL_WHILE, METHOD_CALL,
+                    METHOD_DEF, QUESTION, RESOURCE_SPECIFICATION, SUPER_CTOR_CALL, LAMBDA"/>
+        </module>
+        <module name="OperatorWrap">
+            <property name="option" value="NL"/>
+            <property name="tokens"
+                      value="BAND, BOR, BSR, BXOR, DIV, EQUAL, GE, GT, LAND, LE, LITERAL_INSTANCEOF, LOR,
+                    LT, MINUS, MOD, NOT_EQUAL, PLUS, QUESTION, SL, SR, STAR, METHOD_REF "/>
+        </module>
+        <module name="AnnotationLocation">
+            <property name="id" value="AnnotationLocationMostCases"/>
+            <property name="tokens"
+                      value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, METHOD_DEF, CTOR_DEF"/>
+        </module>
+        <module name="AnnotationLocation">
+            <property name="id" value="AnnotationLocationVariables"/>
+            <property name="tokens" value="VARIABLE_DEF"/>
+            <property name="allowSamelineMultipleAnnotations" value="true"/>
+        </module>
+        <module name="NonEmptyAtclauseDescription"/>
+        <module name="InvalidJavadocPosition"/>
+        <module name="JavadocTagContinuationIndentation"/>
+        <module name="SummaryJavadoc">
+            <property name="forbiddenSummaryFragments"
+                      value="^@return the *|^This method returns |^A [{]@code [a-zA-Z0-9]+[}]( is a )"/>
+        </module>
+        <module name="JavadocParagraph"/>
+        <module name="AtclauseOrder">
+            <property name="tagOrder" value="@param, @return, @throws, @deprecated"/>
+            <property name="target"
+                      value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, METHOD_DEF, CTOR_DEF, VARIABLE_DEF"/>
+        </module>
+        <module name="JavadocMethod">
+            <property name="accessModifiers" value="public"/>
+            <property name="allowMissingParamTags" value="true"/>
+            <property name="allowMissingReturnTag" value="true"/>
+            <property name="allowedAnnotations" value="Override, Test"/>
+            <property name="tokens" value="METHOD_DEF, CTOR_DEF, ANNOTATION_FIELD_DEF"/>
+        </module>
+<!--        <module name="MissingJavadocMethod">-->
+<!--            <property name="scope" value="public"/>-->
+<!--            <property name="minLineCount" value="2"/>-->
+<!--            <property name="allowedAnnotations" value="Override, Test"/>-->
+<!--            <property name="tokens" value="METHOD_DEF, CTOR_DEF, ANNOTATION_FIELD_DEF"/>-->
+<!--        </module>-->
+        <module name="MethodName">
+            <property name="format" value="^[a-z][a-z0-9][a-zA-Z0-9_]*$"/>
+            <message key="name.invalidPattern"
+                     value="Method name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="SingleLineJavadoc">
+            <property name="ignoreInlineTags" value="false"/>
+        </module>
+        <module name="EmptyCatchBlock">
+            <property name="exceptionVariableName" value="ignored"/>
+        </module>
+        <module name="CommentsIndentation">
+            <property name="tokens" value="SINGLE_LINE_COMMENT, BLOCK_COMMENT_BEGIN"/>
+        </module>
+        <!-- https://checkstyle.org/config_filters.html#SuppressionXpathFilter -->
+        <module name="SuppressionXpathFilter">
+            <property name="file" value="${org.checkstyle.google.suppressionxpathfilter.config}"
+                      default="checkstyle-xpath-suppressions.xml" />
+            <property name="optional" value="true"/>
+        </module>
+    </module>
+</module>
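
As a quick illustration of what this e2e Checkstyle profile enforces, here is a hypothetical Java fragment (not taken from the repository) that satisfies the main rules configured above: 2-space indentation, lowerCamelCase member and parameter names, operators wrapped onto the continuation line, and catch variables named "ignored" for intentionally empty handlers.

```
public class StyleConformingExample {

  private int retryCount; // MemberName: lowerCamelCase

  public long sum(int firstValue, int secondValue) {
    // OperatorWrap(option=NL): the wrapped operator starts the continuation line
    return firstValue
        + secondValue;
  }

  public void runQuietly(Runnable action) {
    try {
      action.run();
    } catch (RuntimeException ignored) {
      // EmptyCatchBlock accepts an empty body when the variable is named "ignored"
    }
  }
}
```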

+ 2 - 2
etc/checkstyle/checkstyle.xml

@@ -318,7 +318,7 @@
             <property name="ignoreInlineTags" value="false"/>
         </module>
         <module name="EmptyCatchBlock">
-            <property name="exceptionVariableName" value="expected"/>
+            <property name="exceptionVariableName" value="ignored"/>
         </module>
         <module name="CommentsIndentation">
             <property name="tokens" value="SINGLE_LINE_COMMENT, BLOCK_COMMENT_BEGIN"/>
@@ -330,4 +330,4 @@
             <property name="optional" value="true"/>
         </module>
     </module>
-</module>
+</module>

+ 0 - 65
helm_chart.md

@@ -1,65 +0,0 @@
-# Quick Start with Helm Chart
-
-### General
-1. Clone/Copy Chart to your working directory
-2. Execute command ```helm install helm-release-name charts/kafka-ui```
-
-### Passing Kafka-UI configuration as Dict
-Create values.yml file
-```
-yamlApplicationConfig:
-  kafka:
-    clusters:
-      - name: yaml
-        bootstrapServers:  kafka-cluster-broker-endpoints:9092
-  auth:
-    type: disabled
-  management:
-    health:
-      ldap:
-        enabled: false
-```
-Install by executing command
-> helm install helm-release-name charts/kafka-ui -f values.yml
-
-
-### Passing configuration file as ConfigMap 
-Create config map
-```
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: kafka-ui-existing-configmap-as-a-configfile
-data:
-  config.yml: |-
-    kafka:
-      clusters:
-        - name: yaml
-          bootstrapServers: kafka-cluster-broker-endpoints:9092
-    auth:
-      type: disabled
-    management:
-      health:
-        ldap:
-          enabled: false
-```
-This ConfigMap will be mounted to the Pod
-
-Install by executing command
-> helm install helm-release-name charts/kafka-ui --set yamlApplicationConfigConfigMap.name="kafka-ui-config",yamlApplicationConfigConfigMap.keyName="config.yml"
-
-### Passing environment variables as ConfigMap
-Create config map
-```
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: kafka-ui-helm-values
-data:
-  KAFKA_CLUSTERS_0_NAME: "kafka-cluster-name"
-  KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: "kafka-cluster-broker-endpoints:9092"
-  AUTH_TYPE: "DISABLED"
-  MANAGEMENT_HEALTH_LDAP_ENABLED: "FALSE" 
-```
-Install by executing command
-> helm install helm-release-name charts/kafka-ui --set existingConfigMap="kafka-ui-helm-values"  

+ 8 - 2
kafka-ui-api/Dockerfile

@@ -1,8 +1,13 @@
-FROM azul/zulu-openjdk-alpine:17
+#FROM azul/zulu-openjdk-alpine:17-jre-headless
+FROM azul/zulu-openjdk-alpine@sha256:a36679ac0d28cb835e2a8c00e1e0d95509c6c51c5081c7782b85edb1f37a771a
 
 RUN apk add --no-cache gcompat # need to make snappy codec work
 RUN addgroup -S kafkaui && adduser -S kafkaui -G kafkaui
 
+# creating folder for dynamic config usage (certificate uploads, etc.)
+RUN mkdir /etc/kafkaui/
+RUN chown kafkaui /etc/kafkaui
+
 USER kafkaui
 
 ARG JAR_FILE
@@ -12,4 +17,5 @@ ENV JAVA_OPTS=
 
 EXPOSE 8080
 
-CMD java $JAVA_OPTS -jar kafka-ui-api.jar
+# see JmxSslSocketFactory docs to understand why add-opens is needed
+CMD java --add-opens java.rmi/javax.rmi.ssl=ALL-UNNAMED  $JAVA_OPTS -jar kafka-ui-api.jar

+ 25 - 0
kafka-ui-api/pom.xml

@@ -199,6 +199,31 @@
             <version>${antlr4-maven-plugin.version}</version>
         </dependency>
 
+        <dependency>
+            <groupId>org.opendatadiscovery</groupId>
+            <artifactId>oddrn-generator-java</artifactId>
+            <version>${odd-oddrn-generator.version}</version>
+        </dependency>
+        <dependency>
+            <groupId>org.opendatadiscovery</groupId>
+            <artifactId>ingestion-contract-client</artifactId>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.springframework.boot</groupId>
+                    <artifactId>spring-boot-starter-webflux</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>io.projectreactor</groupId>
+                    <artifactId>reactor-core</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>io.projectreactor.ipc</groupId>
+                    <artifactId>reactor-netty</artifactId>
+                </exclusion>
+            </exclusions>
+            <version>${odd-oddrn-client.version}</version>
+        </dependency>
+
         <dependency>
             <groupId>org.springframework.security</groupId>
             <artifactId>spring-security-ldap</artifactId>

+ 11 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/KafkaUiApplication.java

@@ -1,8 +1,10 @@
 package com.provectus.kafka.ui;
 
-import org.springframework.boot.SpringApplication;
+import com.provectus.kafka.ui.util.DynamicConfigOperations;
 import org.springframework.boot.autoconfigure.SpringBootApplication;
 import org.springframework.boot.autoconfigure.ldap.LdapAutoConfiguration;
+import org.springframework.boot.builder.SpringApplicationBuilder;
+import org.springframework.context.ConfigurableApplicationContext;
 import org.springframework.scheduling.annotation.EnableAsync;
 import org.springframework.scheduling.annotation.EnableScheduling;
 
@@ -12,6 +14,13 @@ import org.springframework.scheduling.annotation.EnableScheduling;
 public class KafkaUiApplication {
 
   public static void main(String[] args) {
-    SpringApplication.run(KafkaUiApplication.class, args);
+    startApplication(args);
+  }
+
+  public static ConfigurableApplicationContext startApplication(String[] args) {
+    return new SpringApplicationBuilder(KafkaUiApplication.class)
+        .initializers(DynamicConfigOperations.dynamicConfigPropertiesInitializer())
+        .build()
+        .run(args);
   }
 }

+ 0 - 22
kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/KafkaConnectClientsFactory.java

@@ -1,22 +0,0 @@
-package com.provectus.kafka.ui.client;
-
-import com.provectus.kafka.ui.connect.api.KafkaConnectClientApi;
-import com.provectus.kafka.ui.model.KafkaConnectCluster;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import org.springframework.beans.factory.annotation.Value;
-import org.springframework.stereotype.Service;
-import org.springframework.util.unit.DataSize;
-
-@Service
-public class KafkaConnectClientsFactory {
-
-  @Value("${webclient.max-in-memory-buffer-size:20MB}")
-  private DataSize maxBuffSize;
-
-  private final Map<String, KafkaConnectClientApi> cache = new ConcurrentHashMap<>();
-
-  public KafkaConnectClientApi withKafkaConnectConfig(KafkaConnectCluster config) {
-    return cache.computeIfAbsent(config.getAddress(), s -> new RetryingKafkaConnectClient(config, maxBuffSize));
-  }
-}

+ 206 - 106
kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/RetryingKafkaConnectClient.java

@@ -1,49 +1,33 @@
 package com.provectus.kafka.ui.client;
 
-import com.fasterxml.jackson.databind.DeserializationFeature;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
+import static com.provectus.kafka.ui.config.ClustersProperties.ConnectCluster;
+
+import com.provectus.kafka.ui.config.ClustersProperties;
 import com.provectus.kafka.ui.connect.ApiClient;
-import com.provectus.kafka.ui.connect.RFC3339DateFormat;
 import com.provectus.kafka.ui.connect.api.KafkaConnectClientApi;
 import com.provectus.kafka.ui.connect.model.Connector;
+import com.provectus.kafka.ui.connect.model.ConnectorPlugin;
+import com.provectus.kafka.ui.connect.model.ConnectorPluginConfigValidationResponse;
+import com.provectus.kafka.ui.connect.model.ConnectorStatus;
+import com.provectus.kafka.ui.connect.model.ConnectorTask;
+import com.provectus.kafka.ui.connect.model.ConnectorTopics;
 import com.provectus.kafka.ui.connect.model.NewConnector;
+import com.provectus.kafka.ui.connect.model.TaskStatus;
 import com.provectus.kafka.ui.exception.KafkaConnectConflictReponseException;
 import com.provectus.kafka.ui.exception.ValidationException;
-import com.provectus.kafka.ui.model.InternalSchemaRegistry;
-import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.KafkaConnectCluster;
-import com.provectus.kafka.ui.util.SecuredWebClient;
-import io.netty.handler.ssl.SslContext;
-import io.netty.handler.ssl.SslContextBuilder;
-import java.io.FileInputStream;
-import java.security.KeyStore;
-import java.text.DateFormat;
+import com.provectus.kafka.ui.util.WebClientConfigurator;
 import java.time.Duration;
 import java.util.List;
 import java.util.Map;
-import java.util.TimeZone;
-import javax.net.ssl.KeyManagerFactory;
-import javax.net.ssl.TrustManagerFactory;
+import javax.annotation.Nullable;
 import lombok.extern.slf4j.Slf4j;
-import org.openapitools.jackson.nullable.JsonNullableModule;
-import org.springframework.core.ParameterizedTypeReference;
-import org.springframework.http.HttpHeaders;
-import org.springframework.http.HttpMethod;
-import org.springframework.http.MediaType;
-import org.springframework.http.client.reactive.ReactorClientHttpConnector;
-import org.springframework.http.codec.json.Jackson2JsonDecoder;
-import org.springframework.http.codec.json.Jackson2JsonEncoder;
-import org.springframework.util.MultiValueMap;
-import org.springframework.util.ResourceUtils;
+import org.springframework.http.ResponseEntity;
 import org.springframework.util.unit.DataSize;
 import org.springframework.web.client.RestClientException;
-import org.springframework.web.reactive.function.client.ExchangeStrategies;
 import org.springframework.web.reactive.function.client.WebClient;
 import org.springframework.web.reactive.function.client.WebClientResponseException;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
-import reactor.netty.http.client.HttpClient;
 import reactor.util.retry.Retry;
 
 @Slf4j
@@ -51,8 +35,10 @@ public class RetryingKafkaConnectClient extends KafkaConnectClientApi {
   private static final int MAX_RETRIES = 5;
   private static final Duration RETRIES_DELAY = Duration.ofMillis(200);
 
-  public RetryingKafkaConnectClient(KafkaConnectCluster config, DataSize maxBuffSize) {
-    super(new RetryingApiClient(config, maxBuffSize));
+  public RetryingKafkaConnectClient(ConnectCluster config,
+                                    @Nullable ClustersProperties.TruststoreConfig truststoreConfig,
+                                    DataSize maxBuffSize) {
+    super(new RetryingApiClient(config, truststoreConfig, maxBuffSize));
   }
 
   private static Retry conflictCodeRetry() {
@@ -95,90 +81,204 @@ public class RetryingKafkaConnectClient extends KafkaConnectClientApi {
     );
   }
 
-  private static class RetryingApiClient extends ApiClient {
+  @Override
+  public Mono<ResponseEntity<Connector>> createConnectorWithHttpInfo(NewConnector newConnector)
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.createConnectorWithHttpInfo(newConnector));
+  }
 
-    private static final DateFormat dateFormat = getDefaultDateFormat();
-    private static final ObjectMapper mapper = buildObjectMapper(dateFormat);
+  @Override
+  public Mono<Void> deleteConnector(String connectorName) throws WebClientResponseException {
+    return withRetryOnConflict(super.deleteConnector(connectorName));
+  }
 
-    public RetryingApiClient(KafkaConnectCluster config, DataSize maxBuffSize) {
-      super(buildWebClient(mapper, maxBuffSize, config), mapper, dateFormat);
-      setBasePath(config.getAddress());
-      setUsername(config.getUserName());
-      setPassword(config.getPassword());
-    }
+  @Override
+  public Mono<ResponseEntity<Void>> deleteConnectorWithHttpInfo(String connectorName)
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.deleteConnectorWithHttpInfo(connectorName));
+  }
 
-    public static DateFormat getDefaultDateFormat() {
-      DateFormat dateFormat = new RFC3339DateFormat();
-      dateFormat.setTimeZone(TimeZone.getTimeZone("UTC"));
-      return dateFormat;
-    }
 
-    public static WebClient buildWebClient(ObjectMapper mapper, DataSize maxBuffSize, KafkaConnectCluster config) {
-      ExchangeStrategies strategies = ExchangeStrategies
-              .builder()
-              .codecs(clientDefaultCodecsConfigurer -> {
-                clientDefaultCodecsConfigurer.defaultCodecs()
-                        .jackson2JsonEncoder(new Jackson2JsonEncoder(mapper, MediaType.APPLICATION_JSON));
-                clientDefaultCodecsConfigurer.defaultCodecs()
-                        .jackson2JsonDecoder(new Jackson2JsonDecoder(mapper, MediaType.APPLICATION_JSON));
-                clientDefaultCodecsConfigurer.defaultCodecs()
-                        .maxInMemorySize((int) maxBuffSize.toBytes());
-              })
-              .build();
-
-      try {
-        WebClient.Builder webClient = SecuredWebClient.configure(
-            config.getKeystoreLocation(),
-            config.getKeystorePassword(),
-            config.getTruststoreLocation(),
-            config.getTruststorePassword()
-        );
-
-        return webClient.exchangeStrategies(strategies).build();
-      } catch (Exception e) {
-        throw new IllegalStateException(
-            "cannot create TLS configuration for kafka-connect cluster " + config.getName(), e);
-      }
-    }
+  @Override
+  public Mono<Connector> getConnector(String connectorName) throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnector(connectorName));
+  }
 
-    public static ObjectMapper buildObjectMapper(DateFormat dateFormat) {
-      ObjectMapper mapper = new ObjectMapper();
-      mapper.setDateFormat(dateFormat);
-      mapper.registerModule(new JavaTimeModule());
-      mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
-      JsonNullableModule jnm = new JsonNullableModule();
-      mapper.registerModule(jnm);
-      return mapper;
-    }
+  @Override
+  public Mono<ResponseEntity<Connector>> getConnectorWithHttpInfo(String connectorName)
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectorWithHttpInfo(connectorName));
+  }
+
+  @Override
+  public Mono<Map<String, Object>> getConnectorConfig(String connectorName) throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectorConfig(connectorName));
+  }
+
+  @Override
+  public Mono<ResponseEntity<Map<String, Object>>> getConnectorConfigWithHttpInfo(String connectorName)
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectorConfigWithHttpInfo(connectorName));
+  }
 
-    @Override
-    public <T> Mono<T> invokeAPI(String path, HttpMethod method, Map<String, Object> pathParams,
-                                 MultiValueMap<String, String> queryParams, Object body,
-                                 HttpHeaders headerParams,
-                                 MultiValueMap<String, String> cookieParams,
-                                 MultiValueMap<String, Object> formParams, List<MediaType> accept,
-                                 MediaType contentType, String[] authNames,
-                                 ParameterizedTypeReference<T> returnType)
-        throws RestClientException {
-      return withRetryOnConflict(
-          super.invokeAPI(path, method, pathParams, queryParams, body, headerParams, cookieParams,
-              formParams, accept, contentType, authNames, returnType)
-      );
+  @Override
+  public Flux<ConnectorPlugin> getConnectorPlugins() throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectorPlugins());
+  }
+
+  @Override
+  public Mono<ResponseEntity<List<ConnectorPlugin>>> getConnectorPluginsWithHttpInfo()
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectorPluginsWithHttpInfo());
+  }
+
+  @Override
+  public Mono<ConnectorStatus> getConnectorStatus(String connectorName) throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectorStatus(connectorName));
+  }
+
+  @Override
+  public Mono<ResponseEntity<ConnectorStatus>> getConnectorStatusWithHttpInfo(String connectorName)
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectorStatusWithHttpInfo(connectorName));
+  }
+
+  @Override
+  public Mono<TaskStatus> getConnectorTaskStatus(String connectorName, Integer taskId)
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectorTaskStatus(connectorName, taskId));
+  }
+
+  @Override
+  public Mono<ResponseEntity<TaskStatus>> getConnectorTaskStatusWithHttpInfo(String connectorName, Integer taskId)
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectorTaskStatusWithHttpInfo(connectorName, taskId));
+  }
+
+  @Override
+  public Flux<ConnectorTask> getConnectorTasks(String connectorName) throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectorTasks(connectorName));
+  }
+
+  @Override
+  public Mono<ResponseEntity<List<ConnectorTask>>> getConnectorTasksWithHttpInfo(String connectorName)
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectorTasksWithHttpInfo(connectorName));
+  }
+
+  @Override
+  public Mono<Map<String, ConnectorTopics>> getConnectorTopics(String connectorName) throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectorTopics(connectorName));
+  }
+
+  @Override
+  public Mono<ResponseEntity<Map<String, ConnectorTopics>>> getConnectorTopicsWithHttpInfo(String connectorName)
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectorTopicsWithHttpInfo(connectorName));
+  }
+
+  @Override
+  public Flux<String> getConnectors(String search) throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectors(search));
+  }
+
+  @Override
+  public Mono<ResponseEntity<List<String>>> getConnectorsWithHttpInfo(String search) throws WebClientResponseException {
+    return withRetryOnConflict(super.getConnectorsWithHttpInfo(search));
+  }
+
+  @Override
+  public Mono<Void> pauseConnector(String connectorName) throws WebClientResponseException {
+    return withRetryOnConflict(super.pauseConnector(connectorName));
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> pauseConnectorWithHttpInfo(String connectorName) throws WebClientResponseException {
+    return withRetryOnConflict(super.pauseConnectorWithHttpInfo(connectorName));
+  }
+
+  @Override
+  public Mono<Void> restartConnector(String connectorName, Boolean includeTasks, Boolean onlyFailed)
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.restartConnector(connectorName, includeTasks, onlyFailed));
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> restartConnectorWithHttpInfo(String connectorName, Boolean includeTasks,
+                                                                 Boolean onlyFailed) throws WebClientResponseException {
+    return withRetryOnConflict(super.restartConnectorWithHttpInfo(connectorName, includeTasks, onlyFailed));
+  }
+
+  @Override
+  public Mono<Void> restartConnectorTask(String connectorName, Integer taskId) throws WebClientResponseException {
+    return withRetryOnConflict(super.restartConnectorTask(connectorName, taskId));
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> restartConnectorTaskWithHttpInfo(String connectorName, Integer taskId)
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.restartConnectorTaskWithHttpInfo(connectorName, taskId));
+  }
+
+  @Override
+  public Mono<Void> resumeConnector(String connectorName) throws WebClientResponseException {
+    return super.resumeConnector(connectorName);
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> resumeConnectorWithHttpInfo(String connectorName)
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.resumeConnectorWithHttpInfo(connectorName));
+  }
+
+  @Override
+  public Mono<ResponseEntity<Connector>> setConnectorConfigWithHttpInfo(String connectorName,
+                                                                        Map<String, Object> requestBody)
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.setConnectorConfigWithHttpInfo(connectorName, requestBody));
+  }
+
+  @Override
+  public Mono<ConnectorPluginConfigValidationResponse> validateConnectorPluginConfig(String pluginName,
+                                                                                     Map<String, Object> requestBody)
+      throws WebClientResponseException {
+    return withRetryOnConflict(super.validateConnectorPluginConfig(pluginName, requestBody));
+  }
+
+  @Override
+  public Mono<ResponseEntity<ConnectorPluginConfigValidationResponse>> validateConnectorPluginConfigWithHttpInfo(
+      String pluginName, Map<String, Object> requestBody) throws WebClientResponseException {
+    return withRetryOnConflict(super.validateConnectorPluginConfigWithHttpInfo(pluginName, requestBody));
+  }
+
+  private static class RetryingApiClient extends ApiClient {
+
+    public RetryingApiClient(ConnectCluster config,
+                             ClustersProperties.TruststoreConfig truststoreConfig,
+                             DataSize maxBuffSize) {
+      super(buildWebClient(maxBuffSize, config, truststoreConfig), null, null);
+      setBasePath(config.getAddress());
+      setUsername(config.getUsername());
+      setPassword(config.getPassword());
     }
 
-    @Override
-    public <T> Flux<T> invokeFluxAPI(String path, HttpMethod method, Map<String, Object> pathParams,
-                                     MultiValueMap<String, String> queryParams, Object body,
-                                     HttpHeaders headerParams,
-                                     MultiValueMap<String, String> cookieParams,
-                                     MultiValueMap<String, Object> formParams,
-                                     List<MediaType> accept, MediaType contentType,
-                                     String[] authNames, ParameterizedTypeReference<T> returnType)
-        throws RestClientException {
-      return withRetryOnConflict(
-          super.invokeFluxAPI(path, method, pathParams, queryParams, body, headerParams,
-              cookieParams, formParams, accept, contentType, authNames, returnType)
-      );
+    public static WebClient buildWebClient(DataSize maxBuffSize,
+                                           ConnectCluster config,
+                                           ClustersProperties.TruststoreConfig truststoreConfig) {
+      return new WebClientConfigurator()
+          .configureSsl(
+              truststoreConfig,
+              new ClustersProperties.KeystoreConfig(
+                  config.getKeystoreLocation(),
+                  config.getKeystorePassword()
+              )
+          )
+          .configureBasicAuth(
+              config.getUsername(),
+              config.getPassword()
+          )
+          .configureBufferSize(maxBuffSize)
+          .build();
     }
   }
 }
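
All of the overrides above funnel through withRetryOnConflict. Below is a stripped-down, standalone sketch of that pattern, using the 5-attempt / 200 ms values from the constants shown earlier; treating HTTP 409 Conflict as the retryable case is an assumption about what conflictCodeRetry() checks.

```
import java.time.Duration;
import org.springframework.web.reactive.function.client.WebClientResponseException;
import reactor.core.publisher.Mono;
import reactor.util.retry.Retry;

class RetryOnConflictSketch {

  // mirrors MAX_RETRIES / RETRIES_DELAY from the class above
  private static final Retry CONFLICT_RETRY = Retry
      .fixedDelay(5, Duration.ofMillis(200))
      .filter(e -> e instanceof WebClientResponseException.Conflict);

  // wraps any reactive call so 409 responses are retried before the error propagates
  static <T> Mono<T> withRetryOnConflict(Mono<T> call) {
    return call.retryWhen(CONFLICT_RETRY);
  }
}
```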

+ 88 - 21
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java

@@ -1,14 +1,18 @@
 package com.provectus.kafka.ui.config;
 
+import com.provectus.kafka.ui.model.MetricsConfig;
+import jakarta.annotation.PostConstruct;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Properties;
 import java.util.Set;
-import javax.annotation.PostConstruct;
+import javax.annotation.Nullable;
+import lombok.AllArgsConstructor;
+import lombok.Builder;
 import lombok.Data;
+import lombok.NoArgsConstructor;
 import lombok.ToString;
 import org.springframework.boot.context.properties.ConfigurationProperties;
 import org.springframework.context.annotation.Configuration;
@@ -21,59 +25,79 @@ public class ClustersProperties {
 
   List<Cluster> clusters = new ArrayList<>();
 
+  String internalTopicPrefix;
+
+  Integer adminClientTimeout;
+
+  PollingProperties polling = new PollingProperties();
+
   @Data
   public static class Cluster {
     String name;
     String bootstrapServers;
     String schemaRegistry;
     SchemaRegistryAuth schemaRegistryAuth;
-    WebClientSsl schemaRegistrySsl;
+    KeystoreConfig schemaRegistrySsl;
     String ksqldbServer;
     KsqldbServerAuth ksqldbServerAuth;
-    WebClientSsl ksqldbServerSsl;
+    KeystoreConfig ksqldbServerSsl;
     List<ConnectCluster> kafkaConnect;
     MetricsConfigData metrics;
-    Properties properties;
+    Map<String, Object> properties;
     boolean readOnly = false;
-    boolean disableLogDirsCollection = false;
-    List<SerdeConfig> serde = new ArrayList<>();
+    List<SerdeConfig> serde;
     String defaultKeySerde;
     String defaultValueSerde;
-    List<Masking> masking = new ArrayList<>();
-    long pollingThrottleRate = 0;
+    List<Masking> masking;
+    Long pollingThrottleRate;
+    TruststoreConfig ssl;
+  }
+
+  @Data
+  public static class PollingProperties {
+    Integer pollTimeoutMs;
+    Integer partitionPollTimeout;
+    Integer noDataEmptyPolls;
+    Integer maxPageSize;
+    Integer defaultPageSize;
   }
 
   @Data
+  @ToString(exclude = "password")
   public static class MetricsConfigData {
     String type;
     Integer port;
-    boolean ssl;
+    Boolean ssl;
     String username;
     String password;
+    String keystoreLocation;
+    String keystorePassword;
   }
 
   @Data
+  @NoArgsConstructor
+  @AllArgsConstructor
+  @Builder(toBuilder = true)
+  @ToString(exclude = {"password", "keystorePassword"})
   public static class ConnectCluster {
     String name;
     String address;
-    String userName;
+    String username;
     String password;
     String keystoreLocation;
     String keystorePassword;
-    String truststoreLocation;
-    String truststorePassword;
   }
 
   @Data
+  @ToString(exclude = {"password"})
   public static class SchemaRegistryAuth {
     String username;
     String password;
   }
 
   @Data
-  public static class WebClientSsl {
-    String keystoreLocation;
-    String keystorePassword;
+  @ToString(exclude = {"truststorePassword"})
+  public static class TruststoreConfig {
     String truststoreLocation;
     String truststorePassword;
   }
@@ -83,7 +107,7 @@ public class ClustersProperties {
     String name;
     String className;
     String filePath;
-    Map<String, Object> properties = new HashMap<>();
+    Map<String, Object> properties;
     String topicKeysPattern;
     String topicValuesPattern;
   }
@@ -95,12 +119,21 @@ public class ClustersProperties {
     String password;
   }
 
+  @Data
+  @NoArgsConstructor
+  @AllArgsConstructor
+  @ToString(exclude = {"keystorePassword"})
+  public static class KeystoreConfig {
+    String keystoreLocation;
+    String keystorePassword;
+  }
+
   @Data
   public static class Masking {
     Type type;
-    List<String> fields = List.of(); //if empty - policy will be applied to all fields
-    List<String> pattern = List.of("X", "x", "n", "-"); //used when type=MASK
-    String replacement = "***DATA_MASKED***"; //used when type=REPLACE
+    List<String> fields; //if null or empty list - policy will be applied to all fields
+    List<String> pattern; //used when type=MASK
+    String replacement; //used when type=REPLACE
     String topicKeysPattern;
     String topicValuesPattern;
 
@@ -111,7 +144,41 @@ public class ClustersProperties {
 
   @PostConstruct
   public void validateAndSetDefaults() {
-    validateClusterNames();
+    if (clusters != null) {
+      validateClusterNames();
+      flattenClusterProperties();
+      setMetricsDefaults();
+    }
+  }
+
+  private void setMetricsDefaults() {
+    for (Cluster cluster : clusters) {
+      if (cluster.getMetrics() != null && !StringUtils.hasText(cluster.getMetrics().getType())) {
+        cluster.getMetrics().setType(MetricsConfig.JMX_METRICS_TYPE);
+      }
+    }
+  }
+
+  private void flattenClusterProperties() {
+    for (Cluster cluster : clusters) {
+      cluster.setProperties(flattenClusterProperties(null, cluster.getProperties()));
+    }
+  }
+
+  private Map<String, Object> flattenClusterProperties(@Nullable String prefix,
+                                                       @Nullable Map<String, Object> propertiesMap) {
+    Map<String, Object> flattened = new HashMap<>();
+    if (propertiesMap != null) {
+      propertiesMap.forEach((k, v) -> {
+        String key = prefix == null ? k : prefix + "." + k;
+        if (v instanceof Map<?, ?>) {
+          flattened.putAll(flattenClusterProperties(key, (Map<String, Object>) v));
+        } else {
+          flattened.put(key, v);
+        }
+      });
+    }
+    return flattened;
   }
 
   private void validateClusterNames() {
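
For context on what flattenClusterProperties produces, here is a small hypothetical sketch (the nested map values are invented for illustration): nested `properties` entries from YAML are collapsed into the dotted keys that Kafka clients expect.

```
import java.util.HashMap;
import java.util.Map;

class FlattenSketch {

  public static void main(String[] args) {
    // YAML such as
    //   properties:
    //     security:
    //       protocol: SASL_SSL
    // binds to nested maps; flattening yields {security.protocol=SASL_SSL}
    Map<String, Object> nested = Map.of("security", Map.of("protocol", "SASL_SSL"));
    System.out.println(flatten(null, nested));
  }

  // simplified restatement of the recursive flattening shown in the diff
  static Map<String, Object> flatten(String prefix, Map<String, Object> props) {
    Map<String, Object> out = new HashMap<>();
    props.forEach((k, v) -> {
      String key = prefix == null ? k : prefix + "." + k;
      if (v instanceof Map<?, ?> nestedMap) {
        out.putAll(flatten(key, (Map<String, Object>) nestedMap));
      } else {
        out.put(key, v);
      }
    });
    return out;
  }
}
```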

+ 1 - 11
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/Config.java

@@ -5,7 +5,6 @@ import java.util.Map;
 import lombok.AllArgsConstructor;
 import org.openapitools.jackson.nullable.JsonNullableModule;
 import org.springframework.beans.factory.ObjectProvider;
-import org.springframework.beans.factory.annotation.Value;
 import org.springframework.boot.autoconfigure.web.ServerProperties;
 import org.springframework.boot.autoconfigure.web.reactive.WebFluxProperties;
 import org.springframework.context.ApplicationContext;
@@ -15,8 +14,6 @@ import org.springframework.http.server.reactive.ContextPathCompositeHandler;
 import org.springframework.http.server.reactive.HttpHandler;
 import org.springframework.jmx.export.MBeanExporter;
 import org.springframework.util.StringUtils;
-import org.springframework.util.unit.DataSize;
-import org.springframework.web.reactive.function.client.WebClient;
 import org.springframework.web.server.adapter.WebHttpHandlerBuilder;
 
 @Configuration
@@ -52,14 +49,7 @@ public class Config {
   }
 
   @Bean
-  public WebClient webClient(
-      @Value("${webclient.max-in-memory-buffer-size:20MB}") DataSize maxBuffSize) {
-    return WebClient.builder()
-        .codecs(c -> c.defaultCodecs().maxInMemorySize((int) maxBuffSize.toBytes()))
-        .build();
-  }
-
-  @Bean
+  // will be used by webflux json mapping
   public JsonNullableModule jsonNullableModule() {
     return new JsonNullableModule();
   }

+ 1 - 41
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CorsGlobalConfiguration.java

@@ -1,25 +1,12 @@
 package com.provectus.kafka.ui.config;
 
-import lombok.AllArgsConstructor;
-import org.springframework.boot.autoconfigure.web.ServerProperties;
-import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
-import org.springframework.context.annotation.Profile;
-import org.springframework.core.io.ClassPathResource;
-import org.springframework.util.StringUtils;
 import org.springframework.web.reactive.config.CorsRegistry;
 import org.springframework.web.reactive.config.WebFluxConfigurer;
-import org.springframework.web.reactive.function.server.RouterFunction;
-import org.springframework.web.reactive.function.server.RouterFunctions;
-import org.springframework.web.reactive.function.server.ServerResponse;
 
 @Configuration
-@Profile("local")
-@AllArgsConstructor
 public class CorsGlobalConfiguration implements WebFluxConfigurer {
 
-  private final ServerProperties serverProperties;
-
   @Override
   public void addCorsMappings(CorsRegistry registry) {
     registry.addMapping("/**")
@@ -28,31 +15,4 @@ public class CorsGlobalConfiguration implements WebFluxConfigurer {
         .allowedHeaders("*")
         .allowCredentials(false);
   }
-
-  private String withContext(String pattern) {
-    final String basePath = serverProperties.getServlet().getContextPath();
-    if (StringUtils.hasText(basePath)) {
-      return basePath + pattern;
-    } else {
-      return pattern;
-    }
-  }
-
-  @Bean
-  public RouterFunction<ServerResponse> cssFilesRouter() {
-    return RouterFunctions
-        .resources(withContext("/static/css/**"), new ClassPathResource("static/static/css/"));
-  }
-
-  @Bean
-  public RouterFunction<ServerResponse> jsFilesRouter() {
-    return RouterFunctions
-        .resources(withContext("/static/js/**"), new ClassPathResource("static/static/js/"));
-  }
-
-  @Bean
-  public RouterFunction<ServerResponse> mediaFilesRouter() {
-    return RouterFunctions
-        .resources(withContext("/static/media/**"), new ClassPathResource("static/static/media/"));
-  }
-}
+}

+ 33 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/WebclientProperties.java

@@ -0,0 +1,33 @@
+package com.provectus.kafka.ui.config;
+
+import com.provectus.kafka.ui.exception.ValidationException;
+import java.beans.Transient;
+import javax.annotation.PostConstruct;
+import lombok.Data;
+import org.springframework.boot.context.properties.ConfigurationProperties;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.util.unit.DataSize;
+
+@Configuration
+@ConfigurationProperties("webclient")
+@Data
+public class WebclientProperties {
+
+  String maxInMemoryBufferSize;
+
+  @PostConstruct
+  public void validate() {
+    validateAndSetDefaultBufferSize();
+  }
+
+  private void validateAndSetDefaultBufferSize() {
+    if (maxInMemoryBufferSize != null) {
+      try {
+        DataSize.parse(maxInMemoryBufferSize);
+      } catch (Exception e) {
+        throw new ValidationException("Invalid format for webclient.maxInMemoryBufferSize");
+      }
+    }
+  }
+
+}
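
The validation above simply delegates to Spring's DataSize parser; a minimal sketch (with made-up values) of what passes and what fails:

```
import org.springframework.util.unit.DataSize;

class BufferSizeSketch {

  public static void main(String[] args) {
    // values like "20MB" or "512KB" are accepted
    System.out.println(DataSize.parse("20MB").toBytes());       // 20971520
    System.out.println(DataSize.parse("512KB").toKilobytes());  // 512

    // anything DataSize.parse cannot read throws IllegalArgumentException,
    // which the @PostConstruct hook above converts into a ValidationException
    try {
      DataSize.parse("twenty megabytes");
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}
```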

+ 3 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthProperties.java

@@ -1,10 +1,9 @@
 package com.provectus.kafka.ui.config.auth;
 
+import jakarta.annotation.PostConstruct;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
-import javax.annotation.PostConstruct;
 import lombok.Data;
 import org.springframework.boot.context.properties.ConfigurationProperties;
 import org.springframework.util.Assert;
@@ -32,13 +31,13 @@ public class OAuthProperties {
     private String clientName;
     private String redirectUri;
     private String authorizationGrantType;
-    private Set<String> scope = new HashSet<>();
+    private Set<String> scope;
     private String issuerUri;
     private String authorizationUri;
     private String tokenUri;
     private String userInfoUri;
     private String jwkSetUri;
     private String userNameAttribute;
-    private Map<String, String> customParams = new HashMap<>();
+    private Map<String, String> customParams;
   }
 }

+ 5 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/OAuthPropertiesConverter.java

@@ -4,6 +4,8 @@ import static com.provectus.kafka.ui.config.auth.OAuthProperties.OAuth2Provider;
 import static org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties.Provider;
 import static org.springframework.boot.autoconfigure.security.oauth2.client.OAuth2ClientProperties.Registration;
 
+import java.util.Optional;
+import java.util.Set;
 import lombok.AccessLevel;
 import lombok.NoArgsConstructor;
 import org.apache.commons.lang3.StringUtils;
@@ -24,7 +26,7 @@ public final class OAuthPropertiesConverter {
       registration.setClientId(provider.getClientId());
       registration.setClientSecret(provider.getClientSecret());
       registration.setClientName(provider.getClientName());
-      registration.setScope(provider.getScope());
+      registration.setScope(Optional.ofNullable(provider.getScope()).orElse(Set.of()));
       registration.setRedirectUri(provider.getRedirectUri());
       registration.setAuthorizationGrantType(provider.getAuthorizationGrantType());
 
@@ -71,7 +73,8 @@ public final class OAuthPropertiesConverter {
   }
 
   private static boolean isGoogle(OAuth2Provider provider) {
-    return GOOGLE.equalsIgnoreCase(provider.getCustomParams().get(TYPE));
+    return provider.getCustomParams() != null
+        && GOOGLE.equalsIgnoreCase(provider.getCustomParams().get(TYPE));
   }
 }
 

+ 5 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/logout/CognitoLogoutSuccessHandler.java

@@ -12,6 +12,7 @@ import org.springframework.security.core.Authentication;
 import org.springframework.security.web.server.WebFilterExchange;
 import org.springframework.security.web.util.UrlUtils;
 import org.springframework.stereotype.Component;
+import org.springframework.util.Assert;
 import org.springframework.web.server.WebSession;
 import org.springframework.web.util.UriComponents;
 import org.springframework.web.util.UriComponentsBuilder;
@@ -45,6 +46,10 @@ public class CognitoLogoutSuccessHandler implements LogoutSuccessHandler {
         .fragment(null)
         .build();
 
+    Assert.isTrue(
+        provider.getCustomParams() != null && provider.getCustomParams().containsKey("logoutUrl"),
+        "Custom params should contain 'logoutUrl'"
+    );
     final var uri = UriComponentsBuilder
         .fromUri(URI.create(provider.getCustomParams().get("logoutUrl")))
         .queryParam("client_id", provider.getClientId())

+ 1 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/AccessController.java

@@ -66,7 +66,7 @@ public class AccessController implements AuthorizationApi {
           UserPermissionDTO dto = new UserPermissionDTO();
           dto.setClusters(clusters);
           dto.setResource(ResourceTypeDTO.fromValue(permission.getResource().toString().toUpperCase()));
-          dto.setValue(permission.getValue() != null ? permission.getValue().toString() : null);
+          dto.setValue(permission.getValue());
           dto.setActions(permission.getActions()
               .stream()
               .map(String::toUpperCase)

+ 130 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ApplicationConfigController.java

@@ -0,0 +1,130 @@
+package com.provectus.kafka.ui.controller;
+
+import static com.provectus.kafka.ui.model.rbac.permission.ApplicationConfigAction.EDIT;
+import static com.provectus.kafka.ui.model.rbac.permission.ApplicationConfigAction.VIEW;
+
+import com.provectus.kafka.ui.api.ApplicationConfigApi;
+import com.provectus.kafka.ui.config.ClustersProperties;
+import com.provectus.kafka.ui.model.ApplicationConfigDTO;
+import com.provectus.kafka.ui.model.ApplicationConfigPropertiesDTO;
+import com.provectus.kafka.ui.model.ApplicationConfigValidationDTO;
+import com.provectus.kafka.ui.model.ApplicationInfoDTO;
+import com.provectus.kafka.ui.model.ClusterConfigValidationDTO;
+import com.provectus.kafka.ui.model.RestartRequestDTO;
+import com.provectus.kafka.ui.model.UploadedFileInfoDTO;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.service.ApplicationInfoService;
+import com.provectus.kafka.ui.service.KafkaClusterFactory;
+import com.provectus.kafka.ui.service.rbac.AccessControlService;
+import com.provectus.kafka.ui.util.ApplicationRestarter;
+import com.provectus.kafka.ui.util.DynamicConfigOperations;
+import com.provectus.kafka.ui.util.DynamicConfigOperations.PropertiesStructure;
+import java.util.Map;
+import javax.annotation.Nullable;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.mapstruct.Mapper;
+import org.mapstruct.factory.Mappers;
+import org.springframework.http.ResponseEntity;
+import org.springframework.http.codec.multipart.FilePart;
+import org.springframework.web.bind.annotation.RestController;
+import org.springframework.web.server.ServerWebExchange;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+import reactor.util.function.Tuple2;
+import reactor.util.function.Tuples;
+
+@Slf4j
+@RestController
+@RequiredArgsConstructor
+public class ApplicationConfigController implements ApplicationConfigApi {
+
+  private static final PropertiesMapper MAPPER = Mappers.getMapper(PropertiesMapper.class);
+
+  @Mapper
+  interface PropertiesMapper {
+
+    PropertiesStructure fromDto(ApplicationConfigPropertiesDTO dto);
+
+    ApplicationConfigPropertiesDTO toDto(PropertiesStructure propertiesStructure);
+  }
+
+  private final AccessControlService accessControlService;
+  private final DynamicConfigOperations dynamicConfigOperations;
+  private final ApplicationRestarter restarter;
+  private final KafkaClusterFactory kafkaClusterFactory;
+  private final ApplicationInfoService applicationInfoService;
+
+  @Override
+  public Mono<ResponseEntity<ApplicationInfoDTO>> getApplicationInfo(ServerWebExchange exchange) {
+    return Mono.just(applicationInfoService.getApplicationInfo()).map(ResponseEntity::ok);
+  }
+
+  @Override
+  public Mono<ResponseEntity<ApplicationConfigDTO>> getCurrentConfig(ServerWebExchange exchange) {
+    return accessControlService
+        .validateAccess(
+            AccessContext.builder()
+                .applicationConfigActions(VIEW)
+                .build()
+        )
+        .then(Mono.fromSupplier(() -> ResponseEntity.ok(
+            new ApplicationConfigDTO()
+                .properties(MAPPER.toDto(dynamicConfigOperations.getCurrentProperties()))
+        )));
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> restartWithConfig(Mono<RestartRequestDTO> restartRequestDto,
+                                                      ServerWebExchange exchange) {
+    return accessControlService
+        .validateAccess(
+            AccessContext.builder()
+                .applicationConfigActions(EDIT)
+                .build()
+        )
+        .then(restartRequestDto)
+        .map(dto -> {
+          dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
+          restarter.requestRestart();
+          return ResponseEntity.ok().build();
+        });
+  }
+
+  @Override
+  public Mono<ResponseEntity<UploadedFileInfoDTO>> uploadConfigRelatedFile(FilePart file, ServerWebExchange exchange) {
+    return accessControlService
+        .validateAccess(
+            AccessContext.builder()
+                .applicationConfigActions(EDIT)
+                .build()
+        )
+        .then(dynamicConfigOperations.uploadConfigRelatedFile(file))
+        .map(path -> new UploadedFileInfoDTO().location(path.toString()))
+        .map(ResponseEntity::ok);
+  }
+
+  @Override
+  public Mono<ResponseEntity<ApplicationConfigValidationDTO>> validateConfig(Mono<ApplicationConfigDTO> configDto,
+                                                                             ServerWebExchange exchange) {
+    return configDto
+        .flatMap(config -> {
+          PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
+          ClustersProperties clustersProperties = propertiesStructure.getKafka();
+          return validateClustersConfig(clustersProperties)
+              .map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
+        })
+        .map(ResponseEntity::ok);
+  }
+
+  private Mono<Map<String, ClusterConfigValidationDTO>> validateClustersConfig(
+      @Nullable ClustersProperties properties) {
+    if (properties == null || properties.getClusters() == null) {
+      return Mono.just(Map.of());
+    }
+    properties.validateAndSetDefaults();
+    return Flux.fromIterable(properties.getClusters())
+        .flatMap(c -> kafkaClusterFactory.validate(c).map(v -> Tuples.of(c.getName(), v)))
+        .collectMap(Tuple2::getT1, Tuple2::getT2);
+  }
+}

+ 2 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java

@@ -189,8 +189,8 @@ public class ConsumerGroupsController extends AbstractController implements Cons
   private ConsumerGroupsPageResponseDTO convertPage(ConsumerGroupService.ConsumerGroupsPage
                                                         consumerGroupConsumerGroupsPage) {
     return new ConsumerGroupsPageResponseDTO()
-        .pageCount(consumerGroupConsumerGroupsPage.getTotalPages())
-        .consumerGroups(consumerGroupConsumerGroupsPage.getConsumerGroups()
+        .pageCount(consumerGroupConsumerGroupsPage.totalPages())
+        .consumerGroups(consumerGroupConsumerGroupsPage.consumerGroups()
             .stream()
             .map(ConsumerGroupMapper::toDto)
             .collect(Collectors.toList()));

+ 10 - 12
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java

@@ -37,10 +37,10 @@ public class KafkaConnectController extends AbstractController implements KafkaC
   public Mono<ResponseEntity<Flux<ConnectDTO>>> getConnects(String clusterName,
                                                             ServerWebExchange exchange) {
 
-    Flux<ConnectDTO> flux = Flux.fromIterable(kafkaConnectService.getConnects(getCluster(clusterName)))
+    Flux<ConnectDTO> availableConnects = kafkaConnectService.getConnects(getCluster(clusterName))
         .filterWhen(dto -> accessControlService.isConnectAccessible(dto, clusterName));
 
-    return Mono.just(ResponseEntity.ok(flux));
+    return Mono.just(ResponseEntity.ok(availableConnects));
   }
 
   @Override
@@ -54,7 +54,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .build());
 
     return validateAccess.thenReturn(
-        ResponseEntity.ok(kafkaConnectService.getConnectors(getCluster(clusterName), connectName))
+        ResponseEntity.ok(kafkaConnectService.getConnectorNames(getCluster(clusterName), connectName))
     );
   }
 
@@ -149,10 +149,9 @@ public class KafkaConnectController extends AbstractController implements KafkaC
   }
 
   @Override
-  public Mono<ResponseEntity<ConnectorDTO>> setConnectorConfig(String clusterName,
-                                                               String connectName,
+  public Mono<ResponseEntity<ConnectorDTO>> setConnectorConfig(String clusterName, String connectName,
                                                                String connectorName,
-                                                               @Valid Mono<Object> requestBody,
+                                                               Mono<Map<String, Object>> requestBody,
                                                                ServerWebExchange exchange) {
 
     Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
@@ -164,8 +163,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
     return validateAccess.then(
         kafkaConnectService
             .setConnectorConfig(getCluster(clusterName), connectName, connectorName, requestBody)
-            .map(ResponseEntity::ok)
-    );
+            .map(ResponseEntity::ok));
   }
 
   @Override
@@ -234,15 +232,15 @@ public class KafkaConnectController extends AbstractController implements KafkaC
         .build());
 
     return validateAccess.then(
-        kafkaConnectService
-            .getConnectorPlugins(getCluster(clusterName), connectName)
-            .map(ResponseEntity::ok)
+        Mono.just(
+            ResponseEntity.ok(
+                kafkaConnectService.getConnectorPlugins(getCluster(clusterName), connectName)))
     );
   }
 
   @Override
   public Mono<ResponseEntity<ConnectorPluginConfigValidationResponseDTO>> validateConnectorPluginConfig(
-      String clusterName, String connectName, String pluginName, @Valid Mono<Object> requestBody,
+      String clusterName, String connectName, String pluginName, @Valid Mono<Map<String, Object>> requestBody,
       ServerWebExchange exchange) {
     return kafkaConnectService
         .validateConnectorPluginConfig(

+ 1 - 6
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java

@@ -43,9 +43,6 @@ import reactor.core.scheduler.Schedulers;
 @Slf4j
 public class MessagesController extends AbstractController implements MessagesApi {
 
-  private static final int MAX_LOAD_RECORD_LIMIT = 100;
-  private static final int DEFAULT_LOAD_RECORD_LIMIT = 20;
-
   private final MessagesService messagesService;
   private final DeserializationService deserializationService;
   private final AccessControlService accessControlService;
@@ -91,8 +88,6 @@ public class MessagesController extends AbstractController implements MessagesAp
     seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING;
     seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD;
     filterQueryType = filterQueryType != null ? filterQueryType : MessageFilterTypeDTO.STRING_CONTAINS;
-    int recordsLimit =
-        Optional.ofNullable(limit).map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT)).orElse(DEFAULT_LOAD_RECORD_LIMIT);
 
     var positions = new ConsumerPosition(
         seekType,
@@ -103,7 +98,7 @@ public class MessagesController extends AbstractController implements MessagesAp
         ResponseEntity.ok(
             messagesService.loadMessages(
                 getCluster(clusterName), topicName, positions, q, filterQueryType,
-                recordsLimit, seekDirection, keySerde, valueSerde)
+                limit, seekDirection, keySerde, valueSerde)
         )
     );
 

+ 64 - 61
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java

@@ -2,7 +2,8 @@ package com.provectus.kafka.ui.controller;
 
 import com.provectus.kafka.ui.api.SchemasApi;
 import com.provectus.kafka.ui.exception.ValidationException;
-import com.provectus.kafka.ui.mapper.ClusterMapper;
+import com.provectus.kafka.ui.mapper.KafkaSrMapper;
+import com.provectus.kafka.ui.mapper.KafkaSrMapperImpl;
 import com.provectus.kafka.ui.model.CompatibilityCheckResponseDTO;
 import com.provectus.kafka.ui.model.CompatibilityLevelDTO;
 import com.provectus.kafka.ui.model.KafkaCluster;
@@ -32,7 +33,7 @@ public class SchemasController extends AbstractController implements SchemasApi
 
   private static final Integer DEFAULT_PAGE_SIZE = 25;
 
-  private final ClusterMapper mapper;
+  private final KafkaSrMapper kafkaSrMapper = new KafkaSrMapperImpl();
 
   private final SchemaRegistryService schemaRegistryService;
   private final AccessControlService accessControlService;
@@ -40,7 +41,7 @@ public class SchemasController extends AbstractController implements SchemasApi
   @Override
   protected KafkaCluster getCluster(String clusterName) {
     var c = super.getCluster(clusterName);
-    if (c.getSchemaRegistry() == null) {
+    if (c.getSchemaRegistryClient() == null) {
       throw new ValidationException("Schema Registry is not set for cluster " + clusterName);
     }
     return c;
@@ -48,9 +49,8 @@ public class SchemasController extends AbstractController implements SchemasApi
 
   @Override
   public Mono<ResponseEntity<CompatibilityCheckResponseDTO>> checkSchemaCompatibility(
-      String clusterName, String subject, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubject,
+      String clusterName, String subject, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubjectMono,
       ServerWebExchange exchange) {
-
     Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schema(subject)
@@ -58,37 +58,41 @@ public class SchemasController extends AbstractController implements SchemasApi
         .build());
 
     return validateAccess.then(
-        schemaRegistryService.checksSchemaCompatibility(
-                getCluster(clusterName), subject, newSchemaSubject)
-            .map(mapper::toCompatibilityCheckResponse)
+        newSchemaSubjectMono.flatMap(subjectDTO ->
+                schemaRegistryService.checksSchemaCompatibility(
+                    getCluster(clusterName),
+                    subject,
+                    kafkaSrMapper.fromDto(subjectDTO)
+                ))
+            .map(kafkaSrMapper::toDto)
             .map(ResponseEntity::ok)
     );
   }
 
   @Override
   public Mono<ResponseEntity<SchemaSubjectDTO>> createNewSchema(
-      String clusterName, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubject,
+      String clusterName, @Valid Mono<NewSchemaSubjectDTO> newSchemaSubjectMono,
       ServerWebExchange exchange) {
+    Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
+        .cluster(clusterName)
+        .schemaActions(SchemaAction.CREATE)
+        .build());
 
-    return newSchemaSubject.flatMap(dto -> {
-      Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
-          .cluster(clusterName)
-          .schemaActions(SchemaAction.CREATE)
-          .build());
-
-      return validateAccess.then(
-          schemaRegistryService
-              .registerNewSchema(getCluster(clusterName), dto)
-              .map(ResponseEntity::ok)
-      );
-    });
+    return validateAccess.then(
+        newSchemaSubjectMono.flatMap(newSubject ->
+                schemaRegistryService.registerNewSchema(
+                    getCluster(clusterName),
+                    newSubject.getSubject(),
+                    kafkaSrMapper.fromDto(newSubject)
+                )
+            ).map(kafkaSrMapper::toDto)
+            .map(ResponseEntity::ok)
+    );
   }
 
   @Override
-  public Mono<ResponseEntity<Void>> deleteLatestSchema(String clusterName,
-                                                       String subject,
-                                                       ServerWebExchange exchange) {
-
+  public Mono<ResponseEntity<Void>> deleteLatestSchema(
+      String clusterName, String subject, ServerWebExchange exchange) {
     Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schema(subject)
@@ -102,9 +106,8 @@ public class SchemasController extends AbstractController implements SchemasApi
   }
 
   @Override
-  public Mono<ResponseEntity<Void>> deleteSchema(String clusterName,
-                                                 String subject,
-                                                 ServerWebExchange exchange) {
+  public Mono<ResponseEntity<Void>> deleteSchema(
+      String clusterName, String subject, ServerWebExchange exchange) {
     Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schema(subject)
@@ -118,36 +121,32 @@ public class SchemasController extends AbstractController implements SchemasApi
   }
 
   @Override
-  public Mono<ResponseEntity<Void>> deleteSchemaByVersion(String clusterName,
-                                                          String subject,
-                                                          Integer version,
-                                                          ServerWebExchange exchange) {
-
+  public Mono<ResponseEntity<Void>> deleteSchemaByVersion(
+      String clusterName, String subjectName, Integer version, ServerWebExchange exchange) {
     Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
-        .schema(subject)
+        .schema(subjectName)
         .schemaActions(SchemaAction.DELETE)
         .build());
 
     return validateAccess.then(
-        schemaRegistryService.deleteSchemaSubjectByVersion(getCluster(clusterName), subject, version)
+        schemaRegistryService.deleteSchemaSubjectByVersion(getCluster(clusterName), subjectName, version)
             .thenReturn(ResponseEntity.ok().build())
     );
   }
 
   @Override
   public Mono<ResponseEntity<Flux<SchemaSubjectDTO>>> getAllVersionsBySubject(
-      String clusterName, String subject, ServerWebExchange exchange) {
-
+      String clusterName, String subjectName, ServerWebExchange exchange) {
     Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
-        .schema(subject)
+        .schema(subjectName)
         .schemaActions(SchemaAction.VIEW)
         .build());
 
     Flux<SchemaSubjectDTO> schemas =
-        schemaRegistryService.getAllVersionsBySubject(getCluster(clusterName), subject);
-
+        schemaRegistryService.getAllVersionsBySubject(getCluster(clusterName), subjectName)
+            .map(kafkaSrMapper::toDto);
     return validateAccess.thenReturn(ResponseEntity.ok(schemas));
   }
 
@@ -155,7 +154,7 @@ public class SchemasController extends AbstractController implements SchemasApi
   public Mono<ResponseEntity<CompatibilityLevelDTO>> getGlobalSchemaCompatibilityLevel(
       String clusterName, ServerWebExchange exchange) {
     return schemaRegistryService.getGlobalSchemaCompatibilityLevel(getCluster(clusterName))
-        .map(mapper::toCompatibilityLevelDto)
+        .map(c -> new CompatibilityLevelDTO().compatibility(kafkaSrMapper.toDto(c)))
         .map(ResponseEntity::ok)
         .defaultIfEmpty(ResponseEntity.notFound().build());
   }
@@ -172,6 +171,7 @@ public class SchemasController extends AbstractController implements SchemasApi
 
     return validateAccess.then(
         schemaRegistryService.getLatestSchemaVersionBySubject(getCluster(clusterName), subject)
+            .map(kafkaSrMapper::toDto)
             .map(ResponseEntity::ok)
     );
   }
@@ -179,7 +179,6 @@ public class SchemasController extends AbstractController implements SchemasApi
   @Override
   public Mono<ResponseEntity<SchemaSubjectDTO>> getSchemaByVersion(
       String clusterName, String subject, Integer version, ServerWebExchange exchange) {
-
     Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schema(subject)
@@ -189,6 +188,7 @@ public class SchemasController extends AbstractController implements SchemasApi
     return validateAccess.then(
         schemaRegistryService.getSchemaSubjectByVersion(
                 getCluster(clusterName), subject, version)
+            .map(kafkaSrMapper::toDto)
             .map(ResponseEntity::ok)
     );
   }
@@ -198,10 +198,10 @@ public class SchemasController extends AbstractController implements SchemasApi
                                                                     @Valid Integer pageNum,
                                                                     @Valid Integer perPage,
                                                                     @Valid String search,
-                                                                    ServerWebExchange exchange) {
+                                                                    ServerWebExchange serverWebExchange) {
     return schemaRegistryService
         .getAllSubjectNames(getCluster(clusterName))
-        .flatMapMany(Flux::fromArray)
+        .flatMapIterable(l -> l)
         .filterWhen(schema -> accessControlService.isSchemaAccessible(schema, clusterName))
         .collectList()
         .flatMap(subjects -> {
@@ -218,46 +218,49 @@ public class SchemasController extends AbstractController implements SchemasApi
               .limit(pageSize)
               .collect(Collectors.toList());
           return schemaRegistryService.getAllLatestVersionSchemas(getCluster(clusterName), subjectsToRender)
-              .map(a -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(a));
-        })
-        .map(ResponseEntity::ok);
+              .map(subjs -> subjs.stream().map(kafkaSrMapper::toDto).toList())
+              .map(subjs -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(subjs));
+        }).map(ResponseEntity::ok);
   }
 
   @Override
   public Mono<ResponseEntity<Void>> updateGlobalSchemaCompatibilityLevel(
-      String clusterName, @Valid Mono<CompatibilityLevelDTO> compatibilityLevel,
+      String clusterName, @Valid Mono<CompatibilityLevelDTO> compatibilityLevelMono,
       ServerWebExchange exchange) {
-
     Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schemaActions(SchemaAction.MODIFY_GLOBAL_COMPATIBILITY)
         .build());
 
-    log.info("Updating schema compatibility globally");
-
     return validateAccess.then(
-        schemaRegistryService.updateSchemaCompatibility(
-                getCluster(clusterName), compatibilityLevel)
-            .map(ResponseEntity::ok)
+        compatibilityLevelMono
+            .flatMap(compatibilityLevelDTO ->
+                schemaRegistryService.updateGlobalSchemaCompatibility(
+                    getCluster(clusterName),
+                    kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
+                ))
+            .thenReturn(ResponseEntity.ok().build())
     );
   }
 
   @Override
   public Mono<ResponseEntity<Void>> updateSchemaCompatibilityLevel(
-      String clusterName, String subject, @Valid Mono<CompatibilityLevelDTO> compatibilityLevel,
+      String clusterName, String subject, @Valid Mono<CompatibilityLevelDTO> compatibilityLevelMono,
       ServerWebExchange exchange) {
-
     Mono<Void> validateAccess = accessControlService.validateAccess(AccessContext.builder()
         .cluster(clusterName)
         .schemaActions(SchemaAction.EDIT)
         .build());
 
-    log.info("Updating schema compatibility for subject: {}", subject);
-
     return validateAccess.then(
-        schemaRegistryService.updateSchemaCompatibility(
-                getCluster(clusterName), subject, compatibilityLevel)
-            .map(ResponseEntity::ok)
+        compatibilityLevelMono
+            .flatMap(compatibilityLevelDTO ->
+                schemaRegistryService.updateSchemaCompatibility(
+                    getCluster(clusterName),
+                    subject,
+                    kafkaSrMapper.fromDto(compatibilityLevelDTO.getCompatibility())
+                ))
+            .thenReturn(ResponseEntity.ok().build())
     );
   }
 }
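
Note: DTO conversion in this controller now goes through KafkaSrMapper (instantiated as the generated KafkaSrMapperImpl) rather than ClusterMapper. The mapper itself is not part of this diff; the MapStruct-style sketch below only illustrates the shape implied by the calls above, and the registry-side type names (SchemaSubject, NewSubject, Compatibility) are placeholders, not the real classes.

import org.mapstruct.Mapper;

// Placeholder sketch; the real interface lives in com.provectus.kafka.ui.mapper.KafkaSrMapper.
@Mapper
public interface KafkaSrMapper {
  SchemaSubjectDTO toDto(SchemaSubject subject);                       // registry model -> API DTO
  CompatibilityLevelDTO.CompatibilityEnum toDto(Compatibility level);  // compatibility -> API enum
  NewSubject fromDto(NewSchemaSubjectDTO dto);                         // API DTO -> registration request
  Compatibility fromDto(CompatibilityLevelDTO.CompatibilityEnum dto);
}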

+ 1 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java

@@ -175,7 +175,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
           List<InternalTopic> filtered = existingTopics.stream()
               .filter(topic -> !topic.isInternal()
                   || showInternal != null && showInternal)
-              .filter(topic -> search == null || StringUtils.contains(topic.getName(), search))
+              .filter(topic -> search == null || StringUtils.containsIgnoreCase(topic.getName(), search))
               .sorted(comparator)
               .toList();
           var totalPages = (filtered.size() / pageSize)
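
Note: the topic search filter switches from a case-sensitive to a case-insensitive substring match. A small illustration with Apache Commons Lang (the topic name is made up):

import org.apache.commons.lang3.StringUtils;

StringUtils.contains("Payments.Events", "payments");            // false - old behaviour missed differently-cased names
StringUtils.containsIgnoreCase("Payments.Events", "payments");  // true  - new behaviour matches regardless of case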

+ 18 - 41
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/AbstractEmitter.java

@@ -1,10 +1,6 @@
 package com.provectus.kafka.ui.emitter;
 
-import com.provectus.kafka.ui.model.TopicMessageDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
-import com.provectus.kafka.ui.model.TopicMessagePhaseDTO;
-import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
-import com.provectus.kafka.ui.util.PollingThrottler;
 import java.time.Duration;
 import java.time.Instant;
 import org.apache.kafka.clients.consumer.Consumer;
@@ -14,27 +10,20 @@ import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.FluxSink;
 
 public abstract class AbstractEmitter {
-  private static final Duration DEFAULT_POLL_TIMEOUT_MS = Duration.ofMillis(1000L);
 
-  // In some situations it is hard to say whether records range (between two offsets) was fully polled.
-  // This happens when we have holes in records sequences that is usual case for compact topics or
-  // topics with transactional writes. In such cases if you want to poll all records between offsets X and Y
-  // there is no guarantee that you will ever see record with offset Y.
-  // To workaround this we can assume that after N consecutive empty polls all target messages were read.
-  public static final int NO_MORE_DATA_EMPTY_POLLS_COUNT = 3;
-
-  private final ConsumerRecordDeserializer recordDeserializer;
-  private final ConsumingStats consumingStats = new ConsumingStats();
+  private final MessagesProcessing messagesProcessing;
   private final PollingThrottler throttler;
+  protected final PollingSettings pollingSettings;
 
-  protected AbstractEmitter(ConsumerRecordDeserializer recordDeserializer, PollingThrottler throttler) {
-    this.recordDeserializer = recordDeserializer;
-    this.throttler = throttler;
+  protected AbstractEmitter(MessagesProcessing messagesProcessing, PollingSettings pollingSettings) {
+    this.messagesProcessing = messagesProcessing;
+    this.pollingSettings = pollingSettings;
+    this.throttler = pollingSettings.getPollingThrottler();
   }
 
   protected ConsumerRecords<Bytes, Bytes> poll(
       FluxSink<TopicMessageEventDTO> sink, Consumer<Bytes, Bytes> consumer) {
-    return poll(sink, consumer, DEFAULT_POLL_TIMEOUT_MS);
+    return poll(sink, consumer, pollingSettings.getPollTimeout());
   }
 
   protected ConsumerRecords<Bytes, Bytes> poll(
@@ -47,39 +36,27 @@ public abstract class AbstractEmitter {
     return records;
   }
 
+  protected boolean sendLimitReached() {
+    return messagesProcessing.limitReached();
+  }
+
   protected void sendMessage(FluxSink<TopicMessageEventDTO> sink,
-                                                       ConsumerRecord<Bytes, Bytes> msg) {
-    final TopicMessageDTO topicMessage = recordDeserializer.deserialize(msg);
-    sink.next(
-        new TopicMessageEventDTO()
-            .type(TopicMessageEventDTO.TypeEnum.MESSAGE)
-            .message(topicMessage)
-    );
+                             ConsumerRecord<Bytes, Bytes> msg) {
+    messagesProcessing.sendMsg(sink, msg);
   }
 
   protected void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {
-    sink.next(
-        new TopicMessageEventDTO()
-            .type(TopicMessageEventDTO.TypeEnum.PHASE)
-            .phase(new TopicMessagePhaseDTO().name(name))
-    );
+    messagesProcessing.sendPhase(sink, name);
   }
 
   protected int sendConsuming(FluxSink<TopicMessageEventDTO> sink,
-                               ConsumerRecords<Bytes, Bytes> records,
-                               long elapsed) {
-    return consumingStats.sendConsumingEvt(sink, records, elapsed, getFilterApplyErrors(sink));
+                              ConsumerRecords<Bytes, Bytes> records,
+                              long elapsed) {
+    return messagesProcessing.sentConsumingInfo(sink, records, elapsed);
   }
 
   protected void sendFinishStatsAndCompleteSink(FluxSink<TopicMessageEventDTO> sink) {
-    consumingStats.sendFinishEvent(sink, getFilterApplyErrors(sink));
+    messagesProcessing.sendFinishEvent(sink);
     sink.complete();
   }
-
-  protected Number getFilterApplyErrors(FluxSink<?> sink) {
-    return sink.contextView()
-        .<MessageFilterStats>getOrEmpty(MessageFilterStats.class)
-        .<Number>map(MessageFilterStats::getFilterApplyErrors)
-        .orElse(0);
-  }
 }
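
Note: the emitters no longer hold a deserializer and throttler directly; deserialization, filtering and the send limit move into MessagesProcessing, while poll timeouts and throttling come from PollingSettings (both added later in this diff). A rough wiring sketch, assuming consumerProps, consumerPosition and messagesProcessing already exist at the call site:

// Sketch only; error handling and consumer cleanup omitted.
var emitter = new ForwardRecordEmitter(
    () -> new KafkaConsumer<>(consumerProps),  // fresh consumer per subscription
    consumerPosition,
    messagesProcessing,                        // deserialization, filtering, limit
    PollingSettings.createDefault()            // poll timeouts and throttling defaults
);
Flux<TopicMessageEventDTO> events = Flux.create(emitter);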

+ 13 - 19
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java

@@ -2,16 +2,12 @@ package com.provectus.kafka.ui.emitter;
 
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
-import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
-import com.provectus.kafka.ui.util.PollingThrottler;
-import java.time.Duration;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 import java.util.TreeMap;
 import java.util.function.Supplier;
-import java.util.stream.Collectors;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
@@ -26,8 +22,6 @@ public class BackwardRecordEmitter
     extends AbstractEmitter
     implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {
 
-  private static final Duration POLL_TIMEOUT = Duration.ofMillis(200);
-
   private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
   private final ConsumerPosition consumerPosition;
   private final int messagesPerPage;
@@ -36,9 +30,9 @@ public class BackwardRecordEmitter
       Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
       ConsumerPosition consumerPosition,
       int messagesPerPage,
-      ConsumerRecordDeserializer recordDeserializer,
-      PollingThrottler throttler) {
-    super(recordDeserializer, throttler);
+      MessagesProcessing messagesProcessing,
+      PollingSettings pollingSettings) {
+    super(messagesProcessing, pollingSettings);
     this.consumerPosition = consumerPosition;
     this.messagesPerPage = messagesPerPage;
     this.consumerSupplier = consumerSupplier;
@@ -57,7 +51,7 @@ public class BackwardRecordEmitter
       int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readUntilOffsets.size());
       log.debug("'Until' offsets for polling: {}", readUntilOffsets);
 
-      while (!sink.isCancelled() && !readUntilOffsets.isEmpty()) {
+      while (!sink.isCancelled() && !readUntilOffsets.isEmpty() && !sendLimitReached()) {
         new TreeMap<>(readUntilOffsets).forEach((tp, readToOffset) -> {
           if (sink.isCancelled()) {
             return; //fast return in case of sink cancellation
@@ -66,8 +60,6 @@ public class BackwardRecordEmitter
           long readFromOffset = Math.max(beginOffset, readToOffset - msgsToPollPerPartition);
 
           partitionPollIteration(tp, readFromOffset, readToOffset, consumer, sink)
-              .stream()
-              .filter(r -> !sink.isCancelled())
               .forEach(r -> sendMessage(sink, r));
 
           if (beginOffset == readFromOffset) {
@@ -109,17 +101,19 @@ public class BackwardRecordEmitter
 
     var recordsToSend = new ArrayList<ConsumerRecord<Bytes, Bytes>>();
 
-    // we use empty polls counting to verify that partition was fully read
-    for (int emptyPolls = 0; recordsToSend.size() < desiredMsgsToPoll && emptyPolls < NO_MORE_DATA_EMPTY_POLLS_COUNT;) {
-      var polledRecords = poll(sink, consumer, POLL_TIMEOUT);
-      log.debug("{} records polled from {}", polledRecords.count(), tp);
+    EmptyPollsCounter emptyPolls  = pollingSettings.createEmptyPollsCounter();
+    while (!sink.isCancelled()
+        && !sendLimitReached()
+        && recordsToSend.size() < desiredMsgsToPoll
+        && !emptyPolls.noDataEmptyPollsReached()) {
+      var polledRecords = poll(sink, consumer, pollingSettings.getPartitionPollTimeout());
+      emptyPolls.count(polledRecords);
 
-      // counting sequential empty polls
-      emptyPolls = polledRecords.isEmpty() ? emptyPolls + 1 : 0;
+      log.debug("{} records polled from {}", polledRecords.count(), tp);
 
       var filteredRecords = polledRecords.records(tp).stream()
           .filter(r -> r.offset() < toOffset)
-          .collect(Collectors.toList());
+          .toList();
 
       if (!polledRecords.isEmpty() && filteredRecords.isEmpty()) {
         // we already read all messages in target offsets interval

+ 4 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ConsumingStats.java

@@ -19,7 +19,7 @@ class ConsumingStats {
   int sendConsumingEvt(FluxSink<TopicMessageEventDTO> sink,
                         ConsumerRecords<Bytes, Bytes> polledRecords,
                         long elapsed,
-                        Number filterApplyErrors) {
+                        int filterApplyErrors) {
     int polledBytes = ConsumerRecordsUtil.calculatePolledSize(polledRecords);
     bytes += polledBytes;
     this.records += polledRecords.count();
@@ -32,7 +32,7 @@ class ConsumingStats {
     return polledBytes;
   }
 
-  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, Number filterApplyErrors) {
+  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink, int filterApplyErrors) {
     sink.next(
         new TopicMessageEventDTO()
             .type(TopicMessageEventDTO.TypeEnum.DONE)
@@ -41,12 +41,12 @@ class ConsumingStats {
   }
 
   private TopicMessageConsumingDTO createConsumingStats(FluxSink<TopicMessageEventDTO> sink,
-                                                        Number filterApplyErrors) {
+                                                        int filterApplyErrors) {
     return new TopicMessageConsumingDTO()
         .bytesConsumed(this.bytes)
         .elapsedMs(this.elapsed)
         .isCancelled(sink.isCancelled())
-        .filterApplyErrors(filterApplyErrors.intValue())
+        .filterApplyErrors(filterApplyErrors)
         .messagesConsumed(this.records);
   }
 }

+ 28 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/EmptyPollsCounter.java

@@ -0,0 +1,28 @@
+package com.provectus.kafka.ui.emitter;
+
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+
+// In some situations it is hard to say whether a records range (between two offsets) was fully polled.
+// This happens when there are holes in the record sequence, which is a usual case for compacted topics or
+// topics with transactional writes. In such cases, if you want to poll all records between offsets X and Y,
+// there is no guarantee that you will ever see a record with offset Y.
+// To work around this, we assume that after N consecutive empty polls all target messages have been read.
+public class EmptyPollsCounter {
+
+  private final int maxEmptyPolls;
+
+  private int emptyPolls = 0;
+
+  EmptyPollsCounter(int maxEmptyPolls) {
+    this.maxEmptyPolls = maxEmptyPolls;
+  }
+
+  public void count(ConsumerRecords<?, ?> polled) {
+    emptyPolls = polled.isEmpty() ? emptyPolls + 1 : 0;
+  }
+
+  public boolean noDataEmptyPollsReached() {
+    return emptyPolls >= maxEmptyPolls;
+  }
+
+}
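
Note: a condensed view of the poll-loop shape the emitters build around this counter, assuming consumer and pollingSettings are in scope:

EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
while (!emptyPolls.noDataEmptyPollsReached()) {
  ConsumerRecords<Bytes, Bytes> polled = consumer.poll(pollingSettings.getPollTimeout());
  emptyPolls.count(polled);
  // ... hand the polled records over to MessagesProcessing ...
}
// Reaching this point means N consecutive polls returned nothing, so the range is treated as fully read.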

+ 9 - 14
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java

@@ -2,8 +2,6 @@ package com.provectus.kafka.ui.emitter;
 
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
-import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
-import com.provectus.kafka.ui.util.PollingThrottler;
 import java.util.function.Supplier;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
@@ -24,9 +22,9 @@ public class ForwardRecordEmitter
   public ForwardRecordEmitter(
       Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
       ConsumerPosition position,
-      ConsumerRecordDeserializer recordDeserializer,
-      PollingThrottler throttler) {
-    super(recordDeserializer, throttler);
+      MessagesProcessing messagesProcessing,
+      PollingSettings pollingSettings) {
+    super(messagesProcessing, pollingSettings);
     this.position = position;
     this.consumerSupplier = consumerSupplier;
   }
@@ -39,23 +37,20 @@ public class ForwardRecordEmitter
       var seekOperations = SeekOperations.create(consumer, position);
       seekOperations.assignAndSeekNonEmptyPartitions();
 
-      // we use empty polls counting to verify that topic was fully read
-      int emptyPolls = 0;
+      EmptyPollsCounter emptyPolls = pollingSettings.createEmptyPollsCounter();
       while (!sink.isCancelled()
+          && !sendLimitReached()
           && !seekOperations.assignedPartitionsFullyPolled()
-          && emptyPolls < NO_MORE_DATA_EMPTY_POLLS_COUNT) {
+          && !emptyPolls.noDataEmptyPollsReached()) {
 
         sendPhase(sink, "Polling");
         ConsumerRecords<Bytes, Bytes> records = poll(sink, consumer);
+        emptyPolls.count(records);
+
         log.debug("{} records polled", records.count());
-        emptyPolls = records.isEmpty() ? emptyPolls + 1 : 0;
 
         for (ConsumerRecord<Bytes, Bytes> msg : records) {
-          if (!sink.isCancelled()) {
-            sendMessage(sink, msg);
-          } else {
-            break;
-          }
+          sendMessage(sink, msg);
         }
       }
       sendFinishStatsAndCompleteSink(sink);

+ 0 - 16
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessageFilterStats.java

@@ -1,16 +0,0 @@
-package com.provectus.kafka.ui.emitter;
-
-import java.util.concurrent.atomic.AtomicLong;
-import lombok.AccessLevel;
-import lombok.Getter;
-
-public class MessageFilterStats {
-
-  @Getter(AccessLevel.PACKAGE)
-  private final AtomicLong filterApplyErrors = new AtomicLong();
-
-  public final void incrementApplyErrors() {
-    filterApplyErrors.incrementAndGet();
-  }
-
-}

+ 82 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/MessagesProcessing.java

@@ -0,0 +1,82 @@
+package com.provectus.kafka.ui.emitter;
+
+import com.provectus.kafka.ui.model.TopicMessageDTO;
+import com.provectus.kafka.ui.model.TopicMessageEventDTO;
+import com.provectus.kafka.ui.model.TopicMessagePhaseDTO;
+import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
+import java.util.function.Predicate;
+import javax.annotation.Nullable;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.common.utils.Bytes;
+import reactor.core.publisher.FluxSink;
+
+@Slf4j
+public class MessagesProcessing {
+
+  private final ConsumingStats consumingStats = new ConsumingStats();
+  private long sentMessages = 0;
+  private int filterApplyErrors = 0;
+
+  private final ConsumerRecordDeserializer deserializer;
+  private final Predicate<TopicMessageDTO> filter;
+  private final @Nullable Integer limit;
+
+  public MessagesProcessing(ConsumerRecordDeserializer deserializer,
+                            Predicate<TopicMessageDTO> filter,
+                            @Nullable Integer limit) {
+    this.deserializer = deserializer;
+    this.filter = filter;
+    this.limit = limit;
+  }
+
+  boolean limitReached() {
+    return limit != null && sentMessages >= limit;
+  }
+
+  void sendMsg(FluxSink<TopicMessageEventDTO> sink, ConsumerRecord<Bytes, Bytes> rec) {
+    if (!sink.isCancelled() && !limitReached()) {
+      TopicMessageDTO topicMessage = deserializer.deserialize(rec);
+      try {
+        if (filter.test(topicMessage)) {
+          sink.next(
+              new TopicMessageEventDTO()
+                  .type(TopicMessageEventDTO.TypeEnum.MESSAGE)
+                  .message(topicMessage)
+          );
+          sentMessages++;
+        }
+      } catch (Exception e) {
+        filterApplyErrors++;
+        log.trace("Error applying filter for message {}", topicMessage);
+      }
+    }
+  }
+
+  int sentConsumingInfo(FluxSink<TopicMessageEventDTO> sink,
+                        ConsumerRecords<Bytes, Bytes> polledRecords,
+                        long elapsed) {
+    if (!sink.isCancelled()) {
+      return consumingStats.sendConsumingEvt(sink, polledRecords, elapsed, filterApplyErrors);
+    }
+    return 0;
+  }
+
+  void sendFinishEvent(FluxSink<TopicMessageEventDTO> sink) {
+    if (!sink.isCancelled()) {
+      consumingStats.sendFinishEvent(sink, filterApplyErrors);
+    }
+  }
+
+  void sendPhase(FluxSink<TopicMessageEventDTO> sink, String name) {
+    if (!sink.isCancelled()) {
+      sink.next(
+          new TopicMessageEventDTO()
+              .type(TopicMessageEventDTO.TypeEnum.PHASE)
+              .phase(new TopicMessagePhaseDTO().name(name))
+      );
+    }
+  }
+
+}
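
Note: this class now owns the responsibilities previously spread across AbstractEmitter and the controller: deserialization, filter application, the send limit and the filter-error counter. A construction sketch with a hypothetical content filter (the real filter predicate is built from the request's query parameters elsewhere and is not part of this diff):

// Hypothetical filter: keep messages whose content mentions a given id.
Predicate<TopicMessageDTO> filter =
    msg -> msg.getContent() != null && msg.getContent().contains("order-42");

var processing = new MessagesProcessing(recordDeserializer, filter, 100); // stop after 100 sent messages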

+ 79 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/PollingSettings.java

@@ -0,0 +1,79 @@
+package com.provectus.kafka.ui.emitter;
+
+import com.provectus.kafka.ui.config.ClustersProperties;
+import java.time.Duration;
+import java.util.Optional;
+import java.util.function.Supplier;
+
+public class PollingSettings {
+
+  private static final Duration DEFAULT_POLL_TIMEOUT = Duration.ofMillis(1_000);
+  private static final Duration DEFAULT_PARTITION_POLL_TIMEOUT = Duration.ofMillis(200);
+  private static final int DEFAULT_NO_DATA_EMPTY_POLLS = 3;
+
+  private final Duration pollTimeout;
+  private final Duration partitionPollTimeout;
+  private final int noDataEmptyPolls; // see EmptyPollsCounter docs
+
+  private final Supplier<PollingThrottler> throttlerSupplier;
+
+  public static PollingSettings create(ClustersProperties.Cluster cluster,
+                                       ClustersProperties clustersProperties) {
+    var pollingProps = Optional.ofNullable(clustersProperties.getPolling())
+        .orElseGet(ClustersProperties.PollingProperties::new);
+
+    var pollTimeout = pollingProps.getPollTimeoutMs() != null
+        ? Duration.ofMillis(pollingProps.getPollTimeoutMs())
+        : DEFAULT_POLL_TIMEOUT;
+
+    var partitionPollTimeout = pollingProps.getPartitionPollTimeout() != null
+        ? Duration.ofMillis(pollingProps.getPartitionPollTimeout())
+        : Duration.ofMillis(pollTimeout.toMillis() / 5);
+
+    int noDataEmptyPolls = pollingProps.getNoDataEmptyPolls() != null
+        ? pollingProps.getNoDataEmptyPolls()
+        : DEFAULT_NO_DATA_EMPTY_POLLS;
+
+    return new PollingSettings(
+        pollTimeout,
+        partitionPollTimeout,
+        noDataEmptyPolls,
+        PollingThrottler.throttlerSupplier(cluster)
+    );
+  }
+
+  public static PollingSettings createDefault() {
+    return new PollingSettings(
+        DEFAULT_POLL_TIMEOUT,
+        DEFAULT_PARTITION_POLL_TIMEOUT,
+        DEFAULT_NO_DATA_EMPTY_POLLS,
+        PollingThrottler::noop
+    );
+  }
+
+  private PollingSettings(Duration pollTimeout,
+                          Duration partitionPollTimeout,
+                          int noDataEmptyPolls,
+                          Supplier<PollingThrottler> throttlerSupplier) {
+    this.pollTimeout = pollTimeout;
+    this.partitionPollTimeout = partitionPollTimeout;
+    this.noDataEmptyPolls = noDataEmptyPolls;
+    this.throttlerSupplier = throttlerSupplier;
+  }
+
+  public EmptyPollsCounter createEmptyPollsCounter() {
+    return new EmptyPollsCounter(noDataEmptyPolls);
+  }
+
+  public Duration getPollTimeout() {
+    return pollTimeout;
+  }
+
+  public Duration getPartitionPollTimeout() {
+    return partitionPollTimeout;
+  }
+
+  public PollingThrottler getPollingThrottler() {
+    return throttlerSupplier.get();
+  }
+}
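
Note: polling behaviour is now configurable via the new polling block in ClustersProperties, with hard-coded defaults as a fallback. A sketch of the derived values, assuming cluster and clustersProperties are already bound from configuration:

PollingSettings settings = PollingSettings.create(cluster, clustersProperties);

settings.getPollTimeout();          // configured pollTimeoutMs, otherwise 1000 ms
settings.getPartitionPollTimeout(); // configured value, otherwise pollTimeout / 5
settings.createEmptyPollsCounter(); // sized by noDataEmptyPolls, default 3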

+ 4 - 5
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/PollingThrottler.java → kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/PollingThrottler.java

@@ -1,10 +1,9 @@
-package com.provectus.kafka.ui.util;
+package com.provectus.kafka.ui.emitter;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.RateLimiter;
 import com.provectus.kafka.ui.config.ClustersProperties;
-import com.provectus.kafka.ui.model.KafkaCluster;
-import java.util.Optional;
+import com.provectus.kafka.ui.util.ConsumerRecordsUtil;
 import java.util.function.Supplier;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.ConsumerRecords;
@@ -14,8 +13,8 @@ import org.apache.kafka.common.utils.Bytes;
 public class PollingThrottler {
 
   public static Supplier<PollingThrottler> throttlerSupplier(ClustersProperties.Cluster cluster) {
-    long rate = cluster.getPollingThrottleRate();
-    if (rate <= 0) {
+    Long rate = cluster.getPollingThrottleRate();
+    if (rate == null || rate <= 0) {
       return PollingThrottler::noop;
     }
     // RateLimiter instance should be shared across all created throttlers

+ 1 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ResultSizeLimiter.java → kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ResultSizeLimiter.java

@@ -1,4 +1,4 @@
-package com.provectus.kafka.ui.util;
+package com.provectus.kafka.ui.emitter;
 
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import java.util.concurrent.atomic.AtomicInteger;

+ 3 - 5
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/TailingEmitter.java

@@ -2,8 +2,6 @@ package com.provectus.kafka.ui.emitter;
 
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
-import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
-import com.provectus.kafka.ui.util.PollingThrottler;
 import java.util.HashMap;
 import java.util.function.Supplier;
 import lombok.extern.slf4j.Slf4j;
@@ -21,9 +19,9 @@ public class TailingEmitter extends AbstractEmitter
 
   public TailingEmitter(Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
                         ConsumerPosition consumerPosition,
-                        ConsumerRecordDeserializer recordDeserializer,
-                        PollingThrottler throttler) {
-    super(recordDeserializer, throttler);
+                        MessagesProcessing messagesProcessing,
+                        PollingSettings pollingSettings) {
+    super(messagesProcessing, pollingSettings);
     this.consumerSupplier = consumerSupplier;
     this.consumerPosition = consumerPosition;
   }

+ 3 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java

@@ -29,7 +29,9 @@ public enum ErrorCode {
   RECREATE_TOPIC_TIMEOUT(4015, HttpStatus.REQUEST_TIMEOUT),
   INVALID_ENTITY_STATE(4016, HttpStatus.BAD_REQUEST),
   SCHEMA_NOT_DELETED(4017, HttpStatus.INTERNAL_SERVER_ERROR),
-  TOPIC_ANALYSIS_ERROR(4018, HttpStatus.BAD_REQUEST);
+  TOPIC_ANALYSIS_ERROR(4018, HttpStatus.BAD_REQUEST),
+  FILE_UPLOAD_EXCEPTION(4019, HttpStatus.INTERNAL_SERVER_ERROR),
+  ;
 
   static {
     // codes uniqueness check

+ 19 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/FileUploadException.java

@@ -0,0 +1,19 @@
+package com.provectus.kafka.ui.exception;
+
+import java.nio.file.Path;
+
+public class FileUploadException extends CustomBaseException {
+
+  public FileUploadException(String msg, Throwable cause) {
+    super(msg, cause);
+  }
+
+  public FileUploadException(Path path, Throwable cause) {
+    super("Error uploading file %s".formatted(path), cause);
+  }
+
+  @Override
+  public ErrorCode getErrorCode() {
+    return ErrorCode.FILE_UPLOAD_EXCEPTION;
+  }
+}
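
Note: the new exception pairs with ErrorCode.FILE_UPLOAD_EXCEPTION (4019) added above and maps to HTTP 500. An illustrative use when persisting an uploaded file; the path and byte array are made up, and the actual upload handling is outside this diff:

// java.nio.file imports omitted for brevity.
Path target = Path.of("/tmp/uploaded-config.yml");
try {
  Files.write(target, fileBytes);
} catch (IOException e) {
  throw new FileUploadException(target, e);  // surfaces as error code 4019 / HTTP 500
}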

+ 1 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/GlobalErrorWebExceptionHandler.java

@@ -134,7 +134,7 @@ public class GlobalErrorWebExceptionHandler extends AbstractErrorWebExceptionHan
         .timestamp(currentTimestamp())
         .stackTrace(Throwables.getStackTraceAsString(exception));
     return ServerResponse
-        .status(exception.getStatus())
+        .status(exception.getStatusCode())
         .contentType(MediaType.APPLICATION_JSON)
         .bodyValue(response);
   }

+ 2 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaCompatibilityException.java

@@ -1,8 +1,8 @@
 package com.provectus.kafka.ui.exception;
 
 public class SchemaCompatibilityException extends CustomBaseException {
-  public SchemaCompatibilityException(String message) {
-    super(message);
+  public SchemaCompatibilityException() {
+    super("Schema being registered is incompatible with an earlier schema");
   }
 
   @Override

+ 0 - 12
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/SchemaTypeNotSupportedException.java

@@ -1,12 +0,0 @@
-package com.provectus.kafka.ui.exception;
-
-public class SchemaTypeNotSupportedException extends UnprocessableEntityException {
-
-  private static final String REQUIRED_SCHEMA_REGISTRY_VERSION = "5.5.0";
-
-  public SchemaTypeNotSupportedException() {
-    super(String.format("Current version of Schema Registry does "
-        + "not support provided schema type,"
-        + " version %s or later is required here.", REQUIRED_SCHEMA_REGISTRY_VERSION));
-  }
-}

Some files were not shown because too many files changed in this diff