Added checkstyle (#296)

German Osin 4 years ago
parent
commit
590bdfb610
77 changed files with 4066 additions and 3307 deletions
  1. etc/checkstyle/apache-header.txt (+11 -0)
  2. etc/checkstyle/checkstyle.xml (+333 -0)
  3. kafka-ui-api/pom.xml (+32 -5)
  4. kafka-ui-api/src/main/java/com/provectus/kafka/ui/KafkaUiApplication.java (+3 -3)
  5. kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/KafkaConnectClients.java (+4 -5)
  6. kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/RetryingKafkaConnectClient.java (+74 -56)
  7. kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java (+22 -23)
  8. kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/Config.java (+25 -25)
  9. kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CorsGlobalConfiguration.java (+17 -17)
  10. kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java (+10 -7)
  11. kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ReadOnlyModeFilter.java (+25 -24)
  12. kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/SecurityConfig.java (+8 -8)
  13. kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java (+5 -3)
  14. kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ClustersController.java (+5 -3)
  15. kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java (+7 -4)
  16. kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java (+96 -63)
  17. kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java (+9 -5)
  18. kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java (+20 -12)
  19. kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java (+14 -7)
  20. kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/DeserializationService.java (+34 -34)
  21. kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/ProtobufFileRecordDeserializer.java (+26 -25)
  22. kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/RecordDeserializer.java (+1 -1)
  23. kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/SchemaRegistryRecordDeserializer.java (+175 -171)
  24. kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/SimpleRecordDeserializer.java (+9 -9)
  25. kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/CustomBaseException.java (+16 -15)
  26. kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/DuplicateEntityException.java (+8 -8)
  27. kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/GlobalErrorAttributes.java (+17 -18)
  28. kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/GlobalErrorWebExceptionHandler.java (+31 -24)
  29. kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/NotFoundException.java (+8 -8)
  30. kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ReadOnlyException.java (+8 -8)
  31. kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/RebalanceInProgressException.java (+8 -8)
  32. kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/UnprocessableEntityException.java (+8 -8)
  33. kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ValidationException.java (+7 -7)
  34. kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java (+117 -81)
  35. kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaConnectMapper.java (+15 -8)
  36. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java (+3 -4)
  37. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ExtendedAdminClient.java (+11 -12)
  38. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/Feature.java (+14 -14)
  39. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalBrokerDiskUsage.java (+2 -2)
  40. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalBrokerMetrics.java (+2 -3)
  41. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalClusterMetrics.java (+19 -20)
  42. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalPartition.java (+10 -11)
  43. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalReplica.java (+3 -3)
  44. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalSegmentSizeDto.java (+3 -4)
  45. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalTopic.java (+13 -14)
  46. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalTopicConfig.java (+3 -3)
  47. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java (+20 -21)
  48. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaConnectCluster.java (+2 -2)
  49. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/schemaregistry/InternalCompatibilityCheck.java (+2 -2)
  50. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/schemaregistry/InternalCompatibilityLevel.java (+1 -1)
  51. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/schemaregistry/InternalNewSchema.java (+7 -7)
  52. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/schemaregistry/SubjectIdResponse.java (+1 -1)
  53. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java (+228 -192)
  54. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersMetricsScheduler.java (+12 -13)
  55. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java (+37 -37)
  56. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java (+179 -173)
  57. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java (+175 -156)
  58. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java (+536 -463)
  59. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MetricsUpdateService.java (+5 -5)
  60. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java (+249 -217)
  61. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ZookeeperService.java (+24 -25)
  62. kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java (+229 -199)
  63. kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxClusterUtil.java (+112 -96)
  64. kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxMetricsName.java (+27 -27)
  65. kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxMetricsValueName.java (+6 -6)
  66. kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxPoolFactory.java (+19 -20)
  67. kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/KafkaConstants.java (+55 -31)
  68. kafka-ui-api/src/test/java/com/provectus/kafka/ui/AbstractBaseTest.java (+39 -39)
  69. kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConnectServiceTests.java (+284 -264)
  70. kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java (+52 -53)
  71. kafka-ui-api/src/test/java/com/provectus/kafka/ui/ReadOnlyModeTests.java (+75 -76)
  72. kafka-ui-api/src/test/java/com/provectus/kafka/ui/SchemaRegistryServiceTests.java (+225 -213)
  73. kafka-ui-api/src/test/java/com/provectus/kafka/ui/container/KafkaConnectContainer.java (+38 -39)
  74. kafka-ui-api/src/test/java/com/provectus/kafka/ui/container/SchemaRegistryContainer.java (+19 -19)
  75. kafka-ui-api/src/test/java/com/provectus/kafka/ui/deserialization/SchemaRegistryRecordDeserializerTest.java (+24 -21)
  76. kafka-ui-api/src/test/java/com/provectus/kafka/ui/producer/KafkaTestProducer.java (+20 -21)
  77. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ClusterServiceTest.java (+73 -75)

+ 11 - 0
etc/checkstyle/apache-header.txt

@@ -0,0 +1,11 @@
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.

+ 333 - 0
etc/checkstyle/checkstyle.xml

@@ -0,0 +1,333 @@
+<?xml version="1.0"?>
+<!DOCTYPE module PUBLIC
+        "-//Checkstyle//DTD Checkstyle Configuration 1.3//EN"
+        "https://checkstyle.org/dtds/configuration_1_3.dtd">
+
+<!--
+    Checkstyle configuration that checks the Google coding conventions from Google Java Style
+    that can be found at https://google.github.io/styleguide/javaguide.html
+
+    Checkstyle is very configurable. Be sure to read the documentation at
+    http://checkstyle.org (or in your downloaded distribution).
+
+    To completely disable a check, just comment it out or delete it from the file.
+    To suppress certain violations please review suppression filters.
+
+    Authors: Max Vetrenko, Ruslan Diachenko, Roman Ivanov.
+ -->
+
+<module name = "Checker">
+    <property name="charset" value="UTF-8"/>
+
+    <property name="severity" value="warning"/>
+
+    <property name="fileExtensions" value="java, properties, xml"/>
+    <!-- Excludes all 'module-info.java' files              -->
+    <!-- See https://checkstyle.org/config_filefilters.html -->
+    <module name="BeforeExecutionExclusionFileFilter">
+        <property name="fileNamePattern" value="module\-info\.java$"/>
+    </module>
+    <!-- https://checkstyle.org/config_filters.html#SuppressionFilter -->
+    <module name="SuppressionFilter">
+        <property name="file" value="${org.checkstyle.google.suppressionfilter.config}"
+                  default="checkstyle-suppressions.xml" />
+        <property name="optional" value="true"/>
+    </module>
+
+    <!-- Checks for whitespace                               -->
+    <!-- See http://checkstyle.org/config_whitespace.html -->
+    <module name="FileTabCharacter">
+        <property name="eachLine" value="true"/>
+    </module>
+
+    <module name="LineLength">
+        <property name="fileExtensions" value="java"/>
+        <property name="max" value="100"/>
+        <property name="ignorePattern" value="^package.*|^import.*|a href|href|http://|https://|ftp://"/>
+    </module>
+
+    <module name="TreeWalker">
+        <module name="OuterTypeFilename"/>
+        <module name="IllegalTokenText">
+            <property name="tokens" value="STRING_LITERAL, CHAR_LITERAL"/>
+            <property name="format"
+                      value="\\u00(09|0(a|A)|0(c|C)|0(d|D)|22|27|5(C|c))|\\(0(10|11|12|14|15|42|47)|134)"/>
+            <property name="message"
+                      value="Consider using special escape sequence instead of octal value or Unicode escaped value."/>
+        </module>
+        <module name="AvoidEscapedUnicodeCharacters">
+            <property name="allowEscapesForControlCharacters" value="true"/>
+            <property name="allowByTailComment" value="true"/>
+            <property name="allowNonPrintableEscapes" value="true"/>
+        </module>
+        <module name="AvoidStarImport"/>
+        <module name="OneTopLevelClass"/>
+        <module name="NoLineWrap">
+            <property name="tokens" value="PACKAGE_DEF, IMPORT, STATIC_IMPORT"/>
+        </module>
+        <module name="EmptyBlock">
+            <property name="option" value="TEXT"/>
+            <property name="tokens"
+                      value="LITERAL_TRY, LITERAL_FINALLY, LITERAL_IF, LITERAL_ELSE, LITERAL_SWITCH"/>
+        </module>
+        <module name="NeedBraces">
+            <property name="tokens"
+                      value="LITERAL_DO, LITERAL_ELSE, LITERAL_FOR, LITERAL_IF, LITERAL_WHILE"/>
+        </module>
+        <module name="LeftCurly">
+            <property name="tokens"
+                      value="ANNOTATION_DEF, CLASS_DEF, CTOR_DEF, ENUM_CONSTANT_DEF, ENUM_DEF,
+                    INTERFACE_DEF, LAMBDA, LITERAL_CASE, LITERAL_CATCH, LITERAL_DEFAULT,
+                    LITERAL_DO, LITERAL_ELSE, LITERAL_FINALLY, LITERAL_FOR, LITERAL_IF,
+                    LITERAL_SWITCH, LITERAL_SYNCHRONIZED, LITERAL_TRY, LITERAL_WHILE, METHOD_DEF,
+                    OBJBLOCK, STATIC_INIT"/>
+        </module>
+        <module name="RightCurly">
+            <property name="id" value="RightCurlySame"/>
+            <property name="tokens"
+                      value="LITERAL_TRY, LITERAL_CATCH, LITERAL_FINALLY, LITERAL_IF, LITERAL_ELSE,
+                    LITERAL_DO"/>
+        </module>
+        <module name="RightCurly">
+            <property name="id" value="RightCurlyAlone"/>
+            <property name="option" value="alone"/>
+            <property name="tokens"
+                      value="CLASS_DEF, METHOD_DEF, CTOR_DEF, LITERAL_FOR, LITERAL_WHILE, STATIC_INIT,
+                    INSTANCE_INIT, ANNOTATION_DEF, ENUM_DEF"/>
+        </module>
+        <module name="SuppressionXpathSingleFilter">
+            <!-- suppresion is required till https://github.com/checkstyle/checkstyle/issues/7541 -->
+            <property name="id" value="RightCurlyAlone"/>
+            <property name="query" value="//RCURLY[parent::SLIST[count(./*)=1]
+                                                 or preceding-sibling::*[last()][self::LCURLY]]"/>
+        </module>
+        <module name="WhitespaceAfter">
+            <property name="tokens"
+                      value="COMMA, SEMI, TYPECAST, LITERAL_IF, LITERAL_ELSE,
+                    LITERAL_WHILE, LITERAL_DO, LITERAL_FOR, DO_WHILE"/>
+        </module>
+        <module name="WhitespaceAround">
+            <property name="allowEmptyConstructors" value="true"/>
+            <property name="allowEmptyLambdas" value="true"/>
+            <property name="allowEmptyMethods" value="true"/>
+            <property name="allowEmptyTypes" value="true"/>
+            <property name="allowEmptyLoops" value="true"/>
+            <property name="tokens"
+                      value="ASSIGN, BAND, BAND_ASSIGN, BOR, BOR_ASSIGN, BSR, BSR_ASSIGN, BXOR,
+                    BXOR_ASSIGN, COLON, DIV, DIV_ASSIGN, DO_WHILE, EQUAL, GE, GT, LAMBDA, LAND,
+                    LCURLY, LE, LITERAL_CATCH, LITERAL_DO, LITERAL_ELSE, LITERAL_FINALLY,
+                    LITERAL_FOR, LITERAL_IF, LITERAL_RETURN, LITERAL_SWITCH, LITERAL_SYNCHRONIZED,
+                     LITERAL_TRY, LITERAL_WHILE, LOR, LT, MINUS, MINUS_ASSIGN, MOD, MOD_ASSIGN,
+                     NOT_EQUAL, PLUS, PLUS_ASSIGN, QUESTION, RCURLY, SL, SLIST, SL_ASSIGN, SR,
+                     SR_ASSIGN, STAR, STAR_ASSIGN, LITERAL_ASSERT, TYPE_EXTENSION_AND"/>
+            <message key="ws.notFollowed"
+                     value="WhitespaceAround: ''{0}'' is not followed by whitespace. Empty blocks may only be represented as '{}' when not part of a multi-block statement (4.1.3)"/>
+            <message key="ws.notPreceded"
+                     value="WhitespaceAround: ''{0}'' is not preceded with whitespace."/>
+        </module>
+        <module name="OneStatementPerLine"/>
+        <module name="MultipleVariableDeclarations"/>
+        <module name="ArrayTypeStyle"/>
+        <module name="MissingSwitchDefault"/>
+        <module name="FallThrough"/>
+        <module name="UpperEll"/>
+        <module name="ModifierOrder"/>
+        <module name="EmptyLineSeparator">
+            <property name="tokens"
+                      value="PACKAGE_DEF, IMPORT, STATIC_IMPORT, CLASS_DEF, INTERFACE_DEF, ENUM_DEF,
+                    STATIC_INIT, INSTANCE_INIT, METHOD_DEF, CTOR_DEF, VARIABLE_DEF"/>
+            <property name="allowNoEmptyLineBetweenFields" value="true"/>
+        </module>
+        <module name="SeparatorWrap">
+            <property name="id" value="SeparatorWrapDot"/>
+            <property name="tokens" value="DOT"/>
+            <property name="option" value="nl"/>
+        </module>
+        <module name="SeparatorWrap">
+            <property name="id" value="SeparatorWrapComma"/>
+            <property name="tokens" value="COMMA"/>
+            <property name="option" value="EOL"/>
+        </module>
+        <module name="SeparatorWrap">
+            <!-- ELLIPSIS is EOL until https://github.com/google/styleguide/issues/258 -->
+            <property name="id" value="SeparatorWrapEllipsis"/>
+            <property name="tokens" value="ELLIPSIS"/>
+            <property name="option" value="EOL"/>
+        </module>
+        <module name="SeparatorWrap">
+            <!-- ARRAY_DECLARATOR is EOL until https://github.com/google/styleguide/issues/259 -->
+            <property name="id" value="SeparatorWrapArrayDeclarator"/>
+            <property name="tokens" value="ARRAY_DECLARATOR"/>
+            <property name="option" value="EOL"/>
+        </module>
+        <module name="SeparatorWrap">
+            <property name="id" value="SeparatorWrapMethodRef"/>
+            <property name="tokens" value="METHOD_REF"/>
+            <property name="option" value="nl"/>
+        </module>
+        <module name="PackageName">
+            <property name="format" value="^[a-z]+(\.[a-z][a-z0-9]*)*$"/>
+            <message key="name.invalidPattern"
+                     value="Package name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="TypeName">
+            <property name="tokens" value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, ANNOTATION_DEF"/>
+            <message key="name.invalidPattern"
+                     value="Type name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="MemberName">
+            <property name="format" value="^[a-z][a-z0-9][a-zA-Z0-9]*$"/>
+            <message key="name.invalidPattern"
+                     value="Member name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="ParameterName">
+            <property name="format" value="^[a-z]([a-z0-9][a-zA-Z0-9]*)?$"/>
+            <message key="name.invalidPattern"
+                     value="Parameter name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="LambdaParameterName">
+            <property name="format" value="^[a-z]([a-z0-9][a-zA-Z0-9]*)?$"/>
+            <message key="name.invalidPattern"
+                     value="Lambda parameter name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="CatchParameterName">
+            <property name="format" value="^[a-z]([a-z0-9][a-zA-Z0-9]*)?$"/>
+            <message key="name.invalidPattern"
+                     value="Catch parameter name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="LocalVariableName">
+            <property name="format" value="^[a-z]([a-z0-9][a-zA-Z0-9]*)?$"/>
+            <message key="name.invalidPattern"
+                     value="Local variable name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="ClassTypeParameterName">
+            <property name="format" value="(^[A-Z][0-9]?)$|([A-Z][a-zA-Z0-9]*[T]$)"/>
+            <message key="name.invalidPattern"
+                     value="Class type name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="MethodTypeParameterName">
+            <property name="format" value="(^[A-Z][0-9]?)$|([A-Z][a-zA-Z0-9]*[T]$)"/>
+            <message key="name.invalidPattern"
+                     value="Method type name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="InterfaceTypeParameterName">
+            <property name="format" value="(^[A-Z][0-9]?)$|([A-Z][a-zA-Z0-9]*[T]$)"/>
+            <message key="name.invalidPattern"
+                     value="Interface type name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="NoFinalizer"/>
+        <module name="GenericWhitespace">
+            <message key="ws.followed"
+                     value="GenericWhitespace ''{0}'' is followed by whitespace."/>
+            <message key="ws.preceded"
+                     value="GenericWhitespace ''{0}'' is preceded with whitespace."/>
+            <message key="ws.illegalFollow"
+                     value="GenericWhitespace ''{0}'' should followed by whitespace."/>
+            <message key="ws.notPreceded"
+                     value="GenericWhitespace ''{0}'' is not preceded with whitespace."/>
+        </module>
+        <module name="Indentation">
+            <property name="basicOffset" value="2"/>
+            <property name="braceAdjustment" value="0"/>
+            <property name="caseIndent" value="2"/>
+            <property name="throwsIndent" value="4"/>
+            <property name="lineWrappingIndentation" value="4"/>
+            <property name="arrayInitIndent" value="2"/>
+        </module>
+        <module name="AbbreviationAsWordInName">
+            <property name="ignoreFinal" value="false"/>
+            <property name="allowedAbbreviationLength" value="1"/>
+            <property name="tokens"
+                      value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, ANNOTATION_DEF, ANNOTATION_FIELD_DEF,
+                    PARAMETER_DEF, VARIABLE_DEF, METHOD_DEF"/>
+        </module>
+        <module name="OverloadMethodsDeclarationOrder"/>
+        <module name="VariableDeclarationUsageDistance"/>
+        <module name="CustomImportOrder">
+            <property name="sortImportsInGroupAlphabetically" value="true"/>
+            <property name="separateLineBetweenGroups" value="true"/>
+            <property name="customImportOrderRules" value="STATIC###THIRD_PARTY_PACKAGE"/>
+            <property name="tokens" value="IMPORT, STATIC_IMPORT, PACKAGE_DEF"/>
+        </module>
+        <module name="MethodParamPad">
+            <property name="tokens"
+                      value="CTOR_DEF, LITERAL_NEW, METHOD_CALL, METHOD_DEF,
+                    SUPER_CTOR_CALL, ENUM_CONSTANT_DEF"/>
+        </module>
+        <module name="NoWhitespaceBefore">
+            <property name="tokens"
+                      value="COMMA, SEMI, POST_INC, POST_DEC, DOT, ELLIPSIS,
+                    LABELED_STAT, METHOD_REF"/>
+            <property name="allowLineBreaks" value="true"/>
+        </module>
+        <module name="ParenPad">
+            <property name="tokens"
+                      value="ANNOTATION, ANNOTATION_FIELD_DEF, CTOR_CALL, CTOR_DEF, DOT, ENUM_CONSTANT_DEF,
+                    EXPR, LITERAL_CATCH, LITERAL_DO, LITERAL_FOR, LITERAL_IF, LITERAL_NEW,
+                    LITERAL_SWITCH, LITERAL_SYNCHRONIZED, LITERAL_WHILE, METHOD_CALL,
+                    METHOD_DEF, QUESTION, RESOURCE_SPECIFICATION, SUPER_CTOR_CALL, LAMBDA"/>
+        </module>
+        <module name="OperatorWrap">
+            <property name="option" value="NL"/>
+            <property name="tokens"
+                      value="BAND, BOR, BSR, BXOR, DIV, EQUAL, GE, GT, LAND, LE, LITERAL_INSTANCEOF, LOR,
+                    LT, MINUS, MOD, NOT_EQUAL, PLUS, QUESTION, SL, SR, STAR, METHOD_REF "/>
+        </module>
+        <module name="AnnotationLocation">
+            <property name="id" value="AnnotationLocationMostCases"/>
+            <property name="tokens"
+                      value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, METHOD_DEF, CTOR_DEF"/>
+        </module>
+        <module name="AnnotationLocation">
+            <property name="id" value="AnnotationLocationVariables"/>
+            <property name="tokens" value="VARIABLE_DEF"/>
+            <property name="allowSamelineMultipleAnnotations" value="true"/>
+        </module>
+        <module name="NonEmptyAtclauseDescription"/>
+        <module name="InvalidJavadocPosition"/>
+        <module name="JavadocTagContinuationIndentation"/>
+        <module name="SummaryJavadoc">
+            <property name="forbiddenSummaryFragments"
+                      value="^@return the *|^This method returns |^A [{]@code [a-zA-Z0-9]+[}]( is a )"/>
+        </module>
+        <module name="JavadocParagraph"/>
+        <module name="AtclauseOrder">
+            <property name="tagOrder" value="@param, @return, @throws, @deprecated"/>
+            <property name="target"
+                      value="CLASS_DEF, INTERFACE_DEF, ENUM_DEF, METHOD_DEF, CTOR_DEF, VARIABLE_DEF"/>
+        </module>
+        <module name="JavadocMethod">
+            <property name="scope" value="public"/>
+            <property name="allowMissingParamTags" value="true"/>
+            <property name="allowMissingReturnTag" value="true"/>
+            <property name="allowedAnnotations" value="Override, Test"/>
+            <property name="tokens" value="METHOD_DEF, CTOR_DEF, ANNOTATION_FIELD_DEF"/>
+        </module>
+<!--        <module name="MissingJavadocMethod">-->
+<!--            <property name="scope" value="public"/>-->
+<!--            <property name="minLineCount" value="2"/>-->
+<!--            <property name="allowedAnnotations" value="Override, Test"/>-->
+<!--            <property name="tokens" value="METHOD_DEF, CTOR_DEF, ANNOTATION_FIELD_DEF"/>-->
+<!--        </module>-->
+        <module name="MethodName">
+            <property name="format" value="^[a-z][a-z0-9][a-zA-Z0-9_]*$"/>
+            <message key="name.invalidPattern"
+                     value="Method name ''{0}'' must match pattern ''{1}''."/>
+        </module>
+        <module name="SingleLineJavadoc">
+            <property name="ignoreInlineTags" value="false"/>
+        </module>
+        <module name="EmptyCatchBlock">
+            <property name="exceptionVariableName" value="expected"/>
+        </module>
+        <module name="CommentsIndentation">
+            <property name="tokens" value="SINGLE_LINE_COMMENT, BLOCK_COMMENT_BEGIN"/>
+        </module>
+        <!-- https://checkstyle.org/config_filters.html#SuppressionXpathFilter -->
+        <module name="SuppressionXpathFilter">
+            <property name="file" value="${org.checkstyle.google.suppressionxpathfilter.config}"
+                      default="checkstyle-xpath-suppressions.xml" />
+            <property name="optional" value="true"/>
+        </module>
+    </module>
+</module>
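
Note: the SuppressionFilter and SuppressionXpathFilter modules above read their locations from the ${org.checkstyle.google.suppressionfilter.config} and ${org.checkstyle.google.suppressionxpathfilter.config} properties and are marked optional, so the build passes even when no suppression file is present. For reference, a minimal suppressions file could look like the sketch below; the file contents and the suppressed check are illustrative assumptions, not part of this commit.

<?xml version="1.0"?>
<!DOCTYPE suppressions PUBLIC
        "-//Checkstyle//DTD SuppressionFilter Configuration 1.2//EN"
        "https://checkstyle.org/dtds/suppressions_1_2.dtd">
<suppressions>
    <!-- Illustrative example only: relax Javadoc paragraph checks for test sources. -->
    <suppress checks="JavadocParagraph" files=".*[/\\]src[/\\]test[/\\].*\.java"/>
</suppressions>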

+ 32 - 5
kafka-ui-api/pom.xml

@@ -1,7 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project xmlns="http://maven.apache.org/POM/4.0.0"
-         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <parent>
         <artifactId>kafka-ui</artifactId>
         <groupId>com.provectus</groupId>
@@ -213,11 +212,39 @@
                 <artifactId>maven-surefire-plugin</artifactId>
                 <version>${maven-surefire-plugin.version}</version>
                 <configuration>
-                    <argLine>
-                        --illegal-access=permit
+                    <argLine> --illegal-access=permit
                     </argLine>
                 </configuration>
             </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-checkstyle-plugin</artifactId>
+                <version>3.1.1</version>
+                <dependencies>
+                    <dependency>
+                        <groupId>com.puppycrawl.tools</groupId>
+                        <artifactId>checkstyle</artifactId>
+                        <version>8.32</version>
+                    </dependency>
+                </dependencies>
+                <executions>
+                    <execution>
+                        <id>checkstyle</id>
+                        <phase>validate</phase>
+                        <goals>
+                            <goal>check</goal>
+                        </goals>
+                        <configuration>
+                            <violationSeverity>warning</violationSeverity>
+                            <failOnViolation>true</failOnViolation>
+                            <failsOnError>true</failsOnError>
+                            <includeTestSourceDirectory>true</includeTestSourceDirectory>
+                            <configLocation>file:${basedir}/../etc/checkstyle/checkstyle.xml</configLocation>
+                            <headerLocation>file:${basedir}/../etc/checkstyle/apache-header.txt</headerLocation>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
         </plugins>
     </build>
 
@@ -299,7 +326,7 @@
                                 </goals>
                                 <configuration>
                                     <tag>${git.revision}</tag>
-									                  <repository>provectuslabs/kafka-ui</repository>
+                                    <repository>provectuslabs/kafka-ui</repository>
                                     <buildArgs>
                                         <JAR_FILE>${project.build.finalName}.jar</JAR_FILE>
                                         <JAR_NAME>${project.artifactId}.jar</JAR_NAME>
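
Note: the checkstyle execution added above binds the check goal to the validate phase with violationSeverity=warning and failOnViolation=true, so every Maven build fails fast on style violations in both main and test sources. If a project-local suppressions file were introduced, the plugin's propertyExpansion parameter could pass its location through to the property referenced by checkstyle.xml; the snippet below is a hypothetical addition to the execution's <configuration> block, and the file path is an assumption, not part of this commit.

                            <!-- Hypothetical addition: expand the suppression-file property read by checkstyle.xml. -->
                            <propertyExpansion>org.checkstyle.google.suppressionfilter.config=${basedir}/../etc/checkstyle/checkstyle-suppressions.xml</propertyExpansion>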

+ 3 - 3
kafka-ui-api/src/main/java/com/provectus/kafka/ui/KafkaUiApplication.java

@@ -10,7 +10,7 @@ import org.springframework.scheduling.annotation.EnableScheduling;
 @EnableAsync
 public class KafkaUiApplication {
 
-	public static void main(String[] args) {
-		SpringApplication.run(KafkaUiApplication.class, args);
-	}
+  public static void main(String[] args) {
+    SpringApplication.run(KafkaUiApplication.class, args);
+  }
 }

+ 4 - 5
kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/KafkaConnectClients.java

@@ -1,15 +1,14 @@
 package com.provectus.kafka.ui.client;
 
 import com.provectus.kafka.ui.connect.api.KafkaConnectClientApi;
-
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 
 public final class KafkaConnectClients {
 
-    private static final Map<String, KafkaConnectClientApi> CACHE = new ConcurrentHashMap<>();
+  private static final Map<String, KafkaConnectClientApi> CACHE = new ConcurrentHashMap<>();
 
-    public static KafkaConnectClientApi withBaseUrl(String basePath) {
-        return CACHE.computeIfAbsent(basePath, RetryingKafkaConnectClient::new);
-    }
+  public static KafkaConnectClientApi withBaseUrl(String basePath) {
+    return CACHE.computeIfAbsent(basePath, RetryingKafkaConnectClient::new);
+  }
 }

+ 74 - 56
kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/RetryingKafkaConnectClient.java

@@ -1,11 +1,13 @@
 package com.provectus.kafka.ui.client;
 
-import com.provectus.kafka.ui.exception.RebalanceInProgressException;
-import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.connect.ApiClient;
 import com.provectus.kafka.ui.connect.api.KafkaConnectClientApi;
 import com.provectus.kafka.ui.connect.model.Connector;
 import com.provectus.kafka.ui.connect.model.NewConnector;
+import com.provectus.kafka.ui.exception.RebalanceInProgressException;
+import com.provectus.kafka.ui.exception.ValidationException;
+import java.util.List;
+import java.util.Map;
 import lombok.extern.log4j.Log4j2;
 import org.springframework.core.ParameterizedTypeReference;
 import org.springframework.http.HttpHeaders;
@@ -18,70 +20,86 @@ import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 import reactor.retry.Retry;
 
-import java.util.List;
-import java.util.Map;
-
 @Log4j2
 public class RetryingKafkaConnectClient extends KafkaConnectClientApi {
-    private static final int MAX_RETRIES = 5;
+  private static final int MAX_RETRIES = 5;
 
-    public RetryingKafkaConnectClient(String basePath) {
-        super(new RetryingApiClient().setBasePath(basePath));
-    }
+  public RetryingKafkaConnectClient(String basePath) {
+    super(new RetryingApiClient().setBasePath(basePath));
+  }
 
-    @Override
-    public Mono<Connector> createConnector(NewConnector newConnector) throws RestClientException {
-        return withBadRequestErrorHandling(
-                super.createConnector(newConnector)
-        );
-    }
+  private static <T> Mono<T> withRetryOnConflict(Mono<T> publisher) {
+    return publisher.retryWhen(
+        Retry.onlyIf(e -> e.exception() instanceof WebClientResponseException.Conflict)
+            .retryMax(MAX_RETRIES)
+    )
+        .onErrorResume(WebClientResponseException.Conflict.class,
+            e -> Mono.error(new RebalanceInProgressException()))
+        .doOnError(log::error);
+  }
 
-    @Override
-    public Mono<Connector> setConnectorConfig(String connectorName, Map<String, Object> requestBody) throws RestClientException {
-        return withBadRequestErrorHandling(
-                super.setConnectorConfig(connectorName, requestBody)
-        );
-    }
+  private static <T> Flux<T> withRetryOnConflict(Flux<T> publisher) {
+    return publisher.retryWhen(
+        Retry.onlyIf(e -> e.exception() instanceof WebClientResponseException.Conflict)
+            .retryMax(MAX_RETRIES)
+    )
+        .onErrorResume(WebClientResponseException.Conflict.class,
+            e -> Mono.error(new RebalanceInProgressException()))
+        .doOnError(log::error);
+  }
 
-    private static class RetryingApiClient extends ApiClient {
-        @Override
-        public <T> Mono<T> invokeAPI(String path, HttpMethod method, Map<String, Object> pathParams, MultiValueMap<String, String> queryParams, Object body, HttpHeaders headerParams, MultiValueMap<String, String> cookieParams, MultiValueMap<String, Object> formParams, List<MediaType> accept, MediaType contentType, String[] authNames, ParameterizedTypeReference<T> returnType) throws RestClientException {
-            return withRetryOnConflict(
-                    super.invokeAPI(path, method, pathParams, queryParams, body, headerParams, cookieParams, formParams, accept, contentType, authNames, returnType)
-            );
-        }
+  private static <T> Mono<T> withBadRequestErrorHandling(Mono<T> publisher) {
+    return publisher
+        .onErrorResume(WebClientResponseException.BadRequest.class, e ->
+            Mono.error(new ValidationException("Invalid configuration")))
+        .onErrorResume(WebClientResponseException.InternalServerError.class, e ->
+            Mono.error(new ValidationException("Invalid configuration")));
+  }
 
-        @Override
-        public <T> Flux<T> invokeFluxAPI(String path, HttpMethod method, Map<String, Object> pathParams, MultiValueMap<String, String> queryParams, Object body, HttpHeaders headerParams, MultiValueMap<String, String> cookieParams, MultiValueMap<String, Object> formParams, List<MediaType> accept, MediaType contentType, String[] authNames, ParameterizedTypeReference<T> returnType) throws RestClientException {
-            return withRetryOnConflict(
-                    super.invokeFluxAPI(path, method, pathParams, queryParams, body, headerParams, cookieParams, formParams, accept, contentType, authNames, returnType)
-            );
-        }
-    }
+  @Override
+  public Mono<Connector> createConnector(NewConnector newConnector) throws RestClientException {
+    return withBadRequestErrorHandling(
+        super.createConnector(newConnector)
+    );
+  }
 
-    private static <T> Mono<T> withRetryOnConflict(Mono<T> publisher) {
-        return publisher.retryWhen(
-                Retry.onlyIf(e -> e.exception() instanceof WebClientResponseException.Conflict)
-                        .retryMax(MAX_RETRIES)
-        )
-                .onErrorResume(WebClientResponseException.Conflict.class, e -> Mono.error(new RebalanceInProgressException()))
-                .doOnError(log::error);
-    }
+  @Override
+  public Mono<Connector> setConnectorConfig(String connectorName, Map<String, Object> requestBody)
+      throws RestClientException {
+    return withBadRequestErrorHandling(
+        super.setConnectorConfig(connectorName, requestBody)
+    );
+  }
 
-    private static <T> Flux<T> withRetryOnConflict(Flux<T> publisher) {
-        return publisher.retryWhen(
-                Retry.onlyIf(e -> e.exception() instanceof WebClientResponseException.Conflict)
-                        .retryMax(MAX_RETRIES)
-        )
-                .onErrorResume(WebClientResponseException.Conflict.class, e -> Mono.error(new RebalanceInProgressException()))
-                .doOnError(log::error);
+  private static class RetryingApiClient extends ApiClient {
+    @Override
+    public <T> Mono<T> invokeAPI(String path, HttpMethod method, Map<String, Object> pathParams,
+                                 MultiValueMap<String, String> queryParams, Object body,
+                                 HttpHeaders headerParams,
+                                 MultiValueMap<String, String> cookieParams,
+                                 MultiValueMap<String, Object> formParams, List<MediaType> accept,
+                                 MediaType contentType, String[] authNames,
+                                 ParameterizedTypeReference<T> returnType)
+        throws RestClientException {
+      return withRetryOnConflict(
+          super.invokeAPI(path, method, pathParams, queryParams, body, headerParams, cookieParams,
+              formParams, accept, contentType, authNames, returnType)
+      );
     }
 
-    private static <T> Mono<T> withBadRequestErrorHandling(Mono<T> publisher) {
-        return publisher
-                .onErrorResume(WebClientResponseException.BadRequest.class, e ->
-                        Mono.error(new ValidationException("Invalid configuration")))
-                .onErrorResume(WebClientResponseException.InternalServerError.class, e ->
-                        Mono.error(new ValidationException("Invalid configuration")));
+    @Override
+    public <T> Flux<T> invokeFluxAPI(String path, HttpMethod method, Map<String, Object> pathParams,
+                                     MultiValueMap<String, String> queryParams, Object body,
+                                     HttpHeaders headerParams,
+                                     MultiValueMap<String, String> cookieParams,
+                                     MultiValueMap<String, Object> formParams,
+                                     List<MediaType> accept, MediaType contentType,
+                                     String[] authNames, ParameterizedTypeReference<T> returnType)
+        throws RestClientException {
+      return withRetryOnConflict(
+          super.invokeFluxAPI(path, method, pathParams, queryParams, body, headerParams,
+              cookieParams, formParams, accept, contentType, authNames, returnType)
+      );
     }
+  }
 }

+ 22 - 23
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java

@@ -1,38 +1,37 @@
 package com.provectus.kafka.ui.config;
 
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Properties;
 import lombok.Data;
 import org.springframework.boot.context.properties.ConfigurationProperties;
 import org.springframework.context.annotation.Configuration;
 
-import java.util.ArrayList;
-import java.util.List;
-
 @Configuration
 @ConfigurationProperties("kafka")
 @Data
 public class ClustersProperties {
 
-    List<Cluster> clusters = new ArrayList<>();
+  List<Cluster> clusters = new ArrayList<>();
 
-    @Data
-    public static class Cluster {
-        String name;
-        String bootstrapServers;
-        String zookeeper;
-        String schemaRegistry;
-        String schemaNameTemplate = "%s-value";
-        String protobufFile;
-        String protobufMessageName;
-        List<ConnectCluster> kafkaConnect;
-        int jmxPort;
-        Properties properties;
-        boolean readOnly = false;
-    }
+  @Data
+  public static class Cluster {
+    String name;
+    String bootstrapServers;
+    String zookeeper;
+    String schemaRegistry;
+    String schemaNameTemplate = "%s-value";
+    String protobufFile;
+    String protobufMessageName;
+    List<ConnectCluster> kafkaConnect;
+    int jmxPort;
+    Properties properties;
+    boolean readOnly = false;
+  }
 
-    @Data
-    public static class ConnectCluster {
-        String name;
-        String address;
-    }
+  @Data
+  public static class ConnectCluster {
+    String name;
+    String address;
+  }
 }

+ 25 - 25
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/Config.java

@@ -1,6 +1,7 @@
 package com.provectus.kafka.ui.config;
 
 import com.provectus.kafka.ui.util.JmxPoolFactory;
+import javax.management.remote.JMXConnector;
 import org.apache.commons.pool2.KeyedObjectPool;
 import org.apache.commons.pool2.impl.GenericKeyedObjectPool;
 import org.apache.commons.pool2.impl.GenericKeyedObjectPoolConfig;
@@ -9,35 +10,34 @@ import org.springframework.context.annotation.Configuration;
 import org.springframework.jmx.export.MBeanExporter;
 import org.springframework.web.reactive.function.client.WebClient;
 
-import javax.management.remote.JMXConnector;
-
 @Configuration
 public class Config {
 
-    @Bean
-    public KeyedObjectPool<String, JMXConnector> pool() {
-        GenericKeyedObjectPool<String, JMXConnector> pool = new GenericKeyedObjectPool<>(new JmxPoolFactory());
-        pool.setConfig(poolConfig());
-        return pool;
-    }
+  @Bean
+  public KeyedObjectPool<String, JMXConnector> pool() {
+    GenericKeyedObjectPool<String, JMXConnector> pool =
+        new GenericKeyedObjectPool<>(new JmxPoolFactory());
+    pool.setConfig(poolConfig());
+    return pool;
+  }
 
-    private GenericKeyedObjectPoolConfig poolConfig() {
-        GenericKeyedObjectPoolConfig poolConfig = new GenericKeyedObjectPoolConfig();
-        poolConfig.setMaxIdlePerKey(3);
-        poolConfig.setMaxTotalPerKey(3);
-        return poolConfig;
-    }
+  private GenericKeyedObjectPoolConfig poolConfig() {
+    GenericKeyedObjectPoolConfig poolConfig = new GenericKeyedObjectPoolConfig();
+    poolConfig.setMaxIdlePerKey(3);
+    poolConfig.setMaxTotalPerKey(3);
+    return poolConfig;
+  }
 
-    @Bean
-    public MBeanExporter exporter() {
-        final MBeanExporter exporter = new MBeanExporter();
-        exporter.setAutodetect(true);
-        exporter.setExcludedBeans("pool");
-        return exporter;
-    }
+  @Bean
+  public MBeanExporter exporter() {
+    final MBeanExporter exporter = new MBeanExporter();
+    exporter.setAutodetect(true);
+    exporter.setExcludedBeans("pool");
+    return exporter;
+  }
 
-    @Bean
-    public WebClient webClient() {
-        return WebClient.create();
-    }
+  @Bean
+  public WebClient webClient() {
+    return WebClient.create();
+  }
 }

+ 17 - 17
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CorsGlobalConfiguration.java

@@ -1,5 +1,7 @@
 package com.provectus.kafka.ui.config;
 
+import static org.springdoc.core.Constants.CLASSPATH_RESOURCE_LOCATION;
+
 import org.springframework.context.annotation.Configuration;
 import org.springframework.context.annotation.Profile;
 import org.springframework.web.reactive.config.CorsRegistry;
@@ -8,27 +10,25 @@ import org.springframework.web.reactive.config.ResourceHandlerRegistry;
 import org.springframework.web.reactive.config.WebFluxConfigurer;
 import org.springframework.web.reactive.resource.WebJarsResourceResolver;
 
-import static org.springdoc.core.Constants.CLASSPATH_RESOURCE_LOCATION;
-
 @Configuration
 @EnableWebFlux
 @Profile("local")
 public class CorsGlobalConfiguration implements WebFluxConfigurer {
 
-    @Override
-    public void addCorsMappings(CorsRegistry registry) {
-        registry.addMapping("/**")
-                .allowedOrigins("*")
-                .allowedMethods("*")
-                .allowedHeaders("*")
-                .allowCredentials(true);
-    }
+  @Override
+  public void addCorsMappings(CorsRegistry registry) {
+    registry.addMapping("/**")
+        .allowedOrigins("*")
+        .allowedMethods("*")
+        .allowedHeaders("*")
+        .allowCredentials(true);
+  }
 
-    @Override
-    public void addResourceHandlers(ResourceHandlerRegistry registry) {
-        registry.addResourceHandler("/webjars/**")
-                .addResourceLocations(CLASSPATH_RESOURCE_LOCATION+"/webjars/")
-                .resourceChain(true)
-                .addResolver(new WebJarsResourceResolver());
-    }
+  @Override
+  public void addResourceHandlers(ResourceHandlerRegistry registry) {
+    registry.addResourceHandler("/webjars/**")
+        .addResourceLocations(CLASSPATH_RESOURCE_LOCATION + "/webjars/")
+        .resourceChain(true)
+        .addResolver(new WebJarsResourceResolver());
+  }
 }

+ 10 - 7
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java

@@ -8,12 +8,15 @@ import reactor.core.publisher.Mono;
 
 @Component
 public class CustomWebFilter implements WebFilter {
-    @Override
-    public Mono<Void> filter(ServerWebExchange exchange, WebFilterChain chain) {
-        if (exchange.getRequest().getURI().getPath().equals("/") || exchange.getRequest().getURI().getPath().startsWith("/ui")) {
-            return chain.filter(exchange.mutate().request(exchange.getRequest().mutate().path("/index.html").build()).build());
-        }
-
-        return chain.filter(exchange);
+  @Override
+  public Mono<Void> filter(ServerWebExchange exchange, WebFilterChain chain) {
+    if (exchange.getRequest().getURI().getPath().equals("/")
+        || exchange.getRequest().getURI().getPath().startsWith("/ui")) {
+      return chain.filter(
+          exchange.mutate().request(exchange.getRequest().mutate().path("/index.html").build())
+              .build());
     }
+
+    return chain.filter(exchange);
+  }
 }

+ 25 - 24
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ReadOnlyModeFilter.java

@@ -3,6 +3,7 @@ package com.provectus.kafka.ui.config;
 import com.provectus.kafka.ui.exception.NotFoundException;
 import com.provectus.kafka.ui.exception.ReadOnlyException;
 import com.provectus.kafka.ui.service.ClustersStorage;
+import java.util.regex.Pattern;
 import lombok.RequiredArgsConstructor;
 import org.jetbrains.annotations.NotNull;
 import org.springframework.core.annotation.Order;
@@ -13,37 +14,37 @@ import org.springframework.web.server.WebFilter;
 import org.springframework.web.server.WebFilterChain;
 import reactor.core.publisher.Mono;
 
-import java.util.regex.Pattern;
-
 @Order
 @Component
 @RequiredArgsConstructor
 public class ReadOnlyModeFilter implements WebFilter {
-    private static final Pattern CLUSTER_NAME_REGEX = Pattern.compile("/api/clusters/(?<clusterName>[^/]++)");
-
-    private final ClustersStorage clustersStorage;
+  private static final Pattern CLUSTER_NAME_REGEX =
+      Pattern.compile("/api/clusters/(?<clusterName>[^/]++)");
 
-    @NotNull
-    @Override
-    public Mono<Void> filter(ServerWebExchange exchange, @NotNull WebFilterChain chain) {
-        var isSafeMethod = exchange.getRequest().getMethod() == HttpMethod.GET;
-        if (isSafeMethod) {
-            return chain.filter(exchange);
-        }
+  private final ClustersStorage clustersStorage;
 
-        var path = exchange.getRequest().getURI().getPath();
-        var matcher = CLUSTER_NAME_REGEX.matcher(path);
-        if (!matcher.find()) {
-            return chain.filter(exchange);
-        }
-        var clusterName = matcher.group("clusterName");
-        var kafkaCluster = clustersStorage.getClusterByName(clusterName)
-                .orElseThrow(() -> new NotFoundException(String.format("No cluster for name '%s'", clusterName)));
+  @NotNull
+  @Override
+  public Mono<Void> filter(ServerWebExchange exchange, @NotNull WebFilterChain chain) {
+    var isSafeMethod = exchange.getRequest().getMethod() == HttpMethod.GET;
+    if (isSafeMethod) {
+      return chain.filter(exchange);
+    }
 
-        if (!kafkaCluster.getReadOnly()) {
-            return chain.filter(exchange);
-        }
+    var path = exchange.getRequest().getURI().getPath();
+    var matcher = CLUSTER_NAME_REGEX.matcher(path);
+    if (!matcher.find()) {
+      return chain.filter(exchange);
+    }
+    var clusterName = matcher.group("clusterName");
+    var kafkaCluster = clustersStorage.getClusterByName(clusterName)
+        .orElseThrow(
+            () -> new NotFoundException(String.format("No cluster for name '%s'", clusterName)));
 
-        return Mono.error(ReadOnlyException::new);
+    if (!kafkaCluster.getReadOnly()) {
+      return chain.filter(exchange);
     }
+
+    return Mono.error(ReadOnlyException::new);
+  }
 }

+ 8 - 8
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/SecurityConfig.java

@@ -12,13 +12,13 @@ import org.springframework.security.web.server.SecurityWebFilterChain;
 @ConditionalOnProperty(value = "auth.enabled", havingValue = "false")
 public class SecurityConfig {
 
-	@Bean
-	public SecurityWebFilterChain configure(ServerHttpSecurity http) {
-		return http.authorizeExchange()
-				.anyExchange().permitAll()
-				.and()
-				.csrf().disable()
-				.build();
-	}
+  @Bean
+  public SecurityWebFilterChain configure(ServerHttpSecurity http) {
+    return http.authorizeExchange()
+        .anyExchange().permitAll()
+        .and()
+        .csrf().disable()
+        .build();
+  }
 
 }

+ 5 - 3
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/BrokersController.java

@@ -1,9 +1,9 @@
 package com.provectus.kafka.ui.controller;
 
 import com.provectus.kafka.ui.api.BrokersApi;
-import com.provectus.kafka.ui.service.ClusterService;
 import com.provectus.kafka.ui.model.Broker;
 import com.provectus.kafka.ui.model.BrokerMetrics;
+import com.provectus.kafka.ui.service.ClusterService;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.springframework.http.ResponseEntity;
@@ -19,14 +19,16 @@ public class BrokersController implements BrokersApi {
   private final ClusterService clusterService;
 
   @Override
-  public Mono<ResponseEntity<BrokerMetrics>> getBrokersMetrics(String clusterName, Integer id, ServerWebExchange exchange) {
+  public Mono<ResponseEntity<BrokerMetrics>> getBrokersMetrics(String clusterName, Integer id,
+                                                               ServerWebExchange exchange) {
     return clusterService.getBrokerMetrics(clusterName, id)
         .map(ResponseEntity::ok)
         .onErrorReturn(ResponseEntity.notFound().build());
   }
 
   @Override
-  public Mono<ResponseEntity<Flux<Broker>>> getBrokers(String clusterName, ServerWebExchange exchange) {
+  public Mono<ResponseEntity<Flux<Broker>>> getBrokers(String clusterName,
+                                                       ServerWebExchange exchange) {
     return Mono.just(ResponseEntity.ok(clusterService.getBrokers(clusterName)));
   }
 }

+ 5 - 3
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ClustersController.java

@@ -1,10 +1,10 @@
 package com.provectus.kafka.ui.controller;
 
 import com.provectus.kafka.ui.api.ClustersApi;
-import com.provectus.kafka.ui.service.ClusterService;
 import com.provectus.kafka.ui.model.Cluster;
 import com.provectus.kafka.ui.model.ClusterMetrics;
 import com.provectus.kafka.ui.model.ClusterStats;
+import com.provectus.kafka.ui.service.ClusterService;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.springframework.http.ResponseEntity;
@@ -20,14 +20,16 @@ public class ClustersController implements ClustersApi {
   private final ClusterService clusterService;
 
   @Override
-  public Mono<ResponseEntity<ClusterMetrics>> getClusterMetrics(String clusterName, ServerWebExchange exchange) {
+  public Mono<ResponseEntity<ClusterMetrics>> getClusterMetrics(String clusterName,
+                                                                ServerWebExchange exchange) {
     return clusterService.getClusterMetrics(clusterName)
         .map(ResponseEntity::ok)
         .onErrorReturn(ResponseEntity.notFound().build());
   }
 
   @Override
-  public Mono<ResponseEntity<ClusterStats>> getClusterStats(String clusterName, ServerWebExchange exchange) {
+  public Mono<ResponseEntity<ClusterStats>> getClusterStats(String clusterName,
+                                                            ServerWebExchange exchange) {
     return clusterService.getClusterStats(clusterName)
         .map(ResponseEntity::ok)
         .onErrorReturn(ResponseEntity.notFound().build());

+ 7 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java

@@ -1,9 +1,9 @@
 package com.provectus.kafka.ui.controller;
 
 import com.provectus.kafka.ui.api.ConsumerGroupsApi;
-import com.provectus.kafka.ui.service.ClusterService;
 import com.provectus.kafka.ui.model.ConsumerGroup;
 import com.provectus.kafka.ui.model.ConsumerGroupDetails;
+import com.provectus.kafka.ui.service.ClusterService;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.springframework.http.ResponseEntity;
@@ -21,15 +21,18 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {
   @Override
   public Mono<ResponseEntity<ConsumerGroupDetails>> getConsumerGroup(
       String clusterName, String consumerGroupId, ServerWebExchange exchange) {
-    return clusterService.getConsumerGroupDetail(clusterName, consumerGroupId).map(ResponseEntity::ok);
+    return clusterService.getConsumerGroupDetail(clusterName, consumerGroupId)
+        .map(ResponseEntity::ok);
   }
 
 
   @Override
-  public Mono<ResponseEntity<Flux<ConsumerGroup>>> getConsumerGroups(String clusterName, ServerWebExchange exchange) {
+  public Mono<ResponseEntity<Flux<ConsumerGroup>>> getConsumerGroups(String clusterName,
+                                                                     ServerWebExchange exchange) {
     return clusterService.getConsumerGroups(clusterName)
         .map(Flux::fromIterable)
         .map(ResponseEntity::ok)
-        .switchIfEmpty(Mono.just(ResponseEntity.notFound().build())); // TODO: check behaviour on cluster not found and empty groups list
+        .switchIfEmpty(Mono.just(ResponseEntity.notFound()
+            .build())); // TODO: check behaviour on cluster not found and empty groups list
   }
 }

+ 96 - 63
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KafkaConnectController.java

@@ -1,8 +1,16 @@
 package com.provectus.kafka.ui.controller;
 
 import com.provectus.kafka.ui.api.KafkaConnectApi;
+import com.provectus.kafka.ui.model.Connect;
+import com.provectus.kafka.ui.model.Connector;
+import com.provectus.kafka.ui.model.ConnectorAction;
+import com.provectus.kafka.ui.model.ConnectorPlugin;
+import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponse;
+import com.provectus.kafka.ui.model.NewConnector;
+import com.provectus.kafka.ui.model.Task;
 import com.provectus.kafka.ui.service.KafkaConnectService;
-import com.provectus.kafka.ui.model.*;
+import java.util.Map;
+import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.springframework.http.ResponseEntity;
@@ -11,82 +19,107 @@ import org.springframework.web.server.ServerWebExchange;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
-import javax.validation.Valid;
-import java.util.Map;
-
 @RestController
 @RequiredArgsConstructor
 @Log4j2
 public class KafkaConnectController implements KafkaConnectApi {
-    private final KafkaConnectService kafkaConnectService;
+  private final KafkaConnectService kafkaConnectService;
 
-    @Override
-    public Mono<ResponseEntity<Flux<Connect>>> getConnects(String clusterName, ServerWebExchange exchange) {
-        return kafkaConnectService.getConnects(clusterName).map(ResponseEntity::ok);
-    }
+  @Override
+  public Mono<ResponseEntity<Flux<Connect>>> getConnects(String clusterName,
+                                                         ServerWebExchange exchange) {
+    return kafkaConnectService.getConnects(clusterName).map(ResponseEntity::ok);
+  }
 
-    @Override
-    public Mono<ResponseEntity<Flux<String>>> getConnectors(String clusterName, String connectName, ServerWebExchange exchange) {
-        Flux<String> connectors = kafkaConnectService.getConnectors(clusterName, connectName);
-        return Mono.just(ResponseEntity.ok(connectors));
-    }
+  @Override
+  public Mono<ResponseEntity<Flux<String>>> getConnectors(String clusterName, String connectName,
+                                                          ServerWebExchange exchange) {
+    Flux<String> connectors = kafkaConnectService.getConnectors(clusterName, connectName);
+    return Mono.just(ResponseEntity.ok(connectors));
+  }
 
-    @Override
-    public Mono<ResponseEntity<Connector>> createConnector(String clusterName, String connectName, @Valid Mono<NewConnector> connector, ServerWebExchange exchange) {
-        return kafkaConnectService.createConnector(clusterName, connectName, connector)
-                .map(ResponseEntity::ok);
-    }
+  @Override
+  public Mono<ResponseEntity<Connector>> createConnector(String clusterName, String connectName,
+                                                         @Valid Mono<NewConnector> connector,
+                                                         ServerWebExchange exchange) {
+    return kafkaConnectService.createConnector(clusterName, connectName, connector)
+        .map(ResponseEntity::ok);
+  }
 
-    @Override
-    public Mono<ResponseEntity<Connector>> getConnector(String clusterName, String connectName, String connectorName, ServerWebExchange exchange) {
-        return kafkaConnectService.getConnector(clusterName, connectName, connectorName)
-                .map(ResponseEntity::ok);
-    }
+  @Override
+  public Mono<ResponseEntity<Connector>> getConnector(String clusterName, String connectName,
+                                                      String connectorName,
+                                                      ServerWebExchange exchange) {
+    return kafkaConnectService.getConnector(clusterName, connectName, connectorName)
+        .map(ResponseEntity::ok);
+  }
 
-    @Override
-    public Mono<ResponseEntity<Void>> deleteConnector(String clusterName, String connectName, String connectorName, ServerWebExchange exchange) {
-        return kafkaConnectService.deleteConnector(clusterName, connectName, connectorName)
-                .map(ResponseEntity::ok);
-    }
+  @Override
+  public Mono<ResponseEntity<Void>> deleteConnector(String clusterName, String connectName,
+                                                    String connectorName,
+                                                    ServerWebExchange exchange) {
+    return kafkaConnectService.deleteConnector(clusterName, connectName, connectorName)
+        .map(ResponseEntity::ok);
+  }
 
-    @Override
-    public Mono<ResponseEntity<Map<String, Object>>> getConnectorConfig(String clusterName, String connectName, String connectorName, ServerWebExchange exchange) {
-        return kafkaConnectService.getConnectorConfig(clusterName, connectName, connectorName)
-                .map(ResponseEntity::ok);
-    }
+  @Override
+  public Mono<ResponseEntity<Map<String, Object>>> getConnectorConfig(String clusterName,
+                                                                      String connectName,
+                                                                      String connectorName,
+                                                                      ServerWebExchange exchange) {
+    return kafkaConnectService.getConnectorConfig(clusterName, connectName, connectorName)
+        .map(ResponseEntity::ok);
+  }
 
-    @Override
-    public Mono<ResponseEntity<Connector>> setConnectorConfig(String clusterName, String connectName, String connectorName, @Valid Mono<Object> requestBody, ServerWebExchange exchange) {
-        return kafkaConnectService.setConnectorConfig(clusterName, connectName, connectorName, requestBody)
-                .map(ResponseEntity::ok);
-    }
+  @Override
+  public Mono<ResponseEntity<Connector>> setConnectorConfig(String clusterName, String connectName,
+                                                            String connectorName,
+                                                            @Valid Mono<Object> requestBody,
+                                                            ServerWebExchange exchange) {
+    return kafkaConnectService
+        .setConnectorConfig(clusterName, connectName, connectorName, requestBody)
+        .map(ResponseEntity::ok);
+  }
 
-    @Override
-    public Mono<ResponseEntity<Void>> updateConnectorState(String clusterName, String connectName, String connectorName, ConnectorAction action, ServerWebExchange exchange) {
-        return kafkaConnectService.updateConnectorState(clusterName, connectName, connectorName, action)
-                .map(ResponseEntity::ok);
-    }
+  @Override
+  public Mono<ResponseEntity<Void>> updateConnectorState(String clusterName, String connectName,
+                                                         String connectorName,
+                                                         ConnectorAction action,
+                                                         ServerWebExchange exchange) {
+    return kafkaConnectService.updateConnectorState(clusterName, connectName, connectorName, action)
+        .map(ResponseEntity::ok);
+  }
 
-    @Override
-    public Mono<ResponseEntity<Flux<Task>>> getConnectorTasks(String clusterName, String connectName, String connectorName, ServerWebExchange exchange) {
-        return Mono.just(ResponseEntity.ok(kafkaConnectService.getConnectorTasks(clusterName, connectName, connectorName)));
-    }
+  @Override
+  public Mono<ResponseEntity<Flux<Task>>> getConnectorTasks(String clusterName, String connectName,
+                                                            String connectorName,
+                                                            ServerWebExchange exchange) {
+    return Mono.just(ResponseEntity
+        .ok(kafkaConnectService.getConnectorTasks(clusterName, connectName, connectorName)));
+  }
 
-    @Override
-    public Mono<ResponseEntity<Void>> restartConnectorTask(String clusterName, String connectName, String connectorName, Integer taskId, ServerWebExchange exchange) {
-        return kafkaConnectService.restartConnectorTask(clusterName, connectName, connectorName, taskId)
-                .map(ResponseEntity::ok);
-    }
+  @Override
+  public Mono<ResponseEntity<Void>> restartConnectorTask(String clusterName, String connectName,
+                                                         String connectorName, Integer taskId,
+                                                         ServerWebExchange exchange) {
+    return kafkaConnectService.restartConnectorTask(clusterName, connectName, connectorName, taskId)
+        .map(ResponseEntity::ok);
+  }
 
-    @Override
-    public Mono<ResponseEntity<Flux<ConnectorPlugin>>> getConnectorPlugins(String clusterName, String connectName, ServerWebExchange exchange) {
-        return kafkaConnectService.getConnectorPlugins(clusterName, connectName)
-                .map(ResponseEntity::ok);
-    }
+  @Override
+  public Mono<ResponseEntity<Flux<ConnectorPlugin>>> getConnectorPlugins(
+      String clusterName, String connectName, ServerWebExchange exchange) {
+    return kafkaConnectService.getConnectorPlugins(clusterName, connectName)
+        .map(ResponseEntity::ok);
+  }
 
-    @Override
-    public Mono<ResponseEntity<ConnectorPluginConfigValidationResponse>> validateConnectorPluginConfig(String clusterName, String connectName, String pluginName, @Valid Mono<Object> requestBody, ServerWebExchange exchange) {
-        return kafkaConnectService.validateConnectorPluginConfig(clusterName, connectName, pluginName, requestBody)
-                .map(ResponseEntity::ok);
-    }
+  @Override
+  public Mono<ResponseEntity<ConnectorPluginConfigValidationResponse>>
+      validateConnectorPluginConfig(
+        String clusterName, String connectName, String pluginName, @Valid Mono<Object> requestBody,
+        ServerWebExchange exchange) {
+    return kafkaConnectService
+        .validateConnectorPluginConfig(clusterName, connectName, pluginName, requestBody)
+        .map(ResponseEntity::ok);
+  }
 }

+ 9 - 5
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java

@@ -2,9 +2,9 @@ package com.provectus.kafka.ui.controller;
 
 import com.provectus.kafka.ui.api.MessagesApi;
 import com.provectus.kafka.ui.model.ConsumerPosition;
-import com.provectus.kafka.ui.service.ClusterService;
 import com.provectus.kafka.ui.model.SeekType;
 import com.provectus.kafka.ui.model.TopicMessage;
+import com.provectus.kafka.ui.service.ClusterService;
 import java.util.Collections;
 import java.util.List;
 import java.util.Optional;
@@ -27,7 +27,8 @@ public class MessagesController implements MessagesApi {
 
   @Override
   public Mono<ResponseEntity<Void>> deleteTopicMessages(
-      String clusterName, String topicName, @Valid List<Integer> partitions, ServerWebExchange exchange) {
+      String clusterName, String topicName, @Valid List<Integer> partitions,
+      ServerWebExchange exchange) {
     return clusterService.deleteTopicMessages(
         clusterName,
         topicName,
@@ -41,7 +42,8 @@ public class MessagesController implements MessagesApi {
       String clusterName, String topicName, @Valid SeekType seekType, @Valid List<String> seekTo,
       @Valid Integer limit, @Valid String q, ServerWebExchange exchange) {
     return parseConsumerPosition(seekType, seekTo)
-        .map(consumerPosition -> ResponseEntity.ok(clusterService.getMessages(clusterName, topicName, consumerPosition, q, limit)));
+        .map(consumerPosition -> ResponseEntity
+            .ok(clusterService.getMessages(clusterName, topicName, consumerPosition, q, limit)));
   }
 
   private Mono<ConsumerPosition> parseConsumerPosition(SeekType seekType, List<String> seekTo) {
@@ -51,13 +53,15 @@ public class MessagesController implements MessagesApi {
         .map(p -> {
           String[] splited = p.split("::");
           if (splited.length != 2) {
-            throw new IllegalArgumentException("Wrong seekTo argument format. See API docs for details");
+            throw new IllegalArgumentException(
+                "Wrong seekTo argument format. See API docs for details");
           }
 
           return Pair.of(Integer.parseInt(splited[0]), Long.parseLong(splited[1]));
         })
         .collectMap(Pair::getKey, Pair::getValue)
-        .map(positions -> new ConsumerPosition(seekType != null ? seekType : SeekType.BEGINNING, positions));
+        .map(positions -> new ConsumerPosition(seekType != null ? seekType : SeekType.BEGINNING,
+            positions));
   }
 
 }
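
A note on the seekTo handling rewrapped above: each element is expected in the form partition::offset, split on "::" into a partition and an offset, and the resulting pairs become the ConsumerPosition map (SeekType.BEGINNING is used when no seek type is supplied). A minimal standalone sketch of that parsing step, using made-up input values instead of the reactive pipeline:

// Standalone sketch of the partition::offset parsing shown in the diff above.
// The input values are hypothetical; the controller performs the same split
// and validation inside a Flux before building a ConsumerPosition.
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class SeekToParseSketch {
  public static void main(String[] args) {
    List<String> seekTo = List.of("0::42", "1::100");
    Map<Integer, Long> positions = new HashMap<>();
    for (String p : seekTo) {
      String[] splited = p.split("::");
      if (splited.length != 2) {
        throw new IllegalArgumentException(
            "Wrong seekTo argument format. See API docs for details");
      }
      positions.put(Integer.parseInt(splited[0]), Long.parseLong(splited[1]));
    }
    System.out.println(positions); // {0=42, 1=100}
  }
}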

+ 20 - 12
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/SchemasController.java

@@ -1,11 +1,11 @@
 package com.provectus.kafka.ui.controller;
 
 import com.provectus.kafka.ui.api.SchemasApi;
-import com.provectus.kafka.ui.service.SchemaRegistryService;
 import com.provectus.kafka.ui.model.CompatibilityCheckResponse;
 import com.provectus.kafka.ui.model.CompatibilityLevel;
 import com.provectus.kafka.ui.model.NewSchemaSubject;
 import com.provectus.kafka.ui.model.SchemaSubject;
+import com.provectus.kafka.ui.service.SchemaRegistryService;
 import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
@@ -24,15 +24,16 @@ public class SchemasController implements SchemasApi {
 
   @Override
   public Mono<ResponseEntity<CompatibilityCheckResponse>> checkSchemaCompatibility(
-      String clusterName, String subject, @Valid Mono<NewSchemaSubject> newSchemaSubject, ServerWebExchange exchange) {
+      String clusterName, String subject, @Valid Mono<NewSchemaSubject> newSchemaSubject,
+      ServerWebExchange exchange) {
     return schemaRegistryService.checksSchemaCompatibility(clusterName, subject, newSchemaSubject)
         .map(ResponseEntity::ok);
   }
 
   @Override
-  public Mono<ResponseEntity<SchemaSubject>> createNewSchema(String clusterName,
-                                                             @Valid Mono<NewSchemaSubject> newSchemaSubject,
-                                                             ServerWebExchange exchange) {
+  public Mono<ResponseEntity<SchemaSubject>> createNewSchema(
+      String clusterName, @Valid Mono<NewSchemaSubject> newSchemaSubject,
+      ServerWebExchange exchange) {
     return schemaRegistryService
         .registerNewSchema(clusterName, newSchemaSubject)
         .map(ResponseEntity::ok);
@@ -59,7 +60,8 @@ public class SchemasController implements SchemasApi {
   @Override
   public Mono<ResponseEntity<Flux<SchemaSubject>>> getAllVersionsBySubject(
       String clusterName, String subjectName, ServerWebExchange exchange) {
-    Flux<SchemaSubject> schemas = schemaRegistryService.getAllVersionsBySubject(clusterName, subjectName);
+    Flux<SchemaSubject> schemas =
+        schemaRegistryService.getAllVersionsBySubject(clusterName, subjectName);
     return Mono.just(ResponseEntity.ok(schemas));
   }
 
@@ -72,25 +74,30 @@ public class SchemasController implements SchemasApi {
   }
 
   @Override
-  public Mono<ResponseEntity<SchemaSubject>> getLatestSchema(String clusterName, String subject, ServerWebExchange exchange) {
-    return schemaRegistryService.getLatestSchemaVersionBySubject(clusterName, subject).map(ResponseEntity::ok);
+  public Mono<ResponseEntity<SchemaSubject>> getLatestSchema(String clusterName, String subject,
+                                                             ServerWebExchange exchange) {
+    return schemaRegistryService.getLatestSchemaVersionBySubject(clusterName, subject)
+        .map(ResponseEntity::ok);
   }
 
   @Override
   public Mono<ResponseEntity<SchemaSubject>> getSchemaByVersion(
       String clusterName, String subject, Integer version, ServerWebExchange exchange) {
-    return schemaRegistryService.getSchemaSubjectByVersion(clusterName, subject, version).map(ResponseEntity::ok);
+    return schemaRegistryService.getSchemaSubjectByVersion(clusterName, subject, version)
+        .map(ResponseEntity::ok);
   }
 
   @Override
-  public Mono<ResponseEntity<Flux<SchemaSubject>>> getSchemas(String clusterName, ServerWebExchange exchange) {
+  public Mono<ResponseEntity<Flux<SchemaSubject>>> getSchemas(String clusterName,
+                                                              ServerWebExchange exchange) {
     Flux<SchemaSubject> subjects = schemaRegistryService.getAllLatestVersionSchemas(clusterName);
     return Mono.just(ResponseEntity.ok(subjects));
   }
 
   @Override
   public Mono<ResponseEntity<Void>> updateGlobalSchemaCompatibilityLevel(
-      String clusterName, @Valid Mono<CompatibilityLevel> compatibilityLevel, ServerWebExchange exchange) {
+      String clusterName, @Valid Mono<CompatibilityLevel> compatibilityLevel,
+      ServerWebExchange exchange) {
     log.info("Updating schema compatibility globally");
     return schemaRegistryService.updateSchemaCompatibility(clusterName, compatibilityLevel)
         .map(ResponseEntity::ok);
@@ -98,7 +105,8 @@ public class SchemasController implements SchemasApi {
 
   @Override
   public Mono<ResponseEntity<Void>> updateSchemaCompatibilityLevel(
-      String clusterName, String subject, @Valid Mono<CompatibilityLevel> compatibilityLevel, ServerWebExchange exchange) {
+      String clusterName, String subject, @Valid Mono<CompatibilityLevel> compatibilityLevel,
+      ServerWebExchange exchange) {
     log.info("Updating schema compatibility for subject: {}", subject);
     return schemaRegistryService.updateSchemaCompatibility(clusterName, subject, compatibilityLevel)
         .map(ResponseEntity::ok);

+ 14 - 7
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java

@@ -1,8 +1,14 @@
 package com.provectus.kafka.ui.controller;
 
 import com.provectus.kafka.ui.api.TopicsApi;
-import com.provectus.kafka.ui.model.*;
+import com.provectus.kafka.ui.model.Topic;
+import com.provectus.kafka.ui.model.TopicConfig;
+import com.provectus.kafka.ui.model.TopicDetails;
+import com.provectus.kafka.ui.model.TopicFormData;
+import com.provectus.kafka.ui.model.TopicsResponse;
 import com.provectus.kafka.ui.service.ClusterService;
+import java.util.Optional;
+import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.springframework.http.HttpStatus;
@@ -12,9 +18,6 @@ import org.springframework.web.server.ServerWebExchange;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
-import javax.validation.Valid;
-import java.util.Optional;
-
 @RestController
 @RequiredArgsConstructor
 @Log4j2
@@ -58,13 +61,17 @@ public class TopicsController implements TopicsApi {
   }
 
   @Override
-  public Mono<ResponseEntity<TopicsResponse>> getTopics(String clusterName, @Valid Integer page, @Valid Integer perPage, ServerWebExchange exchange) {
-    return Mono.just(ResponseEntity.ok(clusterService.getTopics(clusterName, Optional.ofNullable(page), Optional.ofNullable(perPage))));
+  public Mono<ResponseEntity<TopicsResponse>> getTopics(String clusterName, @Valid Integer page,
+                                                        @Valid Integer perPage,
+                                                        ServerWebExchange exchange) {
+    return Mono.just(ResponseEntity.ok(clusterService
+        .getTopics(clusterName, Optional.ofNullable(page), Optional.ofNullable(perPage))));
   }
 
   @Override
   public Mono<ResponseEntity<Topic>> updateTopic(
-      String clusterId, String topicName, @Valid Mono<TopicFormData> topicFormData, ServerWebExchange exchange) {
+      String clusterId, String topicName, @Valid Mono<TopicFormData> topicFormData,
+      ServerWebExchange exchange) {
     return clusterService.updateTopic(clusterId, topicName, topicFormData).map(ResponseEntity::ok);
   }
 }

+ 34 - 34
kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/DeserializationService.java

@@ -1,46 +1,46 @@
 package com.provectus.kafka.ui.deserialization;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.provectus.kafka.ui.service.ClustersStorage;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import lombok.RequiredArgsConstructor;
-import org.springframework.stereotype.Component;
-
-import javax.annotation.PostConstruct;
+import com.provectus.kafka.ui.service.ClustersStorage;
 import java.util.Map;
 import java.util.stream.Collectors;
+import javax.annotation.PostConstruct;
+import lombok.RequiredArgsConstructor;
+import org.springframework.stereotype.Component;
 
 @Component
 @RequiredArgsConstructor
 public class DeserializationService {
 
-	private final ClustersStorage clustersStorage;
-	private final ObjectMapper objectMapper;
-	private Map<String, RecordDeserializer> clusterDeserializers;
-
-
-	@PostConstruct
-	public void init() {
-		this.clusterDeserializers = clustersStorage.getKafkaClusters().stream()
-				.collect(Collectors.toMap(
-						KafkaCluster::getName,
-						this::createRecordDeserializerForCluster
-				));
-	}
-
-	private RecordDeserializer createRecordDeserializerForCluster(KafkaCluster cluster) {
-		try {
-			if (cluster.getProtobufFile()!=null) {
-				return new ProtobufFileRecordDeserializer(cluster.getProtobufFile(), cluster.getProtobufMessageName(), objectMapper);
-			} else {
-				return new SchemaRegistryRecordDeserializer(cluster, objectMapper);
-			}
-		} catch (Throwable e) {
-			throw new RuntimeException("Can't init deserializer", e);
-		}
-	}
-
-	public RecordDeserializer getRecordDeserializerForCluster(KafkaCluster cluster) {
-		return clusterDeserializers.get(cluster.getName());
-	}
+  private final ClustersStorage clustersStorage;
+  private final ObjectMapper objectMapper;
+  private Map<String, RecordDeserializer> clusterDeserializers;
+
+
+  @PostConstruct
+  public void init() {
+    this.clusterDeserializers = clustersStorage.getKafkaClusters().stream()
+        .collect(Collectors.toMap(
+            KafkaCluster::getName,
+            this::createRecordDeserializerForCluster
+        ));
+  }
+
+  private RecordDeserializer createRecordDeserializerForCluster(KafkaCluster cluster) {
+    try {
+      if (cluster.getProtobufFile() != null) {
+        return new ProtobufFileRecordDeserializer(cluster.getProtobufFile(),
+            cluster.getProtobufMessageName(), objectMapper);
+      } else {
+        return new SchemaRegistryRecordDeserializer(cluster, objectMapper);
+      }
+    } catch (Throwable e) {
+      throw new RuntimeException("Can't init deserializer", e);
+    }
+  }
+
+  public RecordDeserializer getRecordDeserializerForCluster(KafkaCluster cluster) {
+    return clusterDeserializers.get(cluster.getName());
+  }
 }

+ 26 - 25
kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/ProtobufFileRecordDeserializer.java

@@ -5,41 +5,42 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import com.google.protobuf.DynamicMessage;
 import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
 import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaUtils;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.common.utils.Bytes;
-
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.util.Map;
 import java.util.stream.Collectors;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.common.utils.Bytes;
 
 public class ProtobufFileRecordDeserializer implements RecordDeserializer {
-    private final ProtobufSchema protobufSchema;
-    private final ObjectMapper objectMapper;
+  private final ProtobufSchema protobufSchema;
+  private final ObjectMapper objectMapper;
 
-    public ProtobufFileRecordDeserializer(Path protobufSchemaPath, String messageName, ObjectMapper objectMapper) throws IOException {
-        this.objectMapper = objectMapper;
-        final String schemaString = Files.lines(protobufSchemaPath).collect(Collectors.joining());
-        this.protobufSchema = new ProtobufSchema(schemaString).copy(messageName);
-    }
+  public ProtobufFileRecordDeserializer(Path protobufSchemaPath, String messageName,
+                                        ObjectMapper objectMapper) throws IOException {
+    this.objectMapper = objectMapper;
+    final String schemaString = Files.lines(protobufSchemaPath).collect(Collectors.joining());
+    this.protobufSchema = new ProtobufSchema(schemaString).copy(messageName);
+  }
 
-    @Override
-    public Object deserialize(ConsumerRecord<Bytes, Bytes> record) {
-        try {
-            final DynamicMessage message = DynamicMessage.parseFrom(
-                    protobufSchema.toDescriptor(),
-                    new ByteArrayInputStream(record.value().get())
-            );
-            byte[] bytes = ProtobufSchemaUtils.toJson(message);
-            return parseJson(bytes);
-        } catch (Throwable e) {
-            throw new RuntimeException("Failed to parse record from topic " + record.topic(), e);
-        }
+  @Override
+  public Object deserialize(ConsumerRecord<Bytes, Bytes> record) {
+    try {
+      final DynamicMessage message = DynamicMessage.parseFrom(
+          protobufSchema.toDescriptor(),
+          new ByteArrayInputStream(record.value().get())
+      );
+      byte[] bytes = ProtobufSchemaUtils.toJson(message);
+      return parseJson(bytes);
+    } catch (Throwable e) {
+      throw new RuntimeException("Failed to parse record from topic " + record.topic(), e);
     }
+  }
 
-    private Object parseJson(byte[] bytes) throws IOException {
-        return objectMapper.readValue(bytes, new TypeReference<Map<String, Object>>() {});
-    }
+  private Object parseJson(byte[] bytes) throws IOException {
+    return objectMapper.readValue(bytes, new TypeReference<Map<String, Object>>() {
+    });
+  }
 }

+ 1 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/RecordDeserializer.java

@@ -5,5 +5,5 @@ import org.apache.kafka.common.utils.Bytes;
 
 public interface RecordDeserializer {
 
-	Object deserialize(ConsumerRecord<Bytes, Bytes> record);
+  Object deserialize(ConsumerRecord<Bytes, Bytes> record);
 }

+ 175 - 171
kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/SchemaRegistryRecordDeserializer.java

@@ -15,184 +15,188 @@ import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaProvider;
 import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaUtils;
 import io.confluent.kafka.serializers.KafkaAvroDeserializer;
 import io.confluent.kafka.serializers.protobuf.KafkaProtobufDeserializer;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
 import lombok.extern.log4j.Log4j2;
 import org.apache.avro.generic.GenericRecord;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.common.serialization.StringDeserializer;
 import org.apache.kafka.common.utils.Bytes;
 
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;
-
 @Log4j2
 public class SchemaRegistryRecordDeserializer implements RecordDeserializer {
 
-	private final static int CLIENT_IDENTITY_MAP_CAPACITY = 100;
-
-	private final KafkaCluster cluster;
-	private final SchemaRegistryClient schemaRegistryClient;
-	private final KafkaAvroDeserializer avroDeserializer;
-	private final KafkaProtobufDeserializer<?> protobufDeserializer;
-	private final ObjectMapper objectMapper;
-	private final StringDeserializer stringDeserializer;
-
-	private final Map<String, MessageFormat> topicFormatMap = new ConcurrentHashMap<>();
-
-	public SchemaRegistryRecordDeserializer(KafkaCluster cluster, ObjectMapper objectMapper) {
-		this.cluster = cluster;
-		this.objectMapper = objectMapper;
-
-		this.schemaRegistryClient = Optional.ofNullable(cluster.getSchemaRegistry())
-                .map(schemaRegistryUrl -> {
-                            List<SchemaProvider> schemaProviders = List.of(new AvroSchemaProvider(), new ProtobufSchemaProvider());
-                            return new CachedSchemaRegistryClient(
-                                    Collections.singletonList(schemaRegistryUrl),
-                                    CLIENT_IDENTITY_MAP_CAPACITY,
-                                    schemaProviders,
-                                    Collections.emptyMap()
-                            );
-                        }
-                ).orElse(null);
-
-		this.avroDeserializer = Optional.ofNullable(this.schemaRegistryClient)
-				.map(KafkaAvroDeserializer::new)
-				.orElse(null);
-		this.protobufDeserializer = Optional.ofNullable(this.schemaRegistryClient)
-				.map(KafkaProtobufDeserializer::new)
-				.orElse(null);
-		this.stringDeserializer = new StringDeserializer();
-	}
-
-	public Object deserialize(ConsumerRecord<Bytes, Bytes> record) {
-		MessageFormat format = getMessageFormat(record);
-
-		try {
-			Object parsedValue;
-			switch (format) {
-				case AVRO:
-					parsedValue = parseAvroRecord(record);
-					break;
-				case PROTOBUF:
-					parsedValue = parseProtobufRecord(record);
-					break;
-				case JSON:
-					parsedValue = parseJsonRecord(record);
-					break;
-				case STRING:
-					parsedValue = parseStringRecord(record);
-					break;
-				default:
-					throw new IllegalArgumentException("Unknown message format " + format + " for topic " + record.topic());
-			}
-			return parsedValue;
-		} catch (IOException e) {
-			throw new RuntimeException("Failed to parse record from topic " + record.topic(), e);
-		}
-	}
-
-	private MessageFormat getMessageFormat(ConsumerRecord<Bytes, Bytes> record) {
-		return topicFormatMap.computeIfAbsent(record.topic(), k -> detectFormat(record));
-	}
-
-	private MessageFormat detectFormat(ConsumerRecord<Bytes, Bytes> record) {
-		String schemaName = String.format(cluster.getSchemaNameTemplate(), record.topic());
-		if (schemaRegistryClient != null) {
-			try {
-				final List<Integer> versions = schemaRegistryClient.getAllVersions(schemaName);
-				if (!versions.isEmpty()) {
-					final Integer version = versions.iterator().next();
-					final String subjectName = String.format(cluster.getSchemaNameTemplate(), record.topic());
-					final Schema schema = schemaRegistryClient.getByVersion(subjectName, version, false);
-					if (schema.getSchemaType().equals(MessageFormat.PROTOBUF.name())) {
-						try {
-							protobufDeserializer.deserialize(record.topic(), record.value().get());
-							return MessageFormat.PROTOBUF;
-						} catch (Throwable e) {
-							log.info("Failed to get Protobuf schema for topic {}", record.topic(), e);
-						}
-					} else if (schema.getSchemaType().equals(MessageFormat.AVRO.name())) {
-						try {
-							avroDeserializer.deserialize(record.topic(), record.value().get());
-							return MessageFormat.AVRO;
-						} catch (Throwable e) {
-							log.info("Failed to get Avro schema for topic {}", record.topic(), e);
-						}
-					} else if (schema.getSchemaType().equals(MessageFormat.JSON.name())) {
-						try {
-							parseJsonRecord(record);
-							return MessageFormat.JSON;
-						} catch (IOException e) {
-							log.info("Failed to parse json from topic {}", record.topic());
-						}
-					}
-				}
-			} catch (RestClientException | IOException e) {
-				log.warn("Failed to get Schema for topic {}", record.topic(), e);
-			}
-		}
-
-		try {
-			parseJsonRecord(record);
-			return MessageFormat.JSON;
-		} catch (IOException e) {
-			log.info("Failed to parse json from topic {}", record.topic());
-		}
-
-		return MessageFormat.STRING;
-	}
-
-	private Object parseAvroRecord(ConsumerRecord<Bytes, Bytes> record) throws IOException {
-		String topic = record.topic();
-		if (record.value()!=null && avroDeserializer !=null) {
-			byte[] valueBytes = record.value().get();
-			GenericRecord avroRecord = (GenericRecord) avroDeserializer.deserialize(topic, valueBytes);
-			byte[] bytes = AvroSchemaUtils.toJson(avroRecord);
-			return parseJson(bytes);
-		} else {
-			return Map.of();
-		}
-	}
-
-	private Object parseProtobufRecord(ConsumerRecord<Bytes, Bytes> record) throws IOException {
-		String topic = record.topic();
-		if (record.value()!=null && protobufDeserializer !=null) {
-			byte[] valueBytes = record.value().get();
-			final Message message = protobufDeserializer.deserialize(topic, valueBytes);
-			byte[] bytes = ProtobufSchemaUtils.toJson(message);
-			return parseJson(bytes);
-		} else {
-			return Map.of();
-		}
-	}
-
-	private Object parseJsonRecord(ConsumerRecord<Bytes, Bytes> record) throws IOException {
-		var value = record.value();
-		if (value == null) {
-			return Map.of();
-		}
-		byte[] valueBytes = value.get();
-		return parseJson(valueBytes);
-	}
-
-	private Object parseJson(byte[] bytes) throws IOException {
-		return objectMapper.readValue(bytes, new TypeReference<Map<String, Object>>() {
-		});
-	}
-
-	private Object parseStringRecord(ConsumerRecord<Bytes, Bytes> record) {
-		String topic = record.topic();
-		if (record.value() == null) {
-			return Map.of();
-		}
-		byte[] valueBytes = record.value().get();
-		return stringDeserializer.deserialize(topic, valueBytes);
-	}
-
-	public enum MessageFormat {
-		AVRO,
-		JSON,
-		STRING,
-		PROTOBUF
-	}
+  private static final int CLIENT_IDENTITY_MAP_CAPACITY = 100;
+
+  private final KafkaCluster cluster;
+  private final SchemaRegistryClient schemaRegistryClient;
+  private final KafkaAvroDeserializer avroDeserializer;
+  private final KafkaProtobufDeserializer<?> protobufDeserializer;
+  private final ObjectMapper objectMapper;
+  private final StringDeserializer stringDeserializer;
+
+  private final Map<String, MessageFormat> topicFormatMap = new ConcurrentHashMap<>();
+
+  public SchemaRegistryRecordDeserializer(KafkaCluster cluster, ObjectMapper objectMapper) {
+    this.cluster = cluster;
+    this.objectMapper = objectMapper;
+
+    this.schemaRegistryClient = Optional.ofNullable(cluster.getSchemaRegistry())
+        .map(schemaRegistryUrl -> {
+              List<SchemaProvider> schemaProviders =
+                  List.of(new AvroSchemaProvider(), new ProtobufSchemaProvider());
+              return new CachedSchemaRegistryClient(
+                  Collections.singletonList(schemaRegistryUrl),
+                  CLIENT_IDENTITY_MAP_CAPACITY,
+                  schemaProviders,
+                  Collections.emptyMap()
+              );
+            }
+        ).orElse(null);
+
+    this.avroDeserializer = Optional.ofNullable(this.schemaRegistryClient)
+        .map(KafkaAvroDeserializer::new)
+        .orElse(null);
+    this.protobufDeserializer = Optional.ofNullable(this.schemaRegistryClient)
+        .map(KafkaProtobufDeserializer::new)
+        .orElse(null);
+    this.stringDeserializer = new StringDeserializer();
+  }
+
+  public Object deserialize(ConsumerRecord<Bytes, Bytes> record) {
+    MessageFormat format = getMessageFormat(record);
+
+    try {
+      Object parsedValue;
+      switch (format) {
+        case AVRO:
+          parsedValue = parseAvroRecord(record);
+          break;
+        case PROTOBUF:
+          parsedValue = parseProtobufRecord(record);
+          break;
+        case JSON:
+          parsedValue = parseJsonRecord(record);
+          break;
+        case STRING:
+          parsedValue = parseStringRecord(record);
+          break;
+        default:
+          throw new IllegalArgumentException(
+              "Unknown message format " + format + " for topic " + record.topic());
+      }
+      return parsedValue;
+    } catch (IOException e) {
+      throw new RuntimeException("Failed to parse record from topic " + record.topic(), e);
+    }
+  }
+
+  private MessageFormat getMessageFormat(ConsumerRecord<Bytes, Bytes> record) {
+    return topicFormatMap.computeIfAbsent(record.topic(), k -> detectFormat(record));
+  }
+
+  private MessageFormat detectFormat(ConsumerRecord<Bytes, Bytes> record) {
+    String schemaName = String.format(cluster.getSchemaNameTemplate(), record.topic());
+    if (schemaRegistryClient != null) {
+      try {
+        final List<Integer> versions = schemaRegistryClient.getAllVersions(schemaName);
+        if (!versions.isEmpty()) {
+          final Integer version = versions.iterator().next();
+          final String subjectName = String.format(cluster.getSchemaNameTemplate(), record.topic());
+          final Schema schema = schemaRegistryClient.getByVersion(subjectName, version, false);
+          if (schema.getSchemaType().equals(MessageFormat.PROTOBUF.name())) {
+            try {
+              protobufDeserializer.deserialize(record.topic(), record.value().get());
+              return MessageFormat.PROTOBUF;
+            } catch (Throwable e) {
+              log.info("Failed to get Protobuf schema for topic {}", record.topic(), e);
+            }
+          } else if (schema.getSchemaType().equals(MessageFormat.AVRO.name())) {
+            try {
+              avroDeserializer.deserialize(record.topic(), record.value().get());
+              return MessageFormat.AVRO;
+            } catch (Throwable e) {
+              log.info("Failed to get Avro schema for topic {}", record.topic(), e);
+            }
+          } else if (schema.getSchemaType().equals(MessageFormat.JSON.name())) {
+            try {
+              parseJsonRecord(record);
+              return MessageFormat.JSON;
+            } catch (IOException e) {
+              log.info("Failed to parse json from topic {}", record.topic());
+            }
+          }
+        }
+      } catch (RestClientException | IOException e) {
+        log.warn("Failed to get Schema for topic {}", record.topic(), e);
+      }
+    }
+
+    try {
+      parseJsonRecord(record);
+      return MessageFormat.JSON;
+    } catch (IOException e) {
+      log.info("Failed to parse json from topic {}", record.topic());
+    }
+
+    return MessageFormat.STRING;
+  }
+
+  private Object parseAvroRecord(ConsumerRecord<Bytes, Bytes> record) throws IOException {
+    String topic = record.topic();
+    if (record.value() != null && avroDeserializer != null) {
+      byte[] valueBytes = record.value().get();
+      GenericRecord avroRecord = (GenericRecord) avroDeserializer.deserialize(topic, valueBytes);
+      byte[] bytes = AvroSchemaUtils.toJson(avroRecord);
+      return parseJson(bytes);
+    } else {
+      return Map.of();
+    }
+  }
+
+  private Object parseProtobufRecord(ConsumerRecord<Bytes, Bytes> record) throws IOException {
+    String topic = record.topic();
+    if (record.value() != null && protobufDeserializer != null) {
+      byte[] valueBytes = record.value().get();
+      final Message message = protobufDeserializer.deserialize(topic, valueBytes);
+      byte[] bytes = ProtobufSchemaUtils.toJson(message);
+      return parseJson(bytes);
+    } else {
+      return Map.of();
+    }
+  }
+
+  private Object parseJsonRecord(ConsumerRecord<Bytes, Bytes> record) throws IOException {
+    var value = record.value();
+    if (value == null) {
+      return Map.of();
+    }
+    byte[] valueBytes = value.get();
+    return parseJson(valueBytes);
+  }
+
+  private Object parseJson(byte[] bytes) throws IOException {
+    return objectMapper.readValue(bytes, new TypeReference<Map<String, Object>>() {
+    });
+  }
+
+  private Object parseStringRecord(ConsumerRecord<Bytes, Bytes> record) {
+    String topic = record.topic();
+    if (record.value() == null) {
+      return Map.of();
+    }
+    byte[] valueBytes = record.value().get();
+    return stringDeserializer.deserialize(topic, valueBytes);
+  }
+
+  public enum MessageFormat {
+    AVRO,
+    JSON,
+    STRING,
+    PROTOBUF
+  }
 }
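
For context on detectFormat above: the registry subject is derived from the cluster's schemaNameTemplate, the schema type of one of its registered versions selects the probe (Protobuf, Avro or JSON), failed probes fall back to JSON and finally STRING, and the result is cached per topic in topicFormatMap. A small sketch of the subject derivation only, assuming the commonly used "%s-value" template (an assumption; the real value comes from the cluster configuration):

// Hypothetical illustration of the subject lookup key used by detectFormat.
// "%s-value" is an assumed template, not necessarily this cluster's setting.
public class SubjectNameSketch {
  public static void main(String[] args) {
    String schemaNameTemplate = "%s-value";
    String subject = String.format(schemaNameTemplate, "orders");
    System.out.println(subject); // orders-value
  }
}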

+ 9 - 9
kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/SimpleRecordDeserializer.java

@@ -6,14 +6,14 @@ import org.apache.kafka.common.utils.Bytes;
 
 public class SimpleRecordDeserializer implements RecordDeserializer {
 
-	private final StringDeserializer stringDeserializer = new StringDeserializer();
+  private final StringDeserializer stringDeserializer = new StringDeserializer();
 
-	@Override
-	public Object deserialize(ConsumerRecord<Bytes, Bytes> record) {
-		if (record.value()!=null) {
-			return stringDeserializer.deserialize(record.topic(), record.value().get());
-		} else {
-			return "empty";
-		}
-	}
+  @Override
+  public Object deserialize(ConsumerRecord<Bytes, Bytes> record) {
+    if (record.value() != null) {
+      return stringDeserializer.deserialize(record.topic(), record.value().get());
+    } else {
+      return "empty";
+    }
+  }
 }

+ 16 - 15
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/CustomBaseException.java

@@ -3,24 +3,25 @@ package com.provectus.kafka.ui.exception;
 import org.springframework.http.HttpStatus;
 
 public abstract class CustomBaseException extends RuntimeException {
-    public CustomBaseException() {
-    }
+  public CustomBaseException() {
+  }
 
-    public CustomBaseException(String message) {
-        super(message);
-    }
+  public CustomBaseException(String message) {
+    super(message);
+  }
 
-    public CustomBaseException(String message, Throwable cause) {
-        super(message, cause);
-    }
+  public CustomBaseException(String message, Throwable cause) {
+    super(message, cause);
+  }
 
-    public CustomBaseException(Throwable cause) {
-        super(cause);
-    }
+  public CustomBaseException(Throwable cause) {
+    super(cause);
+  }
 
-    public CustomBaseException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) {
-        super(message, cause, enableSuppression, writableStackTrace);
-    }
+  public CustomBaseException(String message, Throwable cause, boolean enableSuppression,
+                             boolean writableStackTrace) {
+    super(message, cause, enableSuppression, writableStackTrace);
+  }
 
-    public abstract HttpStatus getResponseStatusCode();
+  public abstract HttpStatus getResponseStatusCode();
 }

+ 8 - 8
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/DuplicateEntityException.java

@@ -2,14 +2,14 @@ package com.provectus.kafka.ui.exception;
 
 import org.springframework.http.HttpStatus;
 
-public class DuplicateEntityException extends CustomBaseException{
+public class DuplicateEntityException extends CustomBaseException {
 
-    public DuplicateEntityException(String message) {
-        super(message);
-    }
+  public DuplicateEntityException(String message) {
+    super(message);
+  }
 
-    @Override
-    public HttpStatus getResponseStatusCode() {
-        return HttpStatus.CONFLICT;
-    }
+  @Override
+  public HttpStatus getResponseStatusCode() {
+    return HttpStatus.CONFLICT;
+  }
 }

+ 17 - 18
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/GlobalErrorAttributes.java

@@ -1,32 +1,31 @@
 package com.provectus.kafka.ui.exception;
 
+import java.util.Map;
 import org.springframework.boot.web.reactive.error.DefaultErrorAttributes;
 import org.springframework.stereotype.Component;
 import org.springframework.web.reactive.function.client.WebClientResponseException;
 import org.springframework.web.reactive.function.server.ServerRequest;
 
-import java.util.Map;
-
 @Component
 public class GlobalErrorAttributes extends DefaultErrorAttributes {
 
-    public static final String STATUS = "status";
+  public static final String STATUS = "status";
 
-    @Override
-    public Map<String, Object> getErrorAttributes(ServerRequest request, boolean includeStackTrace) {
-        Map<String, Object> errorAttrs = super.getErrorAttributes(request, includeStackTrace);
-        includeCustomErrorAttributes(request, errorAttrs);
-        return errorAttrs;
-    }
+  @Override
+  public Map<String, Object> getErrorAttributes(ServerRequest request, boolean includeStackTrace) {
+    Map<String, Object> errorAttrs = super.getErrorAttributes(request, includeStackTrace);
+    includeCustomErrorAttributes(request, errorAttrs);
+    return errorAttrs;
+  }
 
-    private void includeCustomErrorAttributes(ServerRequest request, Map<String, Object> errorAttrs) {
-        Throwable error = getError(request);
-        if (error instanceof WebClientResponseException) {
-            var webClientError = (WebClientResponseException) error;
-            errorAttrs.put(STATUS, webClientError.getStatusCode());
-        } else if (error instanceof CustomBaseException) {
-            var customBaseError = (CustomBaseException) error;
-            errorAttrs.put(STATUS, customBaseError.getResponseStatusCode());
-        }
+  private void includeCustomErrorAttributes(ServerRequest request, Map<String, Object> errorAttrs) {
+    Throwable error = getError(request);
+    if (error instanceof WebClientResponseException) {
+      var webClientError = (WebClientResponseException) error;
+      errorAttrs.put(STATUS, webClientError.getStatusCode());
+    } else if (error instanceof CustomBaseException) {
+      var customBaseError = (CustomBaseException) error;
+      errorAttrs.put(STATUS, customBaseError.getResponseStatusCode());
     }
+  }
 }

+ 31 - 24
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/GlobalErrorWebExceptionHandler.java

@@ -1,5 +1,7 @@
 package com.provectus.kafka.ui.exception;
 
+import java.util.Map;
+import java.util.Optional;
 import org.springframework.boot.autoconfigure.web.ResourceProperties;
 import org.springframework.boot.autoconfigure.web.reactive.error.AbstractErrorWebExceptionHandler;
 import org.springframework.boot.web.reactive.error.ErrorAttributes;
@@ -10,39 +12,44 @@ import org.springframework.http.MediaType;
 import org.springframework.http.codec.ServerCodecConfigurer;
 import org.springframework.stereotype.Component;
 import org.springframework.web.reactive.function.BodyInserters;
-import org.springframework.web.reactive.function.server.*;
+import org.springframework.web.reactive.function.server.RequestPredicates;
+import org.springframework.web.reactive.function.server.RouterFunction;
+import org.springframework.web.reactive.function.server.RouterFunctions;
+import org.springframework.web.reactive.function.server.ServerRequest;
+import org.springframework.web.reactive.function.server.ServerResponse;
 import reactor.core.publisher.Mono;
 
-import java.util.Map;
-import java.util.Optional;
-
 /**
- * The order of our global error handler is -2 to give it a higher priority than the default {@link org.springframework.boot.autoconfigure.web.reactive.error.DefaultErrorWebExceptionHandler}
+ * The order of our global error handler is -2 to give it a higher priority than the default
+ * {@link org.springframework.boot.autoconfigure.web.reactive.error.DefaultErrorWebExceptionHandler}
  * which is registered at <code>@Order(-1)</code>.
  */
 @Component
 @Order(-2)
 public class GlobalErrorWebExceptionHandler extends AbstractErrorWebExceptionHandler {
 
-    public GlobalErrorWebExceptionHandler(GlobalErrorAttributes errorAttributes, ResourceProperties resourceProperties, ApplicationContext applicationContext,
-                                          ServerCodecConfigurer codecConfigurer) {
-        super(errorAttributes, resourceProperties, applicationContext);
-        this.setMessageWriters(codecConfigurer.getWriters());
-    }
+  public GlobalErrorWebExceptionHandler(GlobalErrorAttributes errorAttributes,
+                                        ResourceProperties resourceProperties,
+                                        ApplicationContext applicationContext,
+                                        ServerCodecConfigurer codecConfigurer) {
+    super(errorAttributes, resourceProperties, applicationContext);
+    this.setMessageWriters(codecConfigurer.getWriters());
+  }
 
-    @Override
-    protected RouterFunction<ServerResponse> getRoutingFunction(ErrorAttributes errorAttributes) {
-        return RouterFunctions.route(RequestPredicates.all(), this::renderErrorResponse);
-    }
+  @Override
+  protected RouterFunction<ServerResponse> getRoutingFunction(ErrorAttributes errorAttributes) {
+    return RouterFunctions.route(RequestPredicates.all(), this::renderErrorResponse);
+  }
 
-    private Mono<ServerResponse> renderErrorResponse(ServerRequest request) {
-        Map<String, Object> errorAttributes = getErrorAttributes(request, false);
-        HttpStatus statusCode = Optional.ofNullable(errorAttributes.get(GlobalErrorAttributes.STATUS))
-                .map(code -> code instanceof Integer ? HttpStatus.valueOf((Integer) code) : (HttpStatus) code)
-                .orElse(HttpStatus.BAD_REQUEST);
-        return ServerResponse
-                .status(statusCode)
-                .contentType(MediaType.APPLICATION_JSON)
-                .body(BodyInserters.fromValue(errorAttributes));
-    }
+  private Mono<ServerResponse> renderErrorResponse(ServerRequest request) {
+    Map<String, Object> errorAttributes = getErrorAttributes(request, false);
+    HttpStatus statusCode = Optional.ofNullable(errorAttributes.get(GlobalErrorAttributes.STATUS))
+        .map(code -> code instanceof Integer ? HttpStatus.valueOf((Integer) code) :
+            (HttpStatus) code)
+        .orElse(HttpStatus.BAD_REQUEST);
+    return ServerResponse
+        .status(statusCode)
+        .contentType(MediaType.APPLICATION_JSON)
+        .body(BodyInserters.fromValue(errorAttributes));
+  }
 }
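
Taken together with GlobalErrorAttributes above, this handler turns any CustomBaseException into a JSON error body whose HTTP status comes from getResponseStatusCode(). A minimal sketch of how a new exception type would plug into that chain; TeapotException is a made-up example and not part of this change:

package com.provectus.kafka.ui.exception;

import org.springframework.http.HttpStatus;

// Hypothetical subclass: GlobalErrorAttributes copies getResponseStatusCode()
// into the "status" attribute, and GlobalErrorWebExceptionHandler (@Order(-2))
// renders that attribute as the status of the JSON error response.
public class TeapotException extends CustomBaseException {

  public TeapotException(String message) {
    super(message);
  }

  @Override
  public HttpStatus getResponseStatusCode() {
    return HttpStatus.I_AM_A_TEAPOT;
  }
}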

+ 8 - 8
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/NotFoundException.java

@@ -4,12 +4,12 @@ import org.springframework.http.HttpStatus;
 
 public class NotFoundException extends CustomBaseException {
 
-    public NotFoundException(String message) {
-        super(message);
-    }
-
-    @Override
-    public HttpStatus getResponseStatusCode() {
-        return HttpStatus.NOT_FOUND;
-    }
+  public NotFoundException(String message) {
+    super(message);
+  }
+
+  @Override
+  public HttpStatus getResponseStatusCode() {
+    return HttpStatus.NOT_FOUND;
+  }
 }

+ 8 - 8
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ReadOnlyException.java

@@ -4,12 +4,12 @@ import org.springframework.http.HttpStatus;
 
 public class ReadOnlyException extends CustomBaseException {
 
-    public ReadOnlyException() {
-        super("This cluster is in read-only mode.");
-    }
-
-    @Override
-    public HttpStatus getResponseStatusCode() {
-        return HttpStatus.METHOD_NOT_ALLOWED;
-    }
+  public ReadOnlyException() {
+    super("This cluster is in read-only mode.");
+  }
+
+  @Override
+  public HttpStatus getResponseStatusCode() {
+    return HttpStatus.METHOD_NOT_ALLOWED;
+  }
 }

+ 8 - 8
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/RebalanceInProgressException.java

@@ -4,12 +4,12 @@ import org.springframework.http.HttpStatus;
 
 public class RebalanceInProgressException extends CustomBaseException {
 
-    public RebalanceInProgressException() {
-        super("Rebalance is in progress.");
-    }
-
-    @Override
-    public HttpStatus getResponseStatusCode() {
-        return HttpStatus.CONFLICT;
-    }
+  public RebalanceInProgressException() {
+    super("Rebalance is in progress.");
+  }
+
+  @Override
+  public HttpStatus getResponseStatusCode() {
+    return HttpStatus.CONFLICT;
+  }
 }

+ 8 - 8
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/UnprocessableEntityException.java

@@ -2,14 +2,14 @@ package com.provectus.kafka.ui.exception;
 
 import org.springframework.http.HttpStatus;
 
-public class UnprocessableEntityException extends CustomBaseException{
+public class UnprocessableEntityException extends CustomBaseException {
 
-    public UnprocessableEntityException(String message) {
-        super(message);
-    }
+  public UnprocessableEntityException(String message) {
+    super(message);
+  }
 
-    @Override
-    public HttpStatus getResponseStatusCode() {
-        return HttpStatus.UNPROCESSABLE_ENTITY;
-    }
+  @Override
+  public HttpStatus getResponseStatusCode() {
+    return HttpStatus.UNPROCESSABLE_ENTITY;
+  }
 }

+ 7 - 7
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ValidationException.java

@@ -3,12 +3,12 @@ package com.provectus.kafka.ui.exception;
 import org.springframework.http.HttpStatus;
 
 public class ValidationException extends CustomBaseException {
-    public ValidationException(String message) {
-        super(message);
-    }
+  public ValidationException(String message) {
+    super(message);
+  }
 
-    @Override
-    public HttpStatus getResponseStatusCode() {
-        return HttpStatus.BAD_REQUEST;
-    }
+  @Override
+  public HttpStatus getResponseStatusCode() {
+    return HttpStatus.BAD_REQUEST;
+  }
 }

+ 117 - 81
kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java

@@ -1,99 +1,135 @@
 package com.provectus.kafka.ui.mapper;
 
 import com.provectus.kafka.ui.config.ClustersProperties;
-import com.provectus.kafka.ui.model.*;
+import com.provectus.kafka.ui.model.BrokerDiskUsage;
+import com.provectus.kafka.ui.model.BrokerMetrics;
+import com.provectus.kafka.ui.model.Cluster;
+import com.provectus.kafka.ui.model.ClusterMetrics;
+import com.provectus.kafka.ui.model.ClusterStats;
+import com.provectus.kafka.ui.model.CompatibilityCheckResponse;
+import com.provectus.kafka.ui.model.CompatibilityLevel;
+import com.provectus.kafka.ui.model.Connect;
+import com.provectus.kafka.ui.model.Feature;
+import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
+import com.provectus.kafka.ui.model.InternalBrokerMetrics;
+import com.provectus.kafka.ui.model.InternalClusterMetrics;
+import com.provectus.kafka.ui.model.InternalPartition;
+import com.provectus.kafka.ui.model.InternalReplica;
+import com.provectus.kafka.ui.model.InternalTopic;
+import com.provectus.kafka.ui.model.InternalTopicConfig;
+import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.KafkaConnectCluster;
+import com.provectus.kafka.ui.model.Partition;
+import com.provectus.kafka.ui.model.Replica;
+import com.provectus.kafka.ui.model.Topic;
+import com.provectus.kafka.ui.model.TopicConfig;
+import com.provectus.kafka.ui.model.TopicDetails;
 import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityCheck;
 import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityLevel;
-import java.util.Properties;
-import org.mapstruct.Mapper;
-import org.mapstruct.Mapping;
-
 import java.math.BigDecimal;
 import java.nio.file.Path;
 import java.util.List;
 import java.util.Map;
+import java.util.Properties;
 import java.util.stream.Collectors;
+import org.mapstruct.Mapper;
+import org.mapstruct.Mapping;
 
 @Mapper(componentModel = "spring")
 public interface ClusterMapper {
 
-    @Mapping(target = "brokerCount", source = "metrics.brokerCount")
-    @Mapping(target = "onlinePartitionCount", source = "metrics.onlinePartitionCount")
-    @Mapping(target = "topicCount", source = "metrics.topicCount")
-    @Mapping(target = "bytesInPerSec", source = "metrics.bytesInPerSec", qualifiedByName = "sumMetrics")
-    @Mapping(target = "bytesOutPerSec", source = "metrics.bytesOutPerSec", qualifiedByName = "sumMetrics")
-    Cluster toCluster(KafkaCluster cluster);
-    @Mapping(target = "protobufFile", source = "protobufFile", qualifiedByName="resolvePath")
-    @Mapping(target = "properties", source = "properties", qualifiedByName="setProperties")
-    KafkaCluster toKafkaCluster(ClustersProperties.Cluster clusterProperties);
-    @Mapping(target = "diskUsage", source = "internalBrokerDiskUsage", qualifiedByName="mapDiskUsage")
-    ClusterStats toClusterStats(InternalClusterMetrics metrics);
-    @Mapping(target = "items", source = "metrics")
-    ClusterMetrics toClusterMetrics(InternalClusterMetrics metrics);
-    BrokerMetrics toBrokerMetrics(InternalBrokerMetrics metrics);
-    Topic toTopic(InternalTopic topic);
-    Partition toPartition(InternalPartition topic);
-    TopicDetails toTopicDetails(InternalTopic topic);
-    TopicConfig toTopicConfig(InternalTopicConfig topic);
-    Replica toReplica(InternalReplica replica);
-    Connect toKafkaConnect(KafkaConnectCluster connect);
-    List<Cluster.FeaturesEnum> toFeaturesEnum(List<Feature> features);
-
-    @Mapping(target = "isCompatible", source = "compatible")
-    CompatibilityCheckResponse toCompatibilityCheckResponse(InternalCompatibilityCheck dto);
-
-    @Mapping(target = "compatibility", source = "compatibilityLevel")
-    CompatibilityLevel toCompatibilityLevel(InternalCompatibilityLevel dto);
-
-    default TopicDetails toTopicDetails(InternalTopic topic, InternalClusterMetrics metrics) {
-        final TopicDetails result = toTopicDetails(topic);
-        result.setBytesInPerSec(
-                metrics.getBytesInPerSec().get(topic.getName())
-        );
-        result.setBytesOutPerSec(
-                metrics.getBytesOutPerSec().get(topic.getName())
-        );
-        return result;
+  @Mapping(target = "brokerCount", source = "metrics.brokerCount")
+  @Mapping(target = "onlinePartitionCount", source = "metrics.onlinePartitionCount")
+  @Mapping(target = "topicCount", source = "metrics.topicCount")
+  @Mapping(target = "bytesInPerSec", source = "metrics.bytesInPerSec",
+      qualifiedByName = "sumMetrics")
+  @Mapping(target = "bytesOutPerSec", source = "metrics.bytesOutPerSec",
+      qualifiedByName = "sumMetrics")
+  Cluster toCluster(KafkaCluster cluster);
+
+  @Mapping(target = "protobufFile", source = "protobufFile", qualifiedByName = "resolvePath")
+  @Mapping(target = "properties", source = "properties", qualifiedByName = "setProperties")
+  KafkaCluster toKafkaCluster(ClustersProperties.Cluster clusterProperties);
+
+  @Mapping(target = "diskUsage", source = "internalBrokerDiskUsage",
+      qualifiedByName = "mapDiskUsage")
+  ClusterStats toClusterStats(InternalClusterMetrics metrics);
+
+  @Mapping(target = "items", source = "metrics")
+  ClusterMetrics toClusterMetrics(InternalClusterMetrics metrics);
+
+  BrokerMetrics toBrokerMetrics(InternalBrokerMetrics metrics);
+
+  Topic toTopic(InternalTopic topic);
+
+  Partition toPartition(InternalPartition topic);
+
+  TopicDetails toTopicDetails(InternalTopic topic);
+
+  default TopicDetails toTopicDetails(InternalTopic topic, InternalClusterMetrics metrics) {
+    final TopicDetails result = toTopicDetails(topic);
+    result.setBytesInPerSec(
+        metrics.getBytesInPerSec().get(topic.getName())
+    );
+    result.setBytesOutPerSec(
+        metrics.getBytesOutPerSec().get(topic.getName())
+    );
+    return result;
+  }
+
+  TopicConfig toTopicConfig(InternalTopicConfig topic);
+
+  Replica toReplica(InternalReplica replica);
+
+  Connect toKafkaConnect(KafkaConnectCluster connect);
+
+  List<Cluster.FeaturesEnum> toFeaturesEnum(List<Feature> features);
+
+  @Mapping(target = "isCompatible", source = "compatible")
+  CompatibilityCheckResponse toCompatibilityCheckResponse(InternalCompatibilityCheck dto);
+
+  @Mapping(target = "compatibility", source = "compatibilityLevel")
+  CompatibilityLevel toCompatibilityLevel(InternalCompatibilityLevel dto);
+
+  default List<Partition> map(Map<Integer, InternalPartition> map) {
+    return map.values().stream().map(this::toPartition).collect(Collectors.toList());
+  }
+
+  default BrokerDiskUsage map(Integer id, InternalBrokerDiskUsage internalBrokerDiskUsage) {
+    final BrokerDiskUsage brokerDiskUsage = new BrokerDiskUsage();
+    brokerDiskUsage.setBrokerId(id);
+    brokerDiskUsage.segmentCount((int) internalBrokerDiskUsage.getSegmentCount());
+    brokerDiskUsage.segmentSize(internalBrokerDiskUsage.getSegmentSize());
+    return brokerDiskUsage;
+  }
+
+  default List<BrokerDiskUsage> mapDiskUsage(Map<Integer, InternalBrokerDiskUsage> brokers) {
+    return brokers.entrySet().stream().map(e -> this.map(e.getKey(), e.getValue()))
+        .collect(Collectors.toList());
+  }
+
+  default BigDecimal sumMetrics(Map<String, BigDecimal> metrics) {
+    if (metrics != null) {
+      return metrics.values().stream().reduce(BigDecimal.ZERO, BigDecimal::add);
+    } else {
+      return BigDecimal.ZERO;
     }
+  }
 
-     default List<Partition> map(Map<Integer, InternalPartition> map) {
-         return map.values().stream().map(this::toPartition).collect(Collectors.toList());
-     }
-
-     default List<BrokerDiskUsage> mapDiskUsage(Map<Integer, InternalBrokerDiskUsage> brokers) {
-         return brokers.entrySet().stream().map(e -> this.map(e.getKey(), e.getValue())).collect(Collectors.toList());
-     }
-
-     default BrokerDiskUsage map(Integer id, InternalBrokerDiskUsage internalBrokerDiskUsage) {
-         final BrokerDiskUsage brokerDiskUsage = new BrokerDiskUsage();
-         brokerDiskUsage.setBrokerId(id);
-         brokerDiskUsage.segmentCount((int)internalBrokerDiskUsage.getSegmentCount());
-         brokerDiskUsage.segmentSize(internalBrokerDiskUsage.getSegmentSize());
-         return brokerDiskUsage;
-     }
-
-     default BigDecimal sumMetrics(Map<String, BigDecimal> metrics) {
-         if (metrics != null) {
-           return metrics.values().stream().reduce(BigDecimal.ZERO, BigDecimal::add);
-         } else {
-           return BigDecimal.ZERO;
-         }
-     }
-
-     default Path resolvePath(String path) {
-        if (path != null) {
-            return Path.of(path);
-        } else {
-            return null;
-        }
-     }
-
-     default Properties setProperties(Properties properties) {
-       Properties copy = new Properties();
-       if (properties!=null) {
-         copy.putAll(properties);
-       }
-       return copy;
-     }
+  default Path resolvePath(String path) {
+    if (path != null) {
+      return Path.of(path);
+    } else {
+      return null;
+    }
+  }
+
+  default Properties setProperties(Properties properties) {
+    Properties copy = new Properties();
+    if (properties != null) {
+      copy.putAll(properties);
+    }
+    return copy;
+  }
 
 }
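
The bytesInPerSec/bytesOutPerSec mappings above are wired to the sumMetrics default method via qualifiedByName, so the per-broker rate maps on InternalClusterMetrics collapse into a single cluster-wide figure. A minimal sketch of that aggregation, where the class name and the sample broker rates are illustrative assumptions only:

    import java.math.BigDecimal;
    import java.util.Map;

    class SumMetricsSketch {
      // Mirrors ClusterMapper#sumMetrics: reduce per-broker rates to one cluster-wide value,
      // falling back to zero when no metrics map is present.
      static BigDecimal sumMetrics(Map<String, BigDecimal> perBrokerRates) {
        if (perBrokerRates == null) {
          return BigDecimal.ZERO;
        }
        return perBrokerRates.values().stream().reduce(BigDecimal.ZERO, BigDecimal::add);
      }

      public static void main(String[] args) {
        // Hypothetical per-broker bytesInPerSec readings.
        Map<String, BigDecimal> bytesInPerSec = Map.of(
            "broker-0", new BigDecimal("1024.5"),
            "broker-1", new BigDecimal("2048.0"));
        System.out.println(sumMetrics(bytesInPerSec)); // prints 3072.5
      }
    }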

+ 15 - 8
kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/KafkaConnectMapper.java

@@ -3,22 +3,29 @@ package com.provectus.kafka.ui.mapper;
 import com.provectus.kafka.ui.connect.model.ConnectorStatusConnector;
 import com.provectus.kafka.ui.connect.model.ConnectorTask;
 import com.provectus.kafka.ui.connect.model.NewConnector;
-import com.provectus.kafka.ui.model.*;
+import com.provectus.kafka.ui.model.Connector;
+import com.provectus.kafka.ui.model.ConnectorPlugin;
+import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponse;
+import com.provectus.kafka.ui.model.ConnectorStatus;
+import com.provectus.kafka.ui.model.Task;
+import com.provectus.kafka.ui.model.TaskStatus;
 import org.mapstruct.Mapper;
 
 @Mapper(componentModel = "spring")
 public interface KafkaConnectMapper {
-    NewConnector toClient(com.provectus.kafka.ui.model.NewConnector newConnector);
+  NewConnector toClient(com.provectus.kafka.ui.model.NewConnector newConnector);
 
-    Connector fromClient(com.provectus.kafka.ui.connect.model.Connector connector);
+  Connector fromClient(com.provectus.kafka.ui.connect.model.Connector connector);
 
-    ConnectorStatus fromClient(ConnectorStatusConnector connectorStatus);
+  ConnectorStatus fromClient(ConnectorStatusConnector connectorStatus);
 
-    Task fromClient(ConnectorTask connectorTask);
+  Task fromClient(ConnectorTask connectorTask);
 
-    TaskStatus fromClient(com.provectus.kafka.ui.connect.model.TaskStatus taskStatus);
+  TaskStatus fromClient(com.provectus.kafka.ui.connect.model.TaskStatus taskStatus);
 
-    ConnectorPlugin fromClient(com.provectus.kafka.ui.connect.model.ConnectorPlugin connectorPlugin);
+  ConnectorPlugin fromClient(com.provectus.kafka.ui.connect.model.ConnectorPlugin connectorPlugin);
 
-    ConnectorPluginConfigValidationResponse fromClient(com.provectus.kafka.ui.connect.model.ConnectorPluginConfigValidationResponse connectorPluginConfigValidationResponse);
+  ConnectorPluginConfigValidationResponse fromClient(
+      com.provectus.kafka.ui.connect.model.ConnectorPluginConfigValidationResponse
+          connectorPluginConfigValidationResponse);
 }

+ 3 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java

@@ -1,13 +1,12 @@
 package com.provectus.kafka.ui.model;
 
-import lombok.Value;
-
 import java.util.Map;
+import lombok.Value;
 
 @Value
 public class ConsumerPosition {
 
-	private SeekType seekType;
-	private Map<Integer, Long> seekTo;
+  private SeekType seekType;
+  private Map<Integer, Long> seekTo;
 
 }

+ 11 - 12
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ExtendedAdminClient.java

@@ -1,27 +1,26 @@
 package com.provectus.kafka.ui.model;
 
 import com.provectus.kafka.ui.util.ClusterUtil;
+import java.util.Set;
 import lombok.Data;
 import lombok.RequiredArgsConstructor;
 import org.apache.kafka.clients.admin.AdminClient;
 import reactor.core.publisher.Mono;
 
-import java.util.Set;
-
 @Data
 @RequiredArgsConstructor
 public class ExtendedAdminClient {
 
-    private final AdminClient adminClient;
-    private final Set<SupportedFeature> supportedFeatures;
+  private final AdminClient adminClient;
+  private final Set<SupportedFeature> supportedFeatures;
 
-    public enum SupportedFeature {
-        INCREMENTAL_ALTER_CONFIGS,
-        ALTER_CONFIGS
-    }
+  public static Mono<ExtendedAdminClient> extendedAdminClient(AdminClient adminClient) {
+    return ClusterUtil.getSupportedFeatures(adminClient)
+        .map(s -> new ExtendedAdminClient(adminClient, s));
+  }
 
-    public static Mono<ExtendedAdminClient> extendedAdminClient(AdminClient adminClient) {
-        return ClusterUtil.getSupportedFeatures(adminClient)
-                .map(s -> new ExtendedAdminClient(adminClient, s));
-    }
+  public enum SupportedFeature {
+    INCREMENTAL_ALTER_CONFIGS,
+    ALTER_CONFIGS
+  }
 }
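
The static factory above wraps a plain AdminClient together with the alter-configs features the broker supports. A sketch of how a caller might obtain the wrapper and branch on those flags, assuming a locally reachable broker at the address shown:

    import java.util.Properties;
    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.AdminClientConfig;
    import reactor.core.publisher.Mono;

    class ExtendedAdminClientSketch {
      public static void main(String[] args) {
        Properties props = new Properties();
        // Assumed bootstrap address; the call only completes against a reachable broker.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        Mono<ExtendedAdminClient> extended =
            ExtendedAdminClient.extendedAdminClient(AdminClient.create(props));

        extended
            .map(ac -> ac.getSupportedFeatures()
                .contains(ExtendedAdminClient.SupportedFeature.INCREMENTAL_ALTER_CONFIGS))
            .subscribe(supported ->
                System.out.println("incrementalAlterConfigs supported: " + supported));
      }
    }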

+ 14 - 14
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/Feature.java

@@ -7,21 +7,21 @@ import java.util.function.Predicate;
 import java.util.stream.Collectors;
 
 public enum Feature {
-    KAFKA_CONNECT(cluster -> Optional.ofNullable(cluster.getKafkaConnect())
-            .filter(Predicate.not(List::isEmpty))
-            .isPresent()
-    ),
-    SCHEMA_REGISTRY(cluster -> cluster.getSchemaRegistry() != null);
+  KAFKA_CONNECT(cluster -> Optional.ofNullable(cluster.getKafkaConnect())
+      .filter(Predicate.not(List::isEmpty))
+      .isPresent()
+  ),
+  SCHEMA_REGISTRY(cluster -> cluster.getSchemaRegistry() != null);
 
-    private final Predicate<KafkaCluster> isEnabled;
+  private final Predicate<KafkaCluster> isEnabled;
 
-    Feature(Predicate<KafkaCluster> isEnabled) {
-        this.isEnabled = isEnabled;
-    }
+  Feature(Predicate<KafkaCluster> isEnabled) {
+    this.isEnabled = isEnabled;
+  }
 
-    public static List<Feature> getEnabledFeatures(KafkaCluster cluster) {
-        return Arrays.stream(values())
-                .filter(feature -> feature.isEnabled.test(cluster))
-                .collect(Collectors.toList());
-    }
+  public static List<Feature> getEnabledFeatures(KafkaCluster cluster) {
+    return Arrays.stream(values())
+        .filter(feature -> feature.isEnabled.test(cluster))
+        .collect(Collectors.toList());
+  }
 }
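
getEnabledFeatures simply tests each constant's predicate against the cluster, so the resulting list reflects which optional integrations are configured. A sketch with an assumed cluster that has a schema registry but no Kafka Connect endpoints:

    import java.util.List;

    class FeatureSketch {
      public static void main(String[] args) {
        // Hypothetical cluster: schema registry configured, kafkaConnect left null.
        KafkaCluster cluster = KafkaCluster.builder()
            .name("local")
            .schemaRegistry("http://localhost:8081")
            .build();

        List<Feature> enabled = Feature.getEnabledFeatures(cluster);
        System.out.println(enabled); // prints [SCHEMA_REGISTRY]
      }
    }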

+ 2 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalBrokerDiskUsage.java

@@ -6,6 +6,6 @@ import lombok.Data;
 @Data
 @Builder(toBuilder = true)
 public class InternalBrokerDiskUsage {
-    private final long segmentCount;
-    private final long segmentSize;
+  private final long segmentCount;
+  private final long segmentSize;
 }

+ 2 - 3
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalBrokerMetrics.java

@@ -1,12 +1,11 @@
 package com.provectus.kafka.ui.model;
 
+import java.util.List;
 import lombok.Builder;
 import lombok.Data;
 
-import java.util.List;
-
 @Data
 @Builder(toBuilder = true)
 public class InternalBrokerMetrics {
-    private final List<Metric> metrics;
+  private final List<Metric> metrics;
 }

+ 19 - 20
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalClusterMetrics.java

@@ -1,31 +1,30 @@
 package com.provectus.kafka.ui.model;
 
-import lombok.Builder;
-import lombok.Data;
-
 import java.math.BigDecimal;
 import java.util.List;
 import java.util.Map;
+import lombok.Builder;
+import lombok.Data;
 
 
 @Data
 @Builder(toBuilder = true)
 public class InternalClusterMetrics {
-    private final int brokerCount;
-    private final int topicCount;
-    private final int activeControllers;
-    private final int uncleanLeaderElectionCount;
-    private final int onlinePartitionCount;
-    private final int underReplicatedPartitionCount;
-    private final int offlinePartitionCount;
-    private final int inSyncReplicasCount;
-    private final int outOfSyncReplicasCount;
-    private final Map<String, BigDecimal> bytesInPerSec;
-    private final Map<String, BigDecimal> bytesOutPerSec;
-    private final long segmentCount;
-    private final long segmentSize;
-    private final Map<Integer, InternalBrokerDiskUsage> internalBrokerDiskUsage;
-    private final Map<Integer, InternalBrokerMetrics> internalBrokerMetrics;
-    private final List<Metric> metrics;
-    private final int zooKeeperStatus;
+  private final int brokerCount;
+  private final int topicCount;
+  private final int activeControllers;
+  private final int uncleanLeaderElectionCount;
+  private final int onlinePartitionCount;
+  private final int underReplicatedPartitionCount;
+  private final int offlinePartitionCount;
+  private final int inSyncReplicasCount;
+  private final int outOfSyncReplicasCount;
+  private final Map<String, BigDecimal> bytesInPerSec;
+  private final Map<String, BigDecimal> bytesOutPerSec;
+  private final long segmentCount;
+  private final long segmentSize;
+  private final Map<Integer, InternalBrokerDiskUsage> internalBrokerDiskUsage;
+  private final Map<Integer, InternalBrokerMetrics> internalBrokerMetrics;
+  private final List<Metric> metrics;
+  private final int zooKeeperStatus;
 }

+ 10 - 11
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalPartition.java

@@ -1,20 +1,19 @@
 package com.provectus.kafka.ui.model;
 
+import java.util.List;
 import lombok.Builder;
 import lombok.Data;
 
-import java.util.List;
-
 @Data
 @Builder(toBuilder = true)
 public class InternalPartition {
-    private final int partition;
-    private final Integer leader;
-    private final List<InternalReplica> replicas;
-    private final int inSyncReplicasCount;
-    private final int replicasCount;
-    private final long offsetMin;
-    private final long offsetMax;
-    private final long segmentSize;
-    private final long segmentCount;
+  private final int partition;
+  private final Integer leader;
+  private final List<InternalReplica> replicas;
+  private final int inSyncReplicasCount;
+  private final int replicasCount;
+  private final long offsetMin;
+  private final long offsetMax;
+  private final long segmentSize;
+  private final long segmentCount;
 }

+ 3 - 3
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalReplica.java

@@ -8,7 +8,7 @@ import lombok.RequiredArgsConstructor;
 @Builder
 @RequiredArgsConstructor
 public class InternalReplica {
-    private final int broker;
-    private final boolean leader;
-    private final boolean inSync;
+  private final int broker;
+  private final boolean leader;
+  private final boolean inSync;
 }

+ 3 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalSegmentSizeDto.java

@@ -1,14 +1,13 @@
 package com.provectus.kafka.ui.model;
 
+import java.util.Map;
 import lombok.Builder;
 import lombok.Data;
 
-import java.util.Map;
-
 @Data
 @Builder(toBuilder = true)
 public class InternalSegmentSizeDto {
 
-    private final Map<String, InternalTopic> internalTopicWithSegmentSize;
-    private final InternalClusterMetrics clusterMetricsWithSegmentSize;
+  private final Map<String, InternalTopic> internalTopicWithSegmentSize;
+  private final InternalClusterMetrics clusterMetricsWithSegmentSize;
 }

+ 13 - 14
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalTopic.java

@@ -1,25 +1,24 @@
 package com.provectus.kafka.ui.model;
 
-import lombok.Builder;
-import lombok.Data;
-
 import java.util.List;
 import java.util.Map;
+import lombok.Builder;
+import lombok.Data;
 
 @Data
 @Builder(toBuilder = true)
 public class InternalTopic {
 
-    private final String name;
-    private final boolean internal;
-    private final Map<Integer,InternalPartition> partitions;
-    private final List<InternalTopicConfig> topicConfigs;
+  private final String name;
+  private final boolean internal;
+  private final Map<Integer, InternalPartition> partitions;
+  private final List<InternalTopicConfig> topicConfigs;
 
-    private final int replicas;
-    private final int partitionCount;
-    private final int inSyncReplicas;
-    private final int replicationFactor;
-    private final int underReplicatedPartitions;
-    private final long segmentSize;
-    private final long segmentCount;
+  private final int replicas;
+  private final int partitionCount;
+  private final int inSyncReplicas;
+  private final int replicationFactor;
+  private final int underReplicatedPartitions;
+  private final long segmentSize;
+  private final long segmentCount;
 }

+ 3 - 3
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalTopicConfig.java

@@ -7,7 +7,7 @@ import lombok.Data;
 @Data
 @Builder
 public class InternalTopicConfig {
-    private final String name;
-    private final String value;
-    private final String defaultValue;
+  private final String name;
+  private final String value;
+  private final String defaultValue;
 }

+ 20 - 21
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java

@@ -1,32 +1,31 @@
 package com.provectus.kafka.ui.model;
 
+import java.nio.file.Path;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 import lombok.Builder;
 import lombok.Data;
 
-import java.nio.file.Path;
-import java.util.Map;
-
 @Data
 @Builder(toBuilder = true)
 public class KafkaCluster {
-    private final String name;
-    private final Integer jmxPort;
-    private final String bootstrapServers;
-    private final String zookeeper;
-    private final String schemaRegistry;
-    private final List<KafkaConnectCluster> kafkaConnect;
-    private final String schemaNameTemplate;
-    private final ServerStatus status;
-    private final ServerStatus zookeeperStatus;
-    private final InternalClusterMetrics metrics;
-    private final Map<String, InternalTopic> topics;
-    private final Throwable lastKafkaException;
-    private final Throwable lastZookeeperException;
-    private final Path protobufFile;
-    private final String protobufMessageName;
-    private final Properties properties;
-    private final Boolean readOnly;
-    private final List<Feature> features;
+  private final String name;
+  private final Integer jmxPort;
+  private final String bootstrapServers;
+  private final String zookeeper;
+  private final String schemaRegistry;
+  private final List<KafkaConnectCluster> kafkaConnect;
+  private final String schemaNameTemplate;
+  private final ServerStatus status;
+  private final ServerStatus zookeeperStatus;
+  private final InternalClusterMetrics metrics;
+  private final Map<String, InternalTopic> topics;
+  private final Throwable lastKafkaException;
+  private final Throwable lastZookeeperException;
+  private final Path protobufFile;
+  private final String protobufMessageName;
+  private final Properties properties;
+  private final Boolean readOnly;
+  private final List<Feature> features;
 }

+ 2 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaConnectCluster.java

@@ -6,6 +6,6 @@ import lombok.Data;
 @Data
 @Builder(toBuilder = true)
 public class KafkaConnectCluster {
-    private final String name;
-    private final String address;
+  private final String name;
+  private final String address;
 }

+ 2 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/schemaregistry/InternalCompatibilityCheck.java

@@ -5,6 +5,6 @@ import lombok.Data;
 
 @Data
 public class InternalCompatibilityCheck {
-    @JsonProperty("is_compatible")
-    private boolean isCompatible;
+  @JsonProperty("is_compatible")
+  private boolean isCompatible;
 }

+ 1 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/schemaregistry/InternalCompatibilityLevel.java

@@ -4,5 +4,5 @@ import lombok.Data;
 
 @Data
 public class InternalCompatibilityLevel {
-    private String compatibilityLevel;
+  private String compatibilityLevel;
 }

+ 7 - 7
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/schemaregistry/InternalNewSchema.java

@@ -6,12 +6,12 @@ import lombok.Data;
 
 @Data
 public class InternalNewSchema {
-    private String schema;
-    @JsonInclude(JsonInclude.Include.NON_NULL)
-    private SchemaType schemaType;
+  private String schema;
+  @JsonInclude(JsonInclude.Include.NON_NULL)
+  private SchemaType schemaType;
 
-    public InternalNewSchema(String schema, SchemaType schemaType) {
-        this.schema = schema;
-        this.schemaType = schemaType;
-    }
+  public InternalNewSchema(String schema, SchemaType schemaType) {
+    this.schema = schema;
+    this.schemaType = schemaType;
+  }
 }

+ 1 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/schemaregistry/SubjectIdResponse.java

@@ -4,5 +4,5 @@ import lombok.Data;
 
 @Data
 public class SubjectIdResponse {
-    private Integer id;
+  private Integer id;
 }

+ 228 - 192
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java

@@ -2,11 +2,34 @@ package com.provectus.kafka.ui.service;
 
 import com.provectus.kafka.ui.exception.NotFoundException;
 import com.provectus.kafka.ui.mapper.ClusterMapper;
+import com.provectus.kafka.ui.model.Broker;
+import com.provectus.kafka.ui.model.BrokerMetrics;
+import com.provectus.kafka.ui.model.Cluster;
+import com.provectus.kafka.ui.model.ClusterMetrics;
+import com.provectus.kafka.ui.model.ClusterStats;
+import com.provectus.kafka.ui.model.ConsumerGroup;
+import com.provectus.kafka.ui.model.ConsumerGroupDetails;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.InternalTopic;
 import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.Topic;
+import com.provectus.kafka.ui.model.TopicConfig;
+import com.provectus.kafka.ui.model.TopicDetails;
+import com.provectus.kafka.ui.model.TopicFormData;
+import com.provectus.kafka.ui.model.TopicMessage;
+import com.provectus.kafka.ui.model.TopicsResponse;
 import com.provectus.kafka.ui.util.ClusterUtil;
-import com.provectus.kafka.ui.model.*;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 import lombok.RequiredArgsConstructor;
 import lombok.SneakyThrows;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
@@ -18,200 +41,213 @@ import org.springframework.stereotype.Service;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
-import java.util.*;
-import java.util.function.Predicate;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
 @Service
 @RequiredArgsConstructor
 public class ClusterService {
-    private static final Integer DEFAULT_PAGE_SIZE = 20;
-
-    private final ClustersStorage clustersStorage;
-    private final ClusterMapper clusterMapper;
-    private final KafkaService kafkaService;
-    private final ConsumingService consumingService;
-
-    public List<Cluster> getClusters() {
-        return clustersStorage.getKafkaClusters()
-                .stream()
-                .map(clusterMapper::toCluster)
-                .collect(Collectors.toList());
-    }
-
-    public Mono<BrokerMetrics> getBrokerMetrics(String name, Integer id) {
-        return Mono.justOrEmpty(clustersStorage.getClusterByName(name)
-                .map(c -> c.getMetrics().getInternalBrokerMetrics())
-                .map(m -> m.get(id))
-                .map(clusterMapper::toBrokerMetrics));
-    }
-
-    public Mono<ClusterStats> getClusterStats(String name) {
-        return Mono.justOrEmpty(
-                clustersStorage.getClusterByName(name)
-                        .map(KafkaCluster::getMetrics)
-                        .map(clusterMapper::toClusterStats)
+  private static final Integer DEFAULT_PAGE_SIZE = 20;
+
+  private final ClustersStorage clustersStorage;
+  private final ClusterMapper clusterMapper;
+  private final KafkaService kafkaService;
+  private final ConsumingService consumingService;
+
+  public List<Cluster> getClusters() {
+    return clustersStorage.getKafkaClusters()
+        .stream()
+        .map(clusterMapper::toCluster)
+        .collect(Collectors.toList());
+  }
+
+  public Mono<BrokerMetrics> getBrokerMetrics(String name, Integer id) {
+    return Mono.justOrEmpty(clustersStorage.getClusterByName(name)
+        .map(c -> c.getMetrics().getInternalBrokerMetrics())
+        .map(m -> m.get(id))
+        .map(clusterMapper::toBrokerMetrics));
+  }
+
+  public Mono<ClusterStats> getClusterStats(String name) {
+    return Mono.justOrEmpty(
+        clustersStorage.getClusterByName(name)
+            .map(KafkaCluster::getMetrics)
+            .map(clusterMapper::toClusterStats)
+    );
+  }
+
+  public Mono<ClusterMetrics> getClusterMetrics(String name) {
+    return Mono.justOrEmpty(
+        clustersStorage.getClusterByName(name)
+            .map(KafkaCluster::getMetrics)
+            .map(clusterMapper::toClusterMetrics)
+    );
+  }
+
+
+  public TopicsResponse getTopics(String name, Optional<Integer> page,
+                                  Optional<Integer> nullablePerPage) {
+    Predicate<Integer> positiveInt = i -> i > 0;
+    int perPage = nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE);
+    var topicsToSkip = (page.filter(positiveInt).orElse(1) - 1) * perPage;
+    var cluster = clustersStorage.getClusterByName(name)
+        .orElseThrow(() -> new NotFoundException("No such cluster"));
+    var totalPages = (cluster.getTopics().size() / perPage)
+        + (cluster.getTopics().size() % perPage == 0 ? 0 : 1);
+    return new TopicsResponse()
+        .pageCount(totalPages)
+        .topics(
+            cluster.getTopics().values().stream()
+                .sorted(Comparator.comparing(InternalTopic::getName))
+                .skip(topicsToSkip)
+                .limit(perPage)
+                .map(clusterMapper::toTopic)
+                .collect(Collectors.toList())
         );
-    }
-
-    public Mono<ClusterMetrics> getClusterMetrics(String name) {
-        return Mono.justOrEmpty(
-                clustersStorage.getClusterByName(name)
-                        .map(KafkaCluster::getMetrics)
-                        .map(clusterMapper::toClusterMetrics)
+  }
+
+  public Optional<TopicDetails> getTopicDetails(String name, String topicName) {
+    return clustersStorage.getClusterByName(name)
+        .flatMap(c ->
+            Optional.ofNullable(
+                c.getTopics().get(topicName)
+            ).map(
+                t -> t.toBuilder().partitions(
+                    kafkaService.getTopicPartitions(c, t)
+                ).build()
+            ).map(t -> clusterMapper.toTopicDetails(t, c.getMetrics()))
         );
-    }
-
-
-    public TopicsResponse getTopics(String name, Optional<Integer> page, Optional<Integer> nullablePerPage) {
-        Predicate<Integer> positiveInt = i -> i > 0;
-        int perPage = nullablePerPage.filter(positiveInt).orElse(DEFAULT_PAGE_SIZE);
-        var topicsToSkip = (page.filter(positiveInt).orElse(1) - 1) * perPage;
-        var cluster = clustersStorage.getClusterByName(name).orElseThrow(() -> new NotFoundException("No such cluster"));
-        var totalPages = (cluster.getTopics().size() / perPage) + (cluster.getTopics().size() % perPage == 0 ? 0 : 1);
-        return new TopicsResponse()
-                .pageCount(totalPages)
-                .topics(
-                        cluster.getTopics().values().stream()
-                                .sorted(Comparator.comparing(InternalTopic::getName))
-                                .skip(topicsToSkip)
-                                .limit(perPage)
-                                .map(clusterMapper::toTopic)
-                                .collect(Collectors.toList())
-                );
-    }
-
-    public Optional<TopicDetails> getTopicDetails(String name, String topicName) {
-        return clustersStorage.getClusterByName(name)
-                .flatMap(c ->
-                        Optional.ofNullable(
-                                c.getTopics().get(topicName)
-                        ).map(
-                                t -> t.toBuilder().partitions(
-                                        kafkaService.getTopicPartitions(c, t)
-                                ).build()
-                        ).map(t -> clusterMapper.toTopicDetails(t, c.getMetrics()))
-                );
-    }
-
-    public Optional<List<TopicConfig>> getTopicConfigs(String name, String topicName) {
-        return clustersStorage.getClusterByName(name)
-                .map(KafkaCluster::getTopics)
-                .map(t -> t.get(topicName))
-                .map(t -> t.getTopicConfigs().stream().map(clusterMapper::toTopicConfig).collect(Collectors.toList()));
-    }
-
-    public Mono<Topic> createTopic(String clusterName, Mono<TopicFormData> topicFormData) {
-        return clustersStorage.getClusterByName(clusterName).map(cluster ->
-                kafkaService.createTopic(cluster, topicFormData)
-                        .doOnNext(t -> updateCluster(t, clusterName, cluster))
-                        .map(clusterMapper::toTopic)
-        ).orElse(Mono.empty());
-    }
-
-    @SneakyThrows
-    public Mono<ConsumerGroupDetails> getConsumerGroupDetail(String clusterName, String consumerGroupId) {
-        var cluster = clustersStorage.getClusterByName(clusterName).orElseThrow(Throwable::new);
-
-        return kafkaService.getOrCreateAdminClient(cluster).map(ac ->
-                ac.getAdminClient().describeConsumerGroups(Collections.singletonList(consumerGroupId)).all()
-        ).flatMap(groups ->
-                groupMetadata(cluster, consumerGroupId)
-                        .flatMap(offsets -> {
-                            Map<TopicPartition, Long> endOffsets = topicPartitionsEndOffsets(cluster, offsets.keySet());
-                            return ClusterUtil.toMono(groups).map(s -> s.get(consumerGroupId).members().stream()
-                                    .flatMap(c -> Stream.of(ClusterUtil.convertToConsumerTopicPartitionDetails(c, offsets, endOffsets)))
-                                    .collect(Collectors.toList()).stream().flatMap(t -> t.stream().flatMap(Stream::of)).collect(Collectors.toList()));
-                        })
-        )
-                .map(c -> new ConsumerGroupDetails().consumers(c).consumerGroupId(consumerGroupId));
-
-    }
-
-    public Mono<Map<TopicPartition, OffsetAndMetadata>> groupMetadata(KafkaCluster cluster, String consumerGroupId) {
-        return
-                kafkaService.getOrCreateAdminClient(cluster)
-                        .map(ac -> ac.getAdminClient().listConsumerGroupOffsets(consumerGroupId).partitionsToOffsetAndMetadata())
-                        .flatMap(ClusterUtil::toMono);
-    }
-
-    public Map<TopicPartition, Long> topicPartitionsEndOffsets(KafkaCluster cluster, Collection<TopicPartition> topicPartitions) {
-        Properties properties = new Properties();
-        properties.putAll(cluster.getProperties());
-        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
-        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
-        properties.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
-
-        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
-            return consumer.endOffsets(topicPartitions);
-        }
-    }
-
-    @SneakyThrows
-    public Mono<List<ConsumerGroup>> getConsumerGroups(String clusterName) {
-        return clustersStorage.getClusterByName(clusterName)
-                .map(kafkaService::getConsumerGroups)
-                .orElse(Mono.empty());
-    }
-
-    public Flux<Broker> getBrokers(String clusterName) {
-        return kafkaService.getOrCreateAdminClient(clustersStorage.getClusterByName(clusterName).orElseThrow())
-                .flatMap(client -> ClusterUtil.toMono(client.getAdminClient().describeCluster().nodes())
-                        .map(n -> n.stream().map(node -> {
-                            Broker broker = new Broker();
-                            broker.setId(node.id());
-                            broker.setHost(node.host());
-                            return broker;
-                        }).collect(Collectors.toList())))
-                .flatMapMany(Flux::fromIterable);
-    }
-
-    @SneakyThrows
-    public Mono<Topic> updateTopic(String clusterName, String topicName, Mono<TopicFormData> topicFormData) {
-        return clustersStorage.getClusterByName(clusterName).map(cl ->
-                topicFormData
-                        .flatMap(t -> kafkaService.updateTopic(cl, topicName, t))
-                        .doOnNext(t -> updateCluster(t, clusterName, cl))
-                        .map(clusterMapper::toTopic)
-        ).orElse(Mono.empty());
-    }
-
-    public Mono<Void> deleteTopic(String clusterName, String topicName) {
-        var cluster = clustersStorage.getClusterByName(clusterName)
-                .orElseThrow(() -> new NotFoundException("No such cluster"));
-        getTopicDetails(clusterName, topicName)
-                .orElseThrow(() -> new NotFoundException("No such topic"));
-        return kafkaService.deleteTopic(cluster, topicName)
-                .doOnNext(t -> updateCluster(topicName, clusterName, cluster));
-    }
-
-    private KafkaCluster updateCluster(InternalTopic topic, String clusterName, KafkaCluster cluster) {
-        final KafkaCluster updatedCluster = kafkaService.getUpdatedCluster(cluster, topic);
-        clustersStorage.setKafkaCluster(clusterName, updatedCluster);
-        return updatedCluster;
-    }
-
-    private KafkaCluster updateCluster(String topicToDelete, String clusterName, KafkaCluster cluster) {
-        final KafkaCluster updatedCluster = kafkaService.getUpdatedCluster(cluster, topicToDelete);
-        clustersStorage.setKafkaCluster(clusterName, updatedCluster);
-        return updatedCluster;
-    }
-
-    public Flux<TopicMessage> getMessages(String clusterName, String topicName, ConsumerPosition consumerPosition, String query, Integer limit) {
-        return clustersStorage.getClusterByName(clusterName)
-                .map(c -> consumingService.loadMessages(c, topicName, consumerPosition, query, limit))
-                .orElse(Flux.empty());
-    }
-
-    public Mono<Void> deleteTopicMessages(String clusterName, String topicName, List<Integer> partitions) {
-        var cluster = clustersStorage.getClusterByName(clusterName)
-                .orElseThrow(() -> new NotFoundException("No such cluster"));
-        if (!cluster.getTopics().containsKey(topicName)) {
-            throw new NotFoundException("No such topic");
-        }
-        return consumingService.loadOffsets(cluster, topicName, partitions)
-                .flatMap(offsets -> kafkaService.deleteTopicMessages(cluster, offsets));
-    }
+  }
+
+  public Optional<List<TopicConfig>> getTopicConfigs(String name, String topicName) {
+    return clustersStorage.getClusterByName(name)
+        .map(KafkaCluster::getTopics)
+        .map(t -> t.get(topicName))
+        .map(t -> t.getTopicConfigs().stream().map(clusterMapper::toTopicConfig)
+            .collect(Collectors.toList()));
+  }
+
+  public Mono<Topic> createTopic(String clusterName, Mono<TopicFormData> topicFormData) {
+    return clustersStorage.getClusterByName(clusterName).map(cluster ->
+        kafkaService.createTopic(cluster, topicFormData)
+            .doOnNext(t -> updateCluster(t, clusterName, cluster))
+            .map(clusterMapper::toTopic)
+    ).orElse(Mono.empty());
+  }
+
+  @SneakyThrows
+  public Mono<ConsumerGroupDetails> getConsumerGroupDetail(String clusterName,
+                                                           String consumerGroupId) {
+    var cluster = clustersStorage.getClusterByName(clusterName).orElseThrow(Throwable::new);
+
+    return kafkaService.getOrCreateAdminClient(cluster).map(ac ->
+        ac.getAdminClient().describeConsumerGroups(Collections.singletonList(consumerGroupId)).all()
+    ).flatMap(groups ->
+        groupMetadata(cluster, consumerGroupId)
+            .flatMap(offsets -> {
+              Map<TopicPartition, Long> endOffsets =
+                  topicPartitionsEndOffsets(cluster, offsets.keySet());
+              return ClusterUtil.toMono(groups).map(s -> s.get(consumerGroupId).members().stream()
+                  .flatMap(c -> Stream.of(ClusterUtil
+                      .convertToConsumerTopicPartitionDetails(c, offsets, endOffsets)))
+                  .collect(Collectors.toList()).stream()
+                  .flatMap(t -> t.stream().flatMap(Stream::of)).collect(Collectors.toList()));
+            })
+    )
+        .map(c -> new ConsumerGroupDetails().consumers(c).consumerGroupId(consumerGroupId));
+
+  }
+
+  public Mono<Map<TopicPartition, OffsetAndMetadata>> groupMetadata(KafkaCluster cluster,
+                                                                    String consumerGroupId) {
+    return
+        kafkaService.getOrCreateAdminClient(cluster)
+            .map(ac -> ac.getAdminClient().listConsumerGroupOffsets(consumerGroupId)
+                .partitionsToOffsetAndMetadata())
+            .flatMap(ClusterUtil::toMono);
+  }
+
+  public Map<TopicPartition, Long> topicPartitionsEndOffsets(
+      KafkaCluster cluster, Collection<TopicPartition> topicPartitions) {
+    Properties properties = new Properties();
+    properties.putAll(cluster.getProperties());
+    properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
+    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
+    properties.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
+
+    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
+      return consumer.endOffsets(topicPartitions);
+    }
+  }
+
+  @SneakyThrows
+  public Mono<List<ConsumerGroup>> getConsumerGroups(String clusterName) {
+    return clustersStorage.getClusterByName(clusterName)
+        .map(kafkaService::getConsumerGroups)
+        .orElse(Mono.empty());
+  }
+
+  public Flux<Broker> getBrokers(String clusterName) {
+    return kafkaService
+        .getOrCreateAdminClient(clustersStorage.getClusterByName(clusterName).orElseThrow())
+        .flatMap(client -> ClusterUtil.toMono(client.getAdminClient().describeCluster().nodes())
+            .map(n -> n.stream().map(node -> {
+              Broker broker = new Broker();
+              broker.setId(node.id());
+              broker.setHost(node.host());
+              return broker;
+            }).collect(Collectors.toList())))
+        .flatMapMany(Flux::fromIterable);
+  }
+
+  @SneakyThrows
+  public Mono<Topic> updateTopic(String clusterName, String topicName,
+                                 Mono<TopicFormData> topicFormData) {
+    return clustersStorage.getClusterByName(clusterName).map(cl ->
+        topicFormData
+            .flatMap(t -> kafkaService.updateTopic(cl, topicName, t))
+            .doOnNext(t -> updateCluster(t, clusterName, cl))
+            .map(clusterMapper::toTopic)
+    ).orElse(Mono.empty());
+  }
+
+  public Mono<Void> deleteTopic(String clusterName, String topicName) {
+    var cluster = clustersStorage.getClusterByName(clusterName)
+        .orElseThrow(() -> new NotFoundException("No such cluster"));
+    getTopicDetails(clusterName, topicName)
+        .orElseThrow(() -> new NotFoundException("No such topic"));
+    return kafkaService.deleteTopic(cluster, topicName)
+        .doOnNext(t -> updateCluster(topicName, clusterName, cluster));
+  }
+
+  private KafkaCluster updateCluster(InternalTopic topic, String clusterName,
+                                     KafkaCluster cluster) {
+    final KafkaCluster updatedCluster = kafkaService.getUpdatedCluster(cluster, topic);
+    clustersStorage.setKafkaCluster(clusterName, updatedCluster);
+    return updatedCluster;
+  }
+
+  private KafkaCluster updateCluster(String topicToDelete, String clusterName,
+                                     KafkaCluster cluster) {
+    final KafkaCluster updatedCluster = kafkaService.getUpdatedCluster(cluster, topicToDelete);
+    clustersStorage.setKafkaCluster(clusterName, updatedCluster);
+    return updatedCluster;
+  }
+
+  public Flux<TopicMessage> getMessages(String clusterName, String topicName,
+                                        ConsumerPosition consumerPosition, String query,
+                                        Integer limit) {
+    return clustersStorage.getClusterByName(clusterName)
+        .map(c -> consumingService.loadMessages(c, topicName, consumerPosition, query, limit))
+        .orElse(Flux.empty());
+  }
+
+  public Mono<Void> deleteTopicMessages(String clusterName, String topicName,
+                                        List<Integer> partitions) {
+    var cluster = clustersStorage.getClusterByName(clusterName)
+        .orElseThrow(() -> new NotFoundException("No such cluster"));
+    if (!cluster.getTopics().containsKey(topicName)) {
+      throw new NotFoundException("No such topic");
+    }
+    return consumingService.loadOffsets(cluster, topicName, partitions)
+        .flatMap(offsets -> kafkaService.deleteTopicMessages(cluster, offsets));
+  }
 }
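
The paging arithmetic in getTopics rounds the page count up whenever the topic count is not an exact multiple of the page size, and skips (page - 1) * perPage topics before collecting the requested page. A small worked illustration of that formula with assumed numbers:

    class TopicPagingSketch {
      public static void main(String[] args) {
        int topicCount = 45; // assumed number of topics in the cluster
        int perPage = 20;    // DEFAULT_PAGE_SIZE
        int page = 3;        // requested 1-based page

        int totalPages = (topicCount / perPage) + (topicCount % perPage == 0 ? 0 : 1);
        int topicsToSkip = (page - 1) * perPage;

        System.out.println(totalPages);   // 3: pages of 20, 20 and 5 topics
        System.out.println(topicsToSkip); // 40, so the last page holds topics 41-45
      }
    }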

+ 12 - 13
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersMetricsScheduler.java

@@ -1,5 +1,6 @@
 package com.provectus.kafka.ui.service;
 
+import java.util.Map;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.springframework.scheduling.annotation.Scheduled;
@@ -7,24 +8,22 @@ import org.springframework.stereotype.Component;
 import reactor.core.publisher.Flux;
 import reactor.core.scheduler.Schedulers;
 
-import java.util.Map;
-
 @Component
 @RequiredArgsConstructor
 @Log4j2
 public class ClustersMetricsScheduler {
 
-    private final ClustersStorage clustersStorage;
+  private final ClustersStorage clustersStorage;
 
-    private final MetricsUpdateService metricsUpdateService;
+  private final MetricsUpdateService metricsUpdateService;
 
-    @Scheduled(fixedRate = 30000)
-    public void updateMetrics() {
-        Flux.fromIterable(clustersStorage.getKafkaClustersMap().entrySet())
-                .subscribeOn(Schedulers.parallel())
-                .map(Map.Entry::getValue)
-                .flatMap(metricsUpdateService::updateMetrics)
-                .doOnNext(s -> clustersStorage.setKafkaCluster(s.getName(), s))
-                .subscribe();
-    }
+  @Scheduled(fixedRate = 30000)
+  public void updateMetrics() {
+    Flux.fromIterable(clustersStorage.getKafkaClustersMap().entrySet())
+        .subscribeOn(Schedulers.parallel())
+        .map(Map.Entry::getValue)
+        .flatMap(metricsUpdateService::updateMetrics)
+        .doOnNext(s -> clustersStorage.setKafkaCluster(s.getName(), s))
+        .subscribe();
+  }
 }

+ 37 - 37
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStorage.java

@@ -4,55 +4,55 @@ import com.provectus.kafka.ui.config.ClustersProperties;
 import com.provectus.kafka.ui.mapper.ClusterMapper;
 import com.provectus.kafka.ui.model.Feature;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import lombok.RequiredArgsConstructor;
-import org.mapstruct.factory.Mappers;
-import org.springframework.stereotype.Component;
-
-import javax.annotation.PostConstruct;
 import java.util.Collection;
 import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.ConcurrentHashMap;
+import javax.annotation.PostConstruct;
+import lombok.RequiredArgsConstructor;
+import org.mapstruct.factory.Mappers;
+import org.springframework.stereotype.Component;
 
 @Component
 @RequiredArgsConstructor
 public class ClustersStorage {
 
-    private final Map<String, KafkaCluster> kafkaClusters = new ConcurrentHashMap<>();
-
-    private final ClustersProperties clusterProperties;
-
-    private final ClusterMapper clusterMapper = Mappers.getMapper(ClusterMapper.class);
-
-    @PostConstruct
-    public void init() {
-        for (ClustersProperties.Cluster clusterProperties : clusterProperties.getClusters()) {
-            if (kafkaClusters.get(clusterProperties.getName()) != null) {
-                throw new IllegalStateException("Application config isn't correct. Two clusters can't have the same name");
-            }
-            KafkaCluster cluster = clusterMapper.toKafkaCluster(clusterProperties);
-            kafkaClusters.put(
-                    clusterProperties.getName(),
-                    cluster.toBuilder()
-                            .features(Feature.getEnabledFeatures(cluster))
-                            .build()
-            );
-        }
+  private final Map<String, KafkaCluster> kafkaClusters = new ConcurrentHashMap<>();
+
+  private final ClustersProperties clusterProperties;
+
+  private final ClusterMapper clusterMapper = Mappers.getMapper(ClusterMapper.class);
+
+  @PostConstruct
+  public void init() {
+    for (ClustersProperties.Cluster clusterProperties : clusterProperties.getClusters()) {
+      if (kafkaClusters.get(clusterProperties.getName()) != null) {
+        throw new IllegalStateException(
+            "Application config isn't correct. Two clusters can't have the same name");
+      }
+      KafkaCluster cluster = clusterMapper.toKafkaCluster(clusterProperties);
+      kafkaClusters.put(
+          clusterProperties.getName(),
+          cluster.toBuilder()
+              .features(Feature.getEnabledFeatures(cluster))
+              .build()
+      );
     }
+  }
 
-    public Collection<KafkaCluster> getKafkaClusters() {
-        return kafkaClusters.values();
-    }
+  public Collection<KafkaCluster> getKafkaClusters() {
+    return kafkaClusters.values();
+  }
 
-    public Optional<KafkaCluster> getClusterByName(String clusterName) {
-        return Optional.ofNullable(kafkaClusters.get(clusterName));
-    }
+  public Optional<KafkaCluster> getClusterByName(String clusterName) {
+    return Optional.ofNullable(kafkaClusters.get(clusterName));
+  }
 
-    public void setKafkaCluster(String key, KafkaCluster kafkaCluster) {
-        this.kafkaClusters.put(key, kafkaCluster);
-    }
+  public void setKafkaCluster(String key, KafkaCluster kafkaCluster) {
+    this.kafkaClusters.put(key, kafkaCluster);
+  }
 
-    public Map<String, KafkaCluster> getKafkaClustersMap() {
-        return kafkaClusters;
-    }
+  public Map<String, KafkaCluster> getKafkaClustersMap() {
+    return kafkaClusters;
+  }
 }

+ 179 - 173
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java

@@ -6,9 +6,15 @@ import com.provectus.kafka.ui.deserialization.DeserializationService;
 import com.provectus.kafka.ui.deserialization.RecordDeserializer;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.util.ClusterUtil;
 import com.provectus.kafka.ui.model.SeekType;
 import com.provectus.kafka.ui.model.TopicMessage;
+import com.provectus.kafka.ui.util.ClusterUtil;
+import java.time.Duration;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.apache.commons.lang3.StringUtils;
@@ -23,181 +29,181 @@ import reactor.core.publisher.FluxSink;
 import reactor.core.publisher.Mono;
 import reactor.core.scheduler.Schedulers;
 
-import java.time.Duration;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.stream.Collectors;
-
 @Service
 @Log4j2
 @RequiredArgsConstructor
 public class ConsumingService {
 
-	private static final int MAX_RECORD_LIMIT = 100;
-	private static final int DEFAULT_RECORD_LIMIT = 20;
-
-	private final KafkaService kafkaService;
-	private final DeserializationService deserializationService;
-	private final ObjectMapper objectMapper = new ObjectMapper();
-
-	public Flux<TopicMessage> loadMessages(KafkaCluster cluster, String topic, ConsumerPosition consumerPosition, String query, Integer limit) {
-		int recordsLimit = Optional.ofNullable(limit)
-				.map(s -> Math.min(s, MAX_RECORD_LIMIT))
-				.orElse(DEFAULT_RECORD_LIMIT);
-		RecordEmitter emitter = new RecordEmitter(kafkaService, cluster, topic, consumerPosition);
-		RecordDeserializer recordDeserializer = deserializationService.getRecordDeserializerForCluster(cluster);
-		return Flux.create(emitter::emit)
-				.subscribeOn(Schedulers.boundedElastic())
-				.map(r -> ClusterUtil.mapToTopicMessage(r, recordDeserializer))
-				.filter(m -> filterTopicMessage(m, query))
-				.limitRequest(recordsLimit);
-	}
-
-	public Mono<Map<TopicPartition, Long>> loadOffsets(KafkaCluster cluster, String topicName, List<Integer> partitionsToInclude) {
-		return Mono.fromSupplier(() -> {
-			try (KafkaConsumer<Bytes, Bytes> consumer = kafkaService.createConsumer(cluster)) {
-				var partitions = consumer.partitionsFor(topicName).stream()
-                        .filter(p -> partitionsToInclude.isEmpty() || partitionsToInclude.contains(p.partition()))
-						.map(p -> new TopicPartition(topicName, p.partition()))
-						.collect(Collectors.toList());
-				var beginningOffsets = consumer.beginningOffsets(partitions);
-				var endOffsets = consumer.endOffsets(partitions);
-				return endOffsets.entrySet().stream()
-						.filter(entry -> !beginningOffsets.get(entry.getKey()).equals(entry.getValue()))
-						.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
-			} catch (Exception e) {
-				log.error("Error occurred while consuming records", e);
-				throw new RuntimeException(e);
-			}
-		});
-	}
-
-	private boolean filterTopicMessage(TopicMessage message, String query) {
-		if (StringUtils.isEmpty(query)) {
-			return true;
-		}
-
-		Object content = message.getContent();
-		JsonNode tree = objectMapper.valueToTree(content);
-		return treeContainsValue(tree, query);
-	}
-
-	private boolean treeContainsValue(JsonNode tree, String query) {
-		LinkedList<JsonNode> nodesForSearch = new LinkedList<>();
-		nodesForSearch.add(tree);
-
-		while (!nodesForSearch.isEmpty()) {
-			JsonNode node = nodesForSearch.removeFirst();
-
-			if (node.isContainerNode()) {
-				node.elements().forEachRemaining(nodesForSearch::add);
-				continue;
-			}
-
-			String nodeValue = node.asText();
-			if (nodeValue.contains(query)) {
-				return true;
-			}
-		}
-
-		return false;
-	}
-
-	@RequiredArgsConstructor
-	private static class RecordEmitter {
-		private static final int MAX_EMPTY_POLLS_COUNT = 3;
-		private static final Duration POLL_TIMEOUT_MS = Duration.ofMillis(1000L);
-
-		private final KafkaService kafkaService;
-		private final KafkaCluster cluster;
-		private final String topic;
-		private final ConsumerPosition consumerPosition;
-
-		public void emit(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
-			try (KafkaConsumer<Bytes, Bytes> consumer = kafkaService.createConsumer(cluster)) {
-				assignAndSeek(consumer);
-				int emptyPollsCount = 0;
-				log.info("assignment: {}", consumer.assignment());
-				while (!sink.isCancelled()) {
-					ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
-					log.info("{} records polled", records.count());
-					if (records.count() == 0 && emptyPollsCount > MAX_EMPTY_POLLS_COUNT) {
-						break;
-					} else {
-						emptyPollsCount++;
-					}
-					records.iterator()
-							.forEachRemaining(sink::next);
-				}
-				sink.complete();
-			} catch (Exception e) {
-				log.error("Error occurred while consuming records", e);
-				throw new RuntimeException(e);
-			}
-		}
-
-		private List<TopicPartition> getRequestedPartitions() {
-			Map<Integer, Long> partitionPositions = consumerPosition.getSeekTo();
-
-			return Optional.ofNullable(cluster.getTopics().get(topic))
-					.orElseThrow(() -> new IllegalArgumentException("Unknown topic: " + topic))
-					.getPartitions().values().stream()
-					.filter(internalPartition -> partitionPositions.isEmpty() || partitionPositions.containsKey(internalPartition.getPartition()))
-					.map(partitionInfo -> new TopicPartition(topic, partitionInfo.getPartition()))
-					.collect(Collectors.toList());
-		}
-
-		private void assignAndSeek(KafkaConsumer<Bytes, Bytes> consumer) {
-			SeekType seekType = consumerPosition.getSeekType();
-			switch (seekType) {
-				case OFFSET:
-					assignAndSeekForOffset(consumer);
-					break;
-				case TIMESTAMP:
-					assignAndSeekForTimestamp(consumer);
-					break;
-				case BEGINNING:
-					assignAndSeekFromBeginning(consumer);
-					break;
-				default:
-					throw new IllegalArgumentException("Unknown seekType: " + seekType);
-			}
-		}
-
-		private void assignAndSeekForOffset(KafkaConsumer<Bytes, Bytes> consumer) {
-			List<TopicPartition> partitions = getRequestedPartitions();
-			consumer.assign(partitions);
-			consumerPosition.getSeekTo().forEach((partition, offset) -> {
-				TopicPartition topicPartition = new TopicPartition(topic, partition);
-				consumer.seek(topicPartition, offset);
-			});
-		}
-
-		private void assignAndSeekForTimestamp(KafkaConsumer<Bytes, Bytes> consumer) {
-			Map<TopicPartition, Long> timestampsToSearch = consumerPosition.getSeekTo().entrySet().stream()
-					.collect(Collectors.toMap(
-							partitionPosition -> new TopicPartition(topic, partitionPosition.getKey()),
-							Map.Entry::getValue
-					));
-			Map<TopicPartition, Long> offsetsForTimestamps = consumer.offsetsForTimes(timestampsToSearch)
-					.entrySet().stream()
-					.filter(e -> e.getValue() != null)
-					.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
-
-			if (offsetsForTimestamps.isEmpty()) {
-				throw new IllegalArgumentException("No offsets were found for requested timestamps");
-			}
-
-			consumer.assign(offsetsForTimestamps.keySet());
-			offsetsForTimestamps.forEach(consumer::seek);
-		}
-
-		private void assignAndSeekFromBeginning(KafkaConsumer<Bytes, Bytes> consumer) {
-			List<TopicPartition> partitions = getRequestedPartitions();
-			consumer.assign(partitions);
-			consumer.seekToBeginning(partitions);
-		}
-	}
+  private static final int MAX_RECORD_LIMIT = 100;
+  private static final int DEFAULT_RECORD_LIMIT = 20;
+
+  private final KafkaService kafkaService;
+  private final DeserializationService deserializationService;
+  private final ObjectMapper objectMapper = new ObjectMapper();
+
+  public Flux<TopicMessage> loadMessages(KafkaCluster cluster, String topic,
+                                         ConsumerPosition consumerPosition, String query,
+                                         Integer limit) {
+    int recordsLimit = Optional.ofNullable(limit)
+        .map(s -> Math.min(s, MAX_RECORD_LIMIT))
+        .orElse(DEFAULT_RECORD_LIMIT);
+    RecordEmitter emitter = new RecordEmitter(kafkaService, cluster, topic, consumerPosition);
+    RecordDeserializer recordDeserializer =
+        deserializationService.getRecordDeserializerForCluster(cluster);
+    return Flux.create(emitter::emit)
+        .subscribeOn(Schedulers.boundedElastic())
+        .map(r -> ClusterUtil.mapToTopicMessage(r, recordDeserializer))
+        .filter(m -> filterTopicMessage(m, query))
+        .limitRequest(recordsLimit);
+  }
+
+  public Mono<Map<TopicPartition, Long>> loadOffsets(KafkaCluster cluster, String topicName,
+                                                     List<Integer> partitionsToInclude) {
+    return Mono.fromSupplier(() -> {
+      try (KafkaConsumer<Bytes, Bytes> consumer = kafkaService.createConsumer(cluster)) {
+        var partitions = consumer.partitionsFor(topicName).stream()
+            .filter(
+                p -> partitionsToInclude.isEmpty() || partitionsToInclude.contains(p.partition()))
+            .map(p -> new TopicPartition(topicName, p.partition()))
+            .collect(Collectors.toList());
+        var beginningOffsets = consumer.beginningOffsets(partitions);
+        var endOffsets = consumer.endOffsets(partitions);
+        return endOffsets.entrySet().stream()
+            .filter(entry -> !beginningOffsets.get(entry.getKey()).equals(entry.getValue()))
+            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+      } catch (Exception e) {
+        log.error("Error occurred while consuming records", e);
+        throw new RuntimeException(e);
+      }
+    });
+  }
+
+  private boolean filterTopicMessage(TopicMessage message, String query) {
+    if (StringUtils.isEmpty(query)) {
+      return true;
+    }
+
+    Object content = message.getContent();
+    JsonNode tree = objectMapper.valueToTree(content);
+    return treeContainsValue(tree, query);
+  }
+
+  private boolean treeContainsValue(JsonNode tree, String query) {
+    LinkedList<JsonNode> nodesForSearch = new LinkedList<>();
+    nodesForSearch.add(tree);
+
+    while (!nodesForSearch.isEmpty()) {
+      JsonNode node = nodesForSearch.removeFirst();
+
+      if (node.isContainerNode()) {
+        node.elements().forEachRemaining(nodesForSearch::add);
+        continue;
+      }
+
+      String nodeValue = node.asText();
+      if (nodeValue.contains(query)) {
+        return true;
+      }
+    }
+
+    return false;
+  }
+
+  @RequiredArgsConstructor
+  private static class RecordEmitter {
+    private static final int MAX_EMPTY_POLLS_COUNT = 3;
+    private static final Duration POLL_TIMEOUT_MS = Duration.ofMillis(1000L);
+
+    private final KafkaService kafkaService;
+    private final KafkaCluster cluster;
+    private final String topic;
+    private final ConsumerPosition consumerPosition;
+
+    public void emit(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
+      try (KafkaConsumer<Bytes, Bytes> consumer = kafkaService.createConsumer(cluster)) {
+        assignAndSeek(consumer);
+        int emptyPollsCount = 0;
+        log.info("assignment: {}", consumer.assignment());
+        while (!sink.isCancelled()) {
+          ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
+          log.info("{} records polled", records.count());
+          if (records.count() == 0 && emptyPollsCount > MAX_EMPTY_POLLS_COUNT) {
+            break;
+          } else {
+            emptyPollsCount++;
+          }
+          records.iterator()
+              .forEachRemaining(sink::next);
+        }
+        sink.complete();
+      } catch (Exception e) {
+        log.error("Error occurred while consuming records", e);
+        throw new RuntimeException(e);
+      }
+    }
+
+    private List<TopicPartition> getRequestedPartitions() {
+      Map<Integer, Long> partitionPositions = consumerPosition.getSeekTo();
+
+      return Optional.ofNullable(cluster.getTopics().get(topic))
+          .orElseThrow(() -> new IllegalArgumentException("Unknown topic: " + topic))
+          .getPartitions().values().stream()
+          .filter(internalPartition -> partitionPositions.isEmpty()
+              || partitionPositions.containsKey(internalPartition.getPartition()))
+          .map(partitionInfo -> new TopicPartition(topic, partitionInfo.getPartition()))
+          .collect(Collectors.toList());
+    }
+
+    private void assignAndSeek(KafkaConsumer<Bytes, Bytes> consumer) {
+      SeekType seekType = consumerPosition.getSeekType();
+      switch (seekType) {
+        case OFFSET:
+          assignAndSeekForOffset(consumer);
+          break;
+        case TIMESTAMP:
+          assignAndSeekForTimestamp(consumer);
+          break;
+        case BEGINNING:
+          assignAndSeekFromBeginning(consumer);
+          break;
+        default:
+          throw new IllegalArgumentException("Unknown seekType: " + seekType);
+      }
+    }
+
+    private void assignAndSeekForOffset(KafkaConsumer<Bytes, Bytes> consumer) {
+      List<TopicPartition> partitions = getRequestedPartitions();
+      consumer.assign(partitions);
+      consumerPosition.getSeekTo().forEach((partition, offset) -> {
+        TopicPartition topicPartition = new TopicPartition(topic, partition);
+        consumer.seek(topicPartition, offset);
+      });
+    }
+
+    private void assignAndSeekForTimestamp(KafkaConsumer<Bytes, Bytes> consumer) {
+      Map<TopicPartition, Long> timestampsToSearch =
+          consumerPosition.getSeekTo().entrySet().stream()
+              .collect(Collectors.toMap(
+                  partitionPosition -> new TopicPartition(topic, partitionPosition.getKey()),
+                  Map.Entry::getValue
+              ));
+      Map<TopicPartition, Long> offsetsForTimestamps = consumer.offsetsForTimes(timestampsToSearch)
+          .entrySet().stream()
+          .filter(e -> e.getValue() != null)
+          .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
+
+      if (offsetsForTimestamps.isEmpty()) {
+        throw new IllegalArgumentException("No offsets were found for requested timestamps");
+      }
+
+      consumer.assign(offsetsForTimestamps.keySet());
+      offsetsForTimestamps.forEach(consumer::seek);
+    }
+
+    private void assignAndSeekFromBeginning(KafkaConsumer<Bytes, Bytes> consumer) {
+      List<TopicPartition> partitions = getRequestedPartitions();
+      consumer.assign(partitions);
+      consumer.seekToBeginning(partitions);
+    }
+  }
 }

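Note: the topic-message filter above deserializes the message content to a Jackson tree and searches it breadth-first, matching any scalar value that contains the query string. A minimal standalone sketch of that idea, assuming a Jackson dependency on the classpath; the class name and sample payload are illustrative only and not part of this change:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.LinkedList;

public class JsonSearchSketch {
  private static final ObjectMapper MAPPER = new ObjectMapper();

  // Returns true if any scalar value in the JSON tree contains the query substring.
  static boolean treeContainsValue(JsonNode tree, String query) {
    LinkedList<JsonNode> queue = new LinkedList<>();
    queue.add(tree);
    while (!queue.isEmpty()) {
      JsonNode node = queue.removeFirst();
      if (node.isContainerNode()) {
        // enqueue child values of objects and arrays; field names are not matched
        node.elements().forEachRemaining(queue::add);
        continue;
      }
      if (node.asText().contains(query)) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    JsonNode sample = MAPPER.readTree("{\"user\":{\"name\":\"alice\",\"tags\":[\"kafka\",\"ui\"]}}");
    System.out.println(treeContainsValue(sample, "kafka")); // true
    System.out.println(treeContainsValue(sample, "bob"));   // false
  }
}
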
+ 175 - 156
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaConnectService.java

@@ -4,178 +4,197 @@ import com.provectus.kafka.ui.client.KafkaConnectClients;
 import com.provectus.kafka.ui.exception.NotFoundException;
 import com.provectus.kafka.ui.mapper.ClusterMapper;
 import com.provectus.kafka.ui.mapper.KafkaConnectMapper;
+import com.provectus.kafka.ui.model.Connect;
+import com.provectus.kafka.ui.model.Connector;
+import com.provectus.kafka.ui.model.ConnectorAction;
+import com.provectus.kafka.ui.model.ConnectorPlugin;
+import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponse;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.KafkaConnectCluster;
-import com.provectus.kafka.ui.model.*;
+import com.provectus.kafka.ui.model.NewConnector;
+import com.provectus.kafka.ui.model.Task;
+import java.util.Collection;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.springframework.stereotype.Service;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
-import java.util.Collection;
-import java.util.Map;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-
 @Service
 @Log4j2
 @RequiredArgsConstructor
 public class KafkaConnectService {
-    private final ClustersStorage clustersStorage;
-    private final ClusterMapper clusterMapper;
-    private final KafkaConnectMapper kafkaConnectMapper;
-
-    public Mono<Flux<Connect>> getConnects(String clusterName) {
-        return Mono.just(
-                Flux.fromIterable(clustersStorage.getClusterByName(clusterName)
-                        .map(KafkaCluster::getKafkaConnect).stream()
-                        .flatMap(Collection::stream)
-                        .map(clusterMapper::toKafkaConnect)
-                        .collect(Collectors.toList())
+  private final ClustersStorage clustersStorage;
+  private final ClusterMapper clusterMapper;
+  private final KafkaConnectMapper kafkaConnectMapper;
+
+  public Mono<Flux<Connect>> getConnects(String clusterName) {
+    return Mono.just(
+        Flux.fromIterable(clustersStorage.getClusterByName(clusterName)
+            .map(KafkaCluster::getKafkaConnect).stream()
+            .flatMap(Collection::stream)
+            .map(clusterMapper::toKafkaConnect)
+            .collect(Collectors.toList())
+        )
+    );
+  }
+
+  public Flux<String> getConnectors(String clusterName, String connectName) {
+    return getConnectAddress(clusterName, connectName)
+        .flatMapMany(connect ->
+            KafkaConnectClients.withBaseUrl(connect).getConnectors()
+                .doOnError(log::error)
+        );
+  }
+
+  public Mono<Connector> createConnector(String clusterName, String connectName,
+                                         Mono<NewConnector> connector) {
+    return getConnectAddress(clusterName, connectName)
+        .flatMap(connect ->
+            connector
+                .map(kafkaConnectMapper::toClient)
+                .flatMap(c ->
+                    KafkaConnectClients.withBaseUrl(connect).createConnector(c)
                 )
+                .flatMap(c -> getConnector(clusterName, connectName, c.getName()))
         );
-    }
-
-    public Flux<String> getConnectors(String clusterName, String connectName) {
-        return getConnectAddress(clusterName, connectName)
-                .flatMapMany(connect ->
-                        KafkaConnectClients.withBaseUrl(connect).getConnectors()
-                                .doOnError(log::error)
-                );
-    }
-
-    public Mono<Connector> createConnector(String clusterName, String connectName, Mono<NewConnector> connector) {
-        return getConnectAddress(clusterName, connectName)
-                .flatMap(connect ->
-                        connector
-                                .map(kafkaConnectMapper::toClient)
-                                .flatMap(c ->
-                                        KafkaConnectClients.withBaseUrl(connect).createConnector(c)
-                                )
-                                .flatMap(c -> getConnector(clusterName, connectName, c.getName()))
-                );
-    }
-
-    public Mono<Connector> getConnector(String clusterName, String connectName, String connectorName) {
-        return getConnectAddress(clusterName, connectName)
-                .flatMap(connect ->
-                        KafkaConnectClients.withBaseUrl(connect).getConnector(connectorName)
-                                .map(kafkaConnectMapper::fromClient)
-                                .flatMap(connector ->
-                                        KafkaConnectClients.withBaseUrl(connect).getConnectorStatus(connector.getName())
-                                                .map(connectorStatus -> {
-                                                    var status = connectorStatus.getConnector();
-                                                    connector.status(kafkaConnectMapper.fromClient(status));
-                                                    return (Connector) new Connector()
-                                                            .status(kafkaConnectMapper.fromClient(status))
-                                                            .type(connector.getType())
-                                                            .tasks(connector.getTasks())
-                                                            .name(connector.getName())
-                                                            .config(connector.getConfig());
-                                                })
-                                )
-                );
-    }
-
-    public Mono<Map<String, Object>> getConnectorConfig(String clusterName, String connectName, String connectorName) {
-        return getConnectAddress(clusterName, connectName)
-                .flatMap(connect ->
-                        KafkaConnectClients.withBaseUrl(connect).getConnectorConfig(connectorName)
-                );
-    }
-
-    public Mono<Connector> setConnectorConfig(String clusterName, String connectName, String connectorName, Mono<Object> requestBody) {
-        return getConnectAddress(clusterName, connectName)
-                .flatMap(connect ->
-                        requestBody.flatMap(body ->
-                                KafkaConnectClients.withBaseUrl(connect).setConnectorConfig(connectorName, (Map<String, Object>) body)
-                        )
-                                .map(kafkaConnectMapper::fromClient)
-                );
-    }
-
-    public Mono<Void> deleteConnector(String clusterName, String connectName, String connectorName) {
-        return getConnectAddress(clusterName, connectName)
-                .flatMap(connect ->
-                        KafkaConnectClients.withBaseUrl(connect).deleteConnector(connectorName)
-                );
-    }
-
-    public Mono<Void> updateConnectorState(String clusterName, String connectName, String connectorName, ConnectorAction action) {
-        Function<String, Mono<Void>> kafkaClientCall;
-        switch (action) {
-            case RESTART:
-                kafkaClientCall = connect -> KafkaConnectClients.withBaseUrl(connect).restartConnector(connectorName);
-                break;
-            case PAUSE:
-                kafkaClientCall = connect -> KafkaConnectClients.withBaseUrl(connect).pauseConnector(connectorName);
-                break;
-            case RESUME:
-                kafkaClientCall = connect -> KafkaConnectClients.withBaseUrl(connect).resumeConnector(connectorName);
-                break;
-            default:
-                throw new IllegalStateException("Unexpected value: " + action);
-        }
-        return getConnectAddress(clusterName, connectName)
-                .flatMap(kafkaClientCall);
-    }
-
-    public Flux<Task> getConnectorTasks(String clusterName, String connectName, String connectorName) {
-        return getConnectAddress(clusterName, connectName)
-                .flatMapMany(connect ->
-                        KafkaConnectClients.withBaseUrl(connect).getConnectorTasks(connectorName)
-                                .map(kafkaConnectMapper::fromClient)
-                                .flatMap(task ->
-                                        KafkaConnectClients.withBaseUrl(connect).getConnectorTaskStatus(connectorName, task.getId().getTask())
-                                                .map(kafkaConnectMapper::fromClient)
-                                                .map(task::status)
-                                )
-                );
-    }
-
-    public Mono<Void> restartConnectorTask(String clusterName, String connectName, String connectorName, Integer taskId) {
-        return getConnectAddress(clusterName, connectName)
-                .flatMap(connect ->
-                        KafkaConnectClients.withBaseUrl(connect).restartConnectorTask(connectorName, taskId)
-                );
-    }
-
-    public Mono<Flux<ConnectorPlugin>> getConnectorPlugins(String clusterName, String connectName) {
-        return Mono.just(getConnectAddress(clusterName, connectName)
-                .flatMapMany(connect ->
-                        KafkaConnectClients.withBaseUrl(connect).getConnectorPlugins()
-                                .map(kafkaConnectMapper::fromClient)
-                ));
-    }
+  }
+
+  public Mono<Connector> getConnector(String clusterName, String connectName,
+                                      String connectorName) {
+    return getConnectAddress(clusterName, connectName)
+        .flatMap(connect ->
+            KafkaConnectClients.withBaseUrl(connect).getConnector(connectorName)
+                .map(kafkaConnectMapper::fromClient)
+                .flatMap(connector ->
+                    KafkaConnectClients.withBaseUrl(connect).getConnectorStatus(connector.getName())
+                        .map(connectorStatus -> {
+                          var status = connectorStatus.getConnector();
+                          connector.status(kafkaConnectMapper.fromClient(status));
+                          return (Connector) new Connector()
+                              .status(kafkaConnectMapper.fromClient(status))
+                              .type(connector.getType())
+                              .tasks(connector.getTasks())
+                              .name(connector.getName())
+                              .config(connector.getConfig());
+                        })
+                )
+        );
+  }
 
-    public Mono<ConnectorPluginConfigValidationResponse> validateConnectorPluginConfig(String clusterName, String connectName, String pluginName, Mono<Object> requestBody) {
-        return getConnectAddress(clusterName, connectName)
-                .flatMap(connect ->
-                        requestBody.flatMap(body ->
-                                KafkaConnectClients.withBaseUrl(connect).validateConnectorPluginConfig(pluginName, (Map<String, Object>) body)
-                        )
-                                .map(kafkaConnectMapper::fromClient)
-                );
-    }
+  public Mono<Map<String, Object>> getConnectorConfig(String clusterName, String connectName,
+                                                      String connectorName) {
+    return getConnectAddress(clusterName, connectName)
+        .flatMap(connect ->
+            KafkaConnectClients.withBaseUrl(connect).getConnectorConfig(connectorName)
+        );
+  }
+
+  public Mono<Connector> setConnectorConfig(String clusterName, String connectName,
+                                            String connectorName, Mono<Object> requestBody) {
+    return getConnectAddress(clusterName, connectName)
+        .flatMap(connect ->
+            requestBody.flatMap(body ->
+                KafkaConnectClients.withBaseUrl(connect)
+                    .setConnectorConfig(connectorName, (Map<String, Object>) body)
+            )
+                .map(kafkaConnectMapper::fromClient)
+        );
+  }
 
-    private Mono<KafkaCluster> getCluster(String clusterName) {
-        return clustersStorage.getClusterByName(clusterName)
-                .map(Mono::just)
-                .orElse(Mono.error(new NotFoundException("No such cluster")));
+  public Mono<Void> deleteConnector(String clusterName, String connectName, String connectorName) {
+    return getConnectAddress(clusterName, connectName)
+        .flatMap(connect ->
+            KafkaConnectClients.withBaseUrl(connect).deleteConnector(connectorName)
+        );
+  }
+
+  public Mono<Void> updateConnectorState(String clusterName, String connectName,
+                                         String connectorName, ConnectorAction action) {
+    Function<String, Mono<Void>> kafkaClientCall;
+    switch (action) {
+      case RESTART:
+        kafkaClientCall =
+            connect -> KafkaConnectClients.withBaseUrl(connect).restartConnector(connectorName);
+        break;
+      case PAUSE:
+        kafkaClientCall =
+            connect -> KafkaConnectClients.withBaseUrl(connect).pauseConnector(connectorName);
+        break;
+      case RESUME:
+        kafkaClientCall =
+            connect -> KafkaConnectClients.withBaseUrl(connect).resumeConnector(connectorName);
+        break;
+      default:
+        throw new IllegalStateException("Unexpected value: " + action);
     }
-
-    private Mono<String> getConnectAddress(String clusterName, String connectName) {
-        return getCluster(clusterName)
-                .map(kafkaCluster ->
-                        kafkaCluster.getKafkaConnect().stream()
-                                .filter(connect -> connect.getName().equals(connectName))
-                                .findFirst()
-                                .map(KafkaConnectCluster::getAddress)
+    return getConnectAddress(clusterName, connectName)
+        .flatMap(kafkaClientCall);
+  }
+
+  public Flux<Task> getConnectorTasks(String clusterName, String connectName,
+                                      String connectorName) {
+    return getConnectAddress(clusterName, connectName)
+        .flatMapMany(connect ->
+            KafkaConnectClients.withBaseUrl(connect).getConnectorTasks(connectorName)
+                .map(kafkaConnectMapper::fromClient)
+                .flatMap(task ->
+                    KafkaConnectClients.withBaseUrl(connect)
+                        .getConnectorTaskStatus(connectorName, task.getId().getTask())
+                        .map(kafkaConnectMapper::fromClient)
+                        .map(task::status)
                 )
-                .flatMap(connect -> connect
-                        .map(Mono::just)
-                        .orElse(Mono.error(new NotFoundException("No such connect cluster")))
-                );
-    }
+        );
+  }
+
+  public Mono<Void> restartConnectorTask(String clusterName, String connectName,
+                                         String connectorName, Integer taskId) {
+    return getConnectAddress(clusterName, connectName)
+        .flatMap(connect ->
+            KafkaConnectClients.withBaseUrl(connect).restartConnectorTask(connectorName, taskId)
+        );
+  }
+
+  public Mono<Flux<ConnectorPlugin>> getConnectorPlugins(String clusterName, String connectName) {
+    return Mono.just(getConnectAddress(clusterName, connectName)
+        .flatMapMany(connect ->
+            KafkaConnectClients.withBaseUrl(connect).getConnectorPlugins()
+                .map(kafkaConnectMapper::fromClient)
+        ));
+  }
+
+  public Mono<ConnectorPluginConfigValidationResponse> validateConnectorPluginConfig(
+      String clusterName, String connectName, String pluginName, Mono<Object> requestBody) {
+    return getConnectAddress(clusterName, connectName)
+        .flatMap(connect ->
+            requestBody.flatMap(body ->
+                KafkaConnectClients.withBaseUrl(connect)
+                    .validateConnectorPluginConfig(pluginName, (Map<String, Object>) body)
+            )
+                .map(kafkaConnectMapper::fromClient)
+        );
+  }
+
+  private Mono<KafkaCluster> getCluster(String clusterName) {
+    return clustersStorage.getClusterByName(clusterName)
+        .map(Mono::just)
+        .orElse(Mono.error(new NotFoundException("No such cluster")));
+  }
+
+  private Mono<String> getConnectAddress(String clusterName, String connectName) {
+    return getCluster(clusterName)
+        .map(kafkaCluster ->
+            kafkaCluster.getKafkaConnect().stream()
+                .filter(connect -> connect.getName().equals(connectName))
+                .findFirst()
+                .map(KafkaConnectCluster::getAddress)
+        )
+        .flatMap(connect -> connect
+            .map(Mono::just)
+            .orElse(Mono.error(new NotFoundException("No such connect cluster")))
+        );
+  }
 }

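For context, each method in the reworked KafkaConnectService first resolves the Connect endpoint address by name from the cluster configuration and fails the reactive chain when it is missing. A small sketch of that Optional-to-Mono lookup pattern, using a plain Map in place of the project's KafkaCluster model (an assumption made only for illustration):

import java.util.Map;
import java.util.Optional;
import reactor.core.publisher.Mono;

public class ConnectLookupSketch {
  // Resolve the address of a named Connect cluster, failing the Mono if it is absent.
  // The Map stands in for KafkaCluster#getKafkaConnect() from the project model (assumption).
  static Mono<String> getConnectAddress(Map<String, String> connectsByName, String connectName) {
    return Optional.ofNullable(connectsByName.get(connectName))
        .map(Mono::just)
        .orElse(Mono.error(new IllegalArgumentException("No such connect cluster")));
  }

  public static void main(String[] args) {
    Map<String, String> connects = Map.of("first", "http://localhost:8083");
    getConnectAddress(connects, "first").subscribe(System.out::println);   // prints the address
    getConnectAddress(connects, "missing")
        .subscribe(System.out::println, err -> System.out.println("error: " + err.getMessage()));
  }
}
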
+ 536 - 463
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java

@@ -1,18 +1,46 @@
 package com.provectus.kafka.ui.service;
 
-import com.provectus.kafka.ui.model.*;
-import com.provectus.kafka.ui.util.ClusterUtil;
-import com.provectus.kafka.ui.util.JmxClusterUtil;
-import com.provectus.kafka.ui.util.JmxMetricsName;
-import com.provectus.kafka.ui.util.JmxMetricsValueName;
 import com.provectus.kafka.ui.model.ConsumerGroup;
+import com.provectus.kafka.ui.model.ExtendedAdminClient;
+import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
+import com.provectus.kafka.ui.model.InternalBrokerMetrics;
+import com.provectus.kafka.ui.model.InternalClusterMetrics;
+import com.provectus.kafka.ui.model.InternalPartition;
+import com.provectus.kafka.ui.model.InternalSegmentSizeDto;
+import com.provectus.kafka.ui.model.InternalTopic;
+import com.provectus.kafka.ui.model.InternalTopicConfig;
+import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.Metric;
 import com.provectus.kafka.ui.model.ServerStatus;
 import com.provectus.kafka.ui.model.TopicFormData;
+import com.provectus.kafka.ui.util.ClusterUtil;
+import com.provectus.kafka.ui.util.JmxClusterUtil;
+import com.provectus.kafka.ui.util.JmxMetricsName;
+import com.provectus.kafka.ui.util.JmxMetricsValueName;
+import java.math.BigDecimal;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.LongSummaryStatistics;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 import lombok.RequiredArgsConstructor;
 import lombok.SneakyThrows;
 import lombok.extern.log4j.Log4j2;
-import org.apache.kafka.clients.admin.*;
+import org.apache.kafka.clients.admin.AdminClient;
+import org.apache.kafka.clients.admin.AdminClientConfig;
+import org.apache.kafka.clients.admin.AlterConfigOp;
+import org.apache.kafka.clients.admin.Config;
+import org.apache.kafka.clients.admin.ConfigEntry;
+import org.apache.kafka.clients.admin.ConsumerGroupListing;
+import org.apache.kafka.clients.admin.ListTopicsOptions;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.admin.RecordsToDelete;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.common.Node;
@@ -28,473 +56,518 @@ import reactor.util.function.Tuple2;
 import reactor.util.function.Tuple3;
 import reactor.util.function.Tuples;
 
-import java.math.BigDecimal;
-import java.util.*;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
 @Service
 @RequiredArgsConstructor
 @Log4j2
 public class KafkaService {
 
-    @Value("${kafka.admin-client-timeout}")
-    private int clientTimeout;
-
-    private static final ListTopicsOptions LIST_TOPICS_OPTIONS = new ListTopicsOptions().listInternal(true);
-
-    private final ZookeeperService zookeeperService;
-    private final Map<String, ExtendedAdminClient> adminClientCache = new ConcurrentHashMap<>();
-    private final JmxClusterUtil jmxClusterUtil;
-    private final ClustersStorage clustersStorage;
-
-    public KafkaCluster getUpdatedCluster(KafkaCluster cluster, InternalTopic updatedTopic) {
-        final Map<String, InternalTopic> topics = new HashMap<>(cluster.getTopics());
-        topics.put(updatedTopic.getName(), updatedTopic);
-        return cluster.toBuilder().topics(topics).build();
-    }
-
-    public KafkaCluster getUpdatedCluster(KafkaCluster cluster, String topicToDelete) {
-        final Map<String, InternalTopic> topics = new HashMap<>(cluster.getTopics());
-        topics.remove(topicToDelete);
-        return cluster.toBuilder().topics(topics).build();
-    }
-
-    @SneakyThrows
-    public Mono<KafkaCluster> getUpdatedCluster(KafkaCluster cluster) {
-        return getOrCreateAdminClient(cluster)
-                .flatMap(
-                ac -> getClusterMetrics(ac.getAdminClient())
-                        .flatMap(i -> fillJmxMetrics(i, cluster.getName(), ac.getAdminClient()))
-                        .flatMap( clusterMetrics ->
-                            getTopicsData(ac.getAdminClient()).flatMap( it ->
-                                    updateSegmentMetrics(ac.getAdminClient(), clusterMetrics, it)
-                            ).map( segmentSizeDto -> buildFromData(cluster, segmentSizeDto))
-                        )
+  private static final ListTopicsOptions LIST_TOPICS_OPTIONS =
+      new ListTopicsOptions().listInternal(true);
+  private final ZookeeperService zookeeperService;
+  private final Map<String, ExtendedAdminClient> adminClientCache = new ConcurrentHashMap<>();
+  private final JmxClusterUtil jmxClusterUtil;
+  private final ClustersStorage clustersStorage;
+  @Value("${kafka.admin-client-timeout}")
+  private int clientTimeout;
+
+  public KafkaCluster getUpdatedCluster(KafkaCluster cluster, InternalTopic updatedTopic) {
+    final Map<String, InternalTopic> topics = new HashMap<>(cluster.getTopics());
+    topics.put(updatedTopic.getName(), updatedTopic);
+    return cluster.toBuilder().topics(topics).build();
+  }
+
+  public KafkaCluster getUpdatedCluster(KafkaCluster cluster, String topicToDelete) {
+    final Map<String, InternalTopic> topics = new HashMap<>(cluster.getTopics());
+    topics.remove(topicToDelete);
+    return cluster.toBuilder().topics(topics).build();
+  }
+
+  @SneakyThrows
+  public Mono<KafkaCluster> getUpdatedCluster(KafkaCluster cluster) {
+    return getOrCreateAdminClient(cluster)
+        .flatMap(
+            ac -> getClusterMetrics(ac.getAdminClient())
+                .flatMap(i -> fillJmxMetrics(i, cluster.getName(), ac.getAdminClient()))
+                .flatMap(clusterMetrics ->
+                    getTopicsData(ac.getAdminClient()).flatMap(it ->
+                        updateSegmentMetrics(ac.getAdminClient(), clusterMetrics, it)
+                    ).map(segmentSizeDto -> buildFromData(cluster, segmentSizeDto))
+                )
         ).onErrorResume(
-                e -> Mono.just(cluster.toBuilder()
-                        .status(ServerStatus.OFFLINE)
-                        .lastKafkaException(e)
-                        .build())
+            e -> Mono.just(cluster.toBuilder()
+                .status(ServerStatus.OFFLINE)
+                .lastKafkaException(e)
+                .build())
         );
-    }
-
-    private KafkaCluster buildFromData(KafkaCluster currentCluster, InternalSegmentSizeDto segmentSizeDto) {
-
-        var topics = segmentSizeDto.getInternalTopicWithSegmentSize();
-        var brokersMetrics = segmentSizeDto.getClusterMetricsWithSegmentSize();
-
-        InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder = brokersMetrics.toBuilder();
-
-        InternalClusterMetrics topicsMetrics = collectTopicsMetrics(topics);
-
-        ServerStatus zookeeperStatus = ServerStatus.OFFLINE;
-        Throwable zookeeperException = null;
-        try {
-            zookeeperStatus = zookeeperService.isZookeeperOnline(currentCluster) ? ServerStatus.ONLINE : ServerStatus.OFFLINE;
-        } catch (Throwable e) {
-            zookeeperException = e;
-        }
-
-        InternalClusterMetrics clusterMetrics = metricsBuilder
-                .activeControllers(brokersMetrics.getActiveControllers())
-                .topicCount(topicsMetrics.getTopicCount())
-                .brokerCount(brokersMetrics.getBrokerCount())
-                .underReplicatedPartitionCount(topicsMetrics.getUnderReplicatedPartitionCount())
-                .inSyncReplicasCount(topicsMetrics.getInSyncReplicasCount())
-                .outOfSyncReplicasCount(topicsMetrics.getOutOfSyncReplicasCount())
-                .onlinePartitionCount(topicsMetrics.getOnlinePartitionCount())
-                .offlinePartitionCount(topicsMetrics.getOfflinePartitionCount())
-                .zooKeeperStatus(ClusterUtil.convertToIntServerStatus(zookeeperStatus))
-                .build();
-
-        return currentCluster.toBuilder()
-                .status(ServerStatus.ONLINE)
-                .zookeeperStatus(zookeeperStatus)
-                .lastZookeeperException(zookeeperException)
-                .lastKafkaException(null)
-                .metrics(clusterMetrics)
-                .topics(topics)
-                .build();
-    }
-
-    private InternalClusterMetrics collectTopicsMetrics(Map<String,InternalTopic> topics) {
-
-        int underReplicatedPartitions = 0;
-        int inSyncReplicasCount = 0;
-        int outOfSyncReplicasCount = 0;
-        int onlinePartitionCount = 0;
-        int offlinePartitionCount = 0;
-
-        for (InternalTopic topic : topics.values()) {
-            underReplicatedPartitions += topic.getUnderReplicatedPartitions();
-            inSyncReplicasCount += topic.getInSyncReplicas();
-            outOfSyncReplicasCount += (topic.getReplicas() - topic.getInSyncReplicas());
-            onlinePartitionCount += topic.getPartitions().values().stream().mapToInt(s -> s.getLeader() == null ? 0 : 1).sum();
-            offlinePartitionCount += topic.getPartitions().values().stream().mapToInt(s -> s.getLeader() != null ? 0 : 1).sum();
-        }
-
-        return InternalClusterMetrics.builder()
-                .underReplicatedPartitionCount(underReplicatedPartitions)
-                .inSyncReplicasCount(inSyncReplicasCount)
-                .outOfSyncReplicasCount(outOfSyncReplicasCount)
-                .onlinePartitionCount(onlinePartitionCount)
-                .offlinePartitionCount(offlinePartitionCount)
-                .topicCount(topics.size())
-                .build();
-    }
-
-    private Map<String, InternalTopic> mergeWithConfigs(List<InternalTopic> topics, Map<String, List<InternalTopicConfig>> configs) {
-        return topics.stream().map(
-                t -> t.toBuilder().topicConfigs(configs.get(t.getName())).build()
-        ).collect(Collectors.toMap(
-                InternalTopic::getName,
-                e -> e
-        ));
-    }
-
-    @SneakyThrows
-    private Mono<List<InternalTopic>> getTopicsData(AdminClient adminClient) {
-        return ClusterUtil.toMono(adminClient.listTopics(LIST_TOPICS_OPTIONS).names())
-                    .flatMap(topics -> getTopicsData(adminClient, topics).collectList());
-    }
-
-    private Flux<InternalTopic> getTopicsData(AdminClient adminClient, Collection<String> topics) {
-        final Mono<Map<String, List<InternalTopicConfig>>> configsMono = loadTopicsConfig(adminClient, topics);
-
-        return ClusterUtil.toMono(adminClient.describeTopics(topics).all()).map(
-                m -> m.values().stream().map(ClusterUtil::mapToInternalTopic).collect(Collectors.toList())
-        ).flatMap( internalTopics -> configsMono.map(configs ->
-                mergeWithConfigs(internalTopics, configs).values()
-        )).flatMapMany(Flux::fromIterable);
-    }
-
-
-    private Mono<InternalClusterMetrics> getClusterMetrics(AdminClient client) {
-        return ClusterUtil.toMono(client.describeCluster().nodes())
-                .flatMap(brokers ->
-                    ClusterUtil.toMono(client.describeCluster().controller()).map(
-                        c -> {
-                            InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder = InternalClusterMetrics.builder();
-                            metricsBuilder.brokerCount(brokers.size()).activeControllers(c != null ? 1 : 0);
-                            return metricsBuilder.build();
-                        }
-                    )
-                );
-    }
-
-
-    public Mono<InternalTopic> createTopic(KafkaCluster cluster, Mono<TopicFormData> topicFormData) {
-        return getOrCreateAdminClient(cluster).flatMap(ac -> createTopic(ac.getAdminClient(), topicFormData));
-    }
-
-    public Mono<Void> deleteTopic(KafkaCluster cluster, String topicName) {
-        return getOrCreateAdminClient(cluster)
-                .map(ExtendedAdminClient::getAdminClient)
-                .map(adminClient -> adminClient.deleteTopics(List.of(topicName)))
-                .then();
-    }
-
-    @SneakyThrows
-    public Mono<InternalTopic> createTopic(AdminClient adminClient, Mono<TopicFormData> topicFormData) {
-        return topicFormData.flatMap(
-                topicData -> {
-                    NewTopic newTopic = new NewTopic(topicData.getName(), topicData.getPartitions(), topicData.getReplicationFactor().shortValue());
-                    newTopic.configs(topicData.getConfigs());
-                    return createTopic(adminClient, newTopic).map( v -> topicData);
-                }).flatMap(topicData -> getTopicsData(adminClient, Collections.singleton(topicData.getName())).next())
-                .switchIfEmpty(Mono.error(new RuntimeException("Can't find created topic")))
-                .flatMap( t ->
-                        loadTopicsConfig(adminClient, Collections.singletonList(t.getName()))
-                                .map( c -> mergeWithConfigs(Collections.singletonList(t), c))
-                                .map( m -> m.values().iterator().next())
-                );
-    }
-
-    @SneakyThrows
-    public Mono<ExtendedAdminClient> getOrCreateAdminClient(KafkaCluster cluster) {
-        return Mono.justOrEmpty(adminClientCache.get(cluster.getName()))
-                .switchIfEmpty(createAdminClient(cluster))
-                .map(e -> adminClientCache.computeIfAbsent(cluster.getName(), key -> e));
-    }
-
-    public Mono<ExtendedAdminClient> createAdminClient(KafkaCluster kafkaCluster) {
-        return Mono.fromSupplier(() -> {
-            Properties properties = new Properties();
-            properties.putAll(kafkaCluster.getProperties());
-            properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCluster.getBootstrapServers());
-            properties.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, clientTimeout);
-            return AdminClient.create(properties);
-        }).flatMap(ExtendedAdminClient::extendedAdminClient);
-    }
-
-    @SneakyThrows
-    private Mono<Map<String, List<InternalTopicConfig>>> loadTopicsConfig(AdminClient adminClient, Collection<String> topicNames) {
-        List<ConfigResource> resources = topicNames.stream()
-                .map(topicName -> new ConfigResource(ConfigResource.Type.TOPIC, topicName))
-                .collect(Collectors.toList());
-
-        return ClusterUtil.toMono(adminClient.describeConfigs(resources).all())
-                .map(configs ->
-                        configs.entrySet().stream().map(
-                                c -> Tuples.of(
-                                        c.getKey().name(),
-                                        c.getValue().entries().stream().map(ClusterUtil::mapToInternalTopicConfig).collect(Collectors.toList())
-                                )
-                        ).collect(Collectors.toMap(
-                                Tuple2::getT1,
-                                Tuple2::getT2
-                        ))
-                );
-    }
-
-    public Mono<List<ConsumerGroup>> getConsumerGroups(KafkaCluster cluster) {
-        return getOrCreateAdminClient(cluster).flatMap(ac -> ClusterUtil.toMono(ac.getAdminClient().listConsumerGroups().all())
-                .flatMap(s -> ClusterUtil.toMono(ac.getAdminClient()
-                        .describeConsumerGroups(s.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList())).all()))
-                .map(s -> s.values().stream()
-                        .map(ClusterUtil::convertToConsumerGroup).collect(Collectors.toList())));
-    }
-
-    public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster) {
-        Properties props = new Properties();
-        props.putAll(cluster.getProperties());
-        props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui");
-        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
-        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
-        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
-        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
-
-        return new KafkaConsumer<>(props);
-    }
-
-
-    @SneakyThrows
-    private Mono<String> createTopic(AdminClient adminClient, NewTopic newTopic) {
-        return ClusterUtil.toMono(adminClient.createTopics(Collections.singletonList(newTopic)).all(), newTopic.name());
-    }
-
-    @SneakyThrows
-    public Mono<InternalTopic> updateTopic(KafkaCluster cluster, String topicName, TopicFormData topicFormData) {
-        ConfigResource topicCR = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
-        return getOrCreateAdminClient(cluster)
-                .flatMap(ac -> {
-                    if (ac.getSupportedFeatures().contains(ExtendedAdminClient.SupportedFeature.INCREMENTAL_ALTER_CONFIGS)) {
-                        return incrementalAlterConfig(topicFormData, topicCR, ac)
-                                .flatMap(c -> getUpdatedTopic(ac, topicName));
-                    } else {
-                        return alterConfig(topicFormData, topicCR, ac)
-                                .flatMap(c -> getUpdatedTopic(ac, topicName));
-                    }
-                });
-    }
-
-    private Mono<InternalTopic> getUpdatedTopic (ExtendedAdminClient ac, String topicName) {
-        return getTopicsData(ac.getAdminClient())
-                .map(s -> s.stream()
-                        .filter(t -> t.getName().equals(topicName)).findFirst().orElseThrow());
-    }
-
-    private Mono<String> incrementalAlterConfig(TopicFormData topicFormData, ConfigResource topicCR, ExtendedAdminClient ac) {
-        List<AlterConfigOp> listOp = topicFormData.getConfigs().entrySet().stream()
-                .flatMap(cfg -> Stream.of(new AlterConfigOp(new ConfigEntry(cfg.getKey(), cfg.getValue()), AlterConfigOp.OpType.SET))).collect(Collectors.toList());
-        return ClusterUtil.toMono(ac.getAdminClient().incrementalAlterConfigs(Collections.singletonMap(topicCR, listOp)).all(), topicCR.name());
-    }
-
-    private Mono<String> alterConfig(TopicFormData topicFormData, ConfigResource topicCR, ExtendedAdminClient ac) {
-        List<ConfigEntry> configEntries = topicFormData.getConfigs().entrySet().stream()
-                .flatMap(cfg -> Stream.of(new ConfigEntry(cfg.getKey(), cfg.getValue()))).collect(Collectors.toList());
-        Config config = new Config(configEntries);
-        Map<ConfigResource, Config> map = Collections.singletonMap(topicCR, config);
-        return ClusterUtil.toMono(ac.getAdminClient().alterConfigs(map).all(), topicCR.name());
-
-    }
-
-    private InternalTopic mergeWithStats(InternalTopic topic, Map<String, LongSummaryStatistics> topics, Map<TopicPartition, LongSummaryStatistics> partitions) {
-        final LongSummaryStatistics stats = topics.get(topic.getName());
-
-        return topic.toBuilder()
-                .segmentSize(stats.getSum())
-                .segmentCount(stats.getCount())
-                .partitions(
-                        topic.getPartitions().entrySet().stream().map(e ->
-                                Tuples.of(e.getKey(), mergeWithStats(topic.getName(), e.getValue(), partitions))
-                        ).collect(Collectors.toMap(
-                                Tuple2::getT1,
-                                Tuple2::getT2
-                        ))
-                ).build();
-    }
-
-    private InternalPartition mergeWithStats(String topic, InternalPartition partition, Map<TopicPartition, LongSummaryStatistics> partitions) {
-        final LongSummaryStatistics stats = partitions.get(new TopicPartition(topic, partition.getPartition()));
-        return partition.toBuilder()
-                .segmentSize(stats.getSum())
-                .segmentCount(stats.getCount())
-                .build();
-    }
-
-    private Mono<InternalSegmentSizeDto> updateSegmentMetrics(AdminClient ac, InternalClusterMetrics clusterMetrics, List<InternalTopic> internalTopics) {
-        List<String> names = internalTopics.stream().map(InternalTopic::getName).collect(Collectors.toList());
-        return ClusterUtil.toMono(ac.describeTopics(names).all()).flatMap(topic ->
-            ClusterUtil.toMono(ac.describeCluster().nodes()).flatMap( nodes ->
-                    ClusterUtil.toMono(ac.describeLogDirs(nodes.stream().map(Node::id).collect(Collectors.toList())).all())
-                            .map(log -> {
-                                final List<Tuple3<Integer, TopicPartition, Long>> topicPartitions =
-                                        log.entrySet().stream().flatMap(b ->
-                                                b.getValue().entrySet().stream().flatMap(topicMap ->
-                                                        topicMap.getValue().replicaInfos.entrySet().stream()
-                                                                .map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size))
-                                                )
-                                        ).collect(Collectors.toList());
-
-                                final Map<TopicPartition, LongSummaryStatistics> partitionStats = topicPartitions.stream().collect(
-                                        Collectors.groupingBy(
-                                                Tuple2::getT2,
-                                                Collectors.summarizingLong(Tuple3::getT3)
-                                        )
-                                );
-
-                                final Map<String, LongSummaryStatistics> topicStats = topicPartitions.stream().collect(
-                                        Collectors.groupingBy(
-                                                t -> t.getT2().topic(),
-                                                Collectors.summarizingLong(Tuple3::getT3)
-                                        )
-                                );
-
-                                final Map<Integer, LongSummaryStatistics> brokerStats = topicPartitions.stream().collect(
-                                        Collectors.groupingBy(
-                                                t -> t.getT1(),
-                                                Collectors.summarizingLong(Tuple3::getT3)
-                                        )
-                                );
-
-
-                                final LongSummaryStatistics summary = topicPartitions.stream().collect(Collectors.summarizingLong(Tuple3::getT3));
-
-
-                                final Map<String, InternalTopic> resultTopics = internalTopics.stream().map(e ->
-                                        Tuples.of(e.getName(), mergeWithStats(e, topicStats, partitionStats))
-                                ).collect(Collectors.toMap(
-                                        Tuple2::getT1,
-                                        Tuple2::getT2
-                                ));
-
-                                final Map<Integer, InternalBrokerDiskUsage> resultBrokers = brokerStats.entrySet().stream().map(e ->
-                                        Tuples.of(e.getKey(), InternalBrokerDiskUsage.builder()
-                                                .segmentSize(e.getValue().getSum())
-                                                .segmentCount(e.getValue().getCount())
-                                                .build()
-                                        )
-                                ).collect(Collectors.toMap(
-                                        Tuple2::getT1,
-                                        Tuple2::getT2
-                                ));
-
-                                return InternalSegmentSizeDto.builder()
-                                        .clusterMetricsWithSegmentSize(
-                                                clusterMetrics.toBuilder()
-                                                        .segmentSize(summary.getSum())
-                                                        .segmentCount(summary.getCount())
-                                                        .internalBrokerDiskUsage(resultBrokers)
-                                                        .build()
-                                        )
-                                        .internalTopicWithSegmentSize(resultTopics).build();
-                            })
+  }
+
+  private KafkaCluster buildFromData(KafkaCluster currentCluster,
+                                     InternalSegmentSizeDto segmentSizeDto) {
+
+    var topics = segmentSizeDto.getInternalTopicWithSegmentSize();
+    var brokersMetrics = segmentSizeDto.getClusterMetricsWithSegmentSize();
+
+    InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder =
+        brokersMetrics.toBuilder();
+
+    InternalClusterMetrics topicsMetrics = collectTopicsMetrics(topics);
+
+    ServerStatus zookeeperStatus = ServerStatus.OFFLINE;
+    Throwable zookeeperException = null;
+    try {
+      zookeeperStatus = zookeeperService.isZookeeperOnline(currentCluster) ? ServerStatus.ONLINE :
+          ServerStatus.OFFLINE;
+    } catch (Throwable e) {
+      zookeeperException = e;
+    }
+
+    InternalClusterMetrics clusterMetrics = metricsBuilder
+        .activeControllers(brokersMetrics.getActiveControllers())
+        .topicCount(topicsMetrics.getTopicCount())
+        .brokerCount(brokersMetrics.getBrokerCount())
+        .underReplicatedPartitionCount(topicsMetrics.getUnderReplicatedPartitionCount())
+        .inSyncReplicasCount(topicsMetrics.getInSyncReplicasCount())
+        .outOfSyncReplicasCount(topicsMetrics.getOutOfSyncReplicasCount())
+        .onlinePartitionCount(topicsMetrics.getOnlinePartitionCount())
+        .offlinePartitionCount(topicsMetrics.getOfflinePartitionCount())
+        .zooKeeperStatus(ClusterUtil.convertToIntServerStatus(zookeeperStatus))
+        .build();
+
+    return currentCluster.toBuilder()
+        .status(ServerStatus.ONLINE)
+        .zookeeperStatus(zookeeperStatus)
+        .lastZookeeperException(zookeeperException)
+        .lastKafkaException(null)
+        .metrics(clusterMetrics)
+        .topics(topics)
+        .build();
+  }
+
+  private InternalClusterMetrics collectTopicsMetrics(Map<String, InternalTopic> topics) {
+
+    int underReplicatedPartitions = 0;
+    int inSyncReplicasCount = 0;
+    int outOfSyncReplicasCount = 0;
+    int onlinePartitionCount = 0;
+    int offlinePartitionCount = 0;
+
+    for (InternalTopic topic : topics.values()) {
+      underReplicatedPartitions += topic.getUnderReplicatedPartitions();
+      inSyncReplicasCount += topic.getInSyncReplicas();
+      outOfSyncReplicasCount += (topic.getReplicas() - topic.getInSyncReplicas());
+      onlinePartitionCount +=
+          topic.getPartitions().values().stream().mapToInt(s -> s.getLeader() == null ? 0 : 1)
+              .sum();
+      offlinePartitionCount +=
+          topic.getPartitions().values().stream().mapToInt(s -> s.getLeader() != null ? 0 : 1)
+              .sum();
+    }
+
+    return InternalClusterMetrics.builder()
+        .underReplicatedPartitionCount(underReplicatedPartitions)
+        .inSyncReplicasCount(inSyncReplicasCount)
+        .outOfSyncReplicasCount(outOfSyncReplicasCount)
+        .onlinePartitionCount(onlinePartitionCount)
+        .offlinePartitionCount(offlinePartitionCount)
+        .topicCount(topics.size())
+        .build();
+  }
+
+  private Map<String, InternalTopic> mergeWithConfigs(
+      List<InternalTopic> topics, Map<String, List<InternalTopicConfig>> configs) {
+    return topics.stream().map(
+        t -> t.toBuilder().topicConfigs(configs.get(t.getName())).build()
+    ).collect(Collectors.toMap(
+        InternalTopic::getName,
+        e -> e
+    ));
+  }
+
+  @SneakyThrows
+  private Mono<List<InternalTopic>> getTopicsData(AdminClient adminClient) {
+    return ClusterUtil.toMono(adminClient.listTopics(LIST_TOPICS_OPTIONS).names())
+        .flatMap(topics -> getTopicsData(adminClient, topics).collectList());
+  }
+
+  private Flux<InternalTopic> getTopicsData(AdminClient adminClient, Collection<String> topics) {
+    final Mono<Map<String, List<InternalTopicConfig>>> configsMono =
+        loadTopicsConfig(adminClient, topics);
+
+    return ClusterUtil.toMono(adminClient.describeTopics(topics).all()).map(
+        m -> m.values().stream().map(ClusterUtil::mapToInternalTopic).collect(Collectors.toList())
+    ).flatMap(internalTopics -> configsMono.map(configs ->
+        mergeWithConfigs(internalTopics, configs).values()
+    )).flatMapMany(Flux::fromIterable);
+  }
+
+
+  private Mono<InternalClusterMetrics> getClusterMetrics(AdminClient client) {
+    return ClusterUtil.toMono(client.describeCluster().nodes())
+        .flatMap(brokers ->
+            ClusterUtil.toMono(client.describeCluster().controller()).map(
+                c -> {
+                  InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder =
+                      InternalClusterMetrics.builder();
+                  metricsBuilder.brokerCount(brokers.size()).activeControllers(c != null ? 1 : 0);
+                  return metricsBuilder.build();
+                }
             )
         );
-    }
-
-    public List<Metric> getJmxMetric(String clusterName, Node node) {
-        return clustersStorage.getClusterByName(clusterName)
-                        .filter( c -> c.getJmxPort() != null)
-                        .filter( c -> c.getJmxPort() > 0)
-                        .map(c -> jmxClusterUtil.getJmxMetrics(c.getJmxPort(), node.host())).orElse(Collections.emptyList());
-    }
-
-    private Mono<InternalClusterMetrics> fillJmxMetrics (InternalClusterMetrics internalClusterMetrics, String clusterName, AdminClient ac) {
-        return fillBrokerMetrics(internalClusterMetrics, clusterName, ac).map(this::calculateClusterMetrics);
-    }
-
-    private Mono<InternalClusterMetrics> fillBrokerMetrics(InternalClusterMetrics internalClusterMetrics, String clusterName, AdminClient ac) {
-        return ClusterUtil.toMono(ac.describeCluster().nodes())
-                .flatMapIterable(nodes -> nodes)
-                .map(broker -> Map.of(broker.id(), InternalBrokerMetrics.builder().
-                        metrics(getJmxMetric(clusterName, broker)).build()))
-                .collectList()
-                .map(s -> internalClusterMetrics.toBuilder().internalBrokerMetrics(ClusterUtil.toSingleMap(s.stream())).build());
-    }
-
-    private InternalClusterMetrics calculateClusterMetrics(InternalClusterMetrics internalClusterMetrics) {
-        final List<Metric> metrics = internalClusterMetrics.getInternalBrokerMetrics().values().stream()
-                .flatMap(b -> b.getMetrics().stream())
-                .collect(
-                        Collectors.groupingBy(
-                                Metric::getCanonicalName,
-                                Collectors.reducing(jmxClusterUtil::reduceJmxMetrics)
-                        )
-                ).values().stream()
-                .filter(Optional::isPresent)
-                .map(Optional::get)
-                .collect(Collectors.toList());
-        final InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder =
-                internalClusterMetrics.toBuilder().metrics(metrics);
-        metricsBuilder.bytesInPerSec(findTopicMetrics(
-                metrics, JmxMetricsName.BytesInPerSec, JmxMetricsValueName.FiveMinuteRate
+  }
+
+  @SneakyThrows
+  private Mono<String> createTopic(AdminClient adminClient, NewTopic newTopic) {
+    return ClusterUtil.toMono(adminClient.createTopics(Collections.singletonList(newTopic)).all(),
+        newTopic.name());
+  }
+
+  @SneakyThrows
+  public Mono<InternalTopic> createTopic(AdminClient adminClient,
+                                         Mono<TopicFormData> topicFormData) {
+    return topicFormData.flatMap(
+        topicData -> {
+          NewTopic newTopic = new NewTopic(topicData.getName(), topicData.getPartitions(),
+              topicData.getReplicationFactor().shortValue());
+          newTopic.configs(topicData.getConfigs());
+          return createTopic(adminClient, newTopic).map(v -> topicData);
+        }).flatMap(
+          topicData ->
+              getTopicsData(adminClient, Collections.singleton(topicData.getName()))
+                  .next()
+        ).switchIfEmpty(Mono.error(new RuntimeException("Can't find created topic")))
+        .flatMap(t ->
+            loadTopicsConfig(adminClient, Collections.singletonList(t.getName()))
+                .map(c -> mergeWithConfigs(Collections.singletonList(t), c))
+                .map(m -> m.values().iterator().next())
+        );
+  }
+
+  public Mono<InternalTopic> createTopic(KafkaCluster cluster, Mono<TopicFormData> topicFormData) {
+    return getOrCreateAdminClient(cluster)
+        .flatMap(ac -> createTopic(ac.getAdminClient(), topicFormData));
+  }
+
+  public Mono<Void> deleteTopic(KafkaCluster cluster, String topicName) {
+    return getOrCreateAdminClient(cluster)
+        .map(ExtendedAdminClient::getAdminClient)
+        .map(adminClient -> adminClient.deleteTopics(List.of(topicName)))
+        .then();
+  }
+
+
+  @SneakyThrows
+  public Mono<ExtendedAdminClient> getOrCreateAdminClient(KafkaCluster cluster) {
+    return Mono.justOrEmpty(adminClientCache.get(cluster.getName()))
+        .switchIfEmpty(createAdminClient(cluster))
+        .map(e -> adminClientCache.computeIfAbsent(cluster.getName(), key -> e));
+  }
+
+  public Mono<ExtendedAdminClient> createAdminClient(KafkaCluster kafkaCluster) {
+    return Mono.fromSupplier(() -> {
+      Properties properties = new Properties();
+      properties.putAll(kafkaCluster.getProperties());
+      properties
+          .put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCluster.getBootstrapServers());
+      properties.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, clientTimeout);
+      return AdminClient.create(properties);
+    }).flatMap(ExtendedAdminClient::extendedAdminClient);
+  }
+
+  @SneakyThrows
+  private Mono<Map<String, List<InternalTopicConfig>>> loadTopicsConfig(
+      AdminClient adminClient, Collection<String> topicNames) {
+    List<ConfigResource> resources = topicNames.stream()
+        .map(topicName -> new ConfigResource(ConfigResource.Type.TOPIC, topicName))
+        .collect(Collectors.toList());
+
+    return ClusterUtil.toMono(adminClient.describeConfigs(resources).all())
+        .map(configs ->
+            configs.entrySet().stream().map(
+                c -> Tuples.of(
+                    c.getKey().name(),
+                    c.getValue().entries().stream().map(ClusterUtil::mapToInternalTopicConfig)
+                        .collect(Collectors.toList())
+                )
+            ).collect(Collectors.toMap(
+                Tuple2::getT1,
+                Tuple2::getT2
+            ))
+        );
+  }
+
+  public Mono<List<ConsumerGroup>> getConsumerGroups(KafkaCluster cluster) {
+    return getOrCreateAdminClient(cluster)
+        .flatMap(ac -> ClusterUtil.toMono(ac.getAdminClient().listConsumerGroups().all())
+            .flatMap(s -> ClusterUtil.toMono(ac.getAdminClient()
+                .describeConsumerGroups(
+                    s.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList()))
+                .all()))
+            .map(s -> s.values().stream()
+                .map(ClusterUtil::convertToConsumerGroup).collect(Collectors.toList())));
+  }
+
+  public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster) {
+    Properties props = new Properties();
+    props.putAll(cluster.getProperties());
+    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui");
+    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
+    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
+    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
+    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+
+    return new KafkaConsumer<>(props);
+  }
+
+  @SneakyThrows
+  public Mono<InternalTopic> updateTopic(KafkaCluster cluster, String topicName,
+                                         TopicFormData topicFormData) {
+    ConfigResource topicCr = new ConfigResource(ConfigResource.Type.TOPIC, topicName);
+    return getOrCreateAdminClient(cluster)
+        .flatMap(ac -> {
+          if (ac.getSupportedFeatures()
+              .contains(ExtendedAdminClient.SupportedFeature.INCREMENTAL_ALTER_CONFIGS)) {
+            return incrementalAlterConfig(topicFormData, topicCr, ac)
+                .flatMap(c -> getUpdatedTopic(ac, topicName));
+          } else {
+            return alterConfig(topicFormData, topicCr, ac)
+                .flatMap(c -> getUpdatedTopic(ac, topicName));
+          }
+        });
+  }
+
+  private Mono<InternalTopic> getUpdatedTopic(ExtendedAdminClient ac, String topicName) {
+    return getTopicsData(ac.getAdminClient())
+        .map(s -> s.stream()
+            .filter(t -> t.getName().equals(topicName)).findFirst().orElseThrow());
+  }
+
+  private Mono<String> incrementalAlterConfig(TopicFormData topicFormData, ConfigResource topicCr,
+                                              ExtendedAdminClient ac) {
+    List<AlterConfigOp> listOp = topicFormData.getConfigs().entrySet().stream()
+        .flatMap(cfg -> Stream.of(new AlterConfigOp(new ConfigEntry(cfg.getKey(), cfg.getValue()),
+            AlterConfigOp.OpType.SET))).collect(Collectors.toList());
+    return ClusterUtil.toMono(
+        ac.getAdminClient().incrementalAlterConfigs(Collections.singletonMap(topicCr, listOp))
+            .all(), topicCr.name());
+  }
+
+  private Mono<String> alterConfig(TopicFormData topicFormData, ConfigResource topicCr,
+                                   ExtendedAdminClient ac) {
+    List<ConfigEntry> configEntries = topicFormData.getConfigs().entrySet().stream()
+        .flatMap(cfg -> Stream.of(new ConfigEntry(cfg.getKey(), cfg.getValue())))
+        .collect(Collectors.toList());
+    Config config = new Config(configEntries);
+    Map<ConfigResource, Config> map = Collections.singletonMap(topicCr, config);
+    return ClusterUtil.toMono(ac.getAdminClient().alterConfigs(map).all(), topicCr.name());
+
+  }
+
+  private InternalTopic mergeWithStats(InternalTopic topic,
+                                       Map<String, LongSummaryStatistics> topics,
+                                       Map<TopicPartition, LongSummaryStatistics> partitions) {
+    final LongSummaryStatistics stats = topics.get(topic.getName());
+
+    return topic.toBuilder()
+        .segmentSize(stats.getSum())
+        .segmentCount(stats.getCount())
+        .partitions(
+            topic.getPartitions().entrySet().stream().map(e ->
+                Tuples.of(e.getKey(), mergeWithStats(topic.getName(), e.getValue(), partitions))
+            ).collect(Collectors.toMap(
+                Tuple2::getT1,
+                Tuple2::getT2
+            ))
+        ).build();
+  }
+
+  private InternalPartition mergeWithStats(String topic, InternalPartition partition,
+                                           Map<TopicPartition, LongSummaryStatistics> partitions) {
+    final LongSummaryStatistics stats =
+        partitions.get(new TopicPartition(topic, partition.getPartition()));
+    return partition.toBuilder()
+        .segmentSize(stats.getSum())
+        .segmentCount(stats.getCount())
+        .build();
+  }
+
+  private Mono<InternalSegmentSizeDto> updateSegmentMetrics(AdminClient ac,
+                                                            InternalClusterMetrics clusterMetrics,
+                                                            List<InternalTopic> internalTopics) {
+    List<String> names =
+        internalTopics.stream().map(InternalTopic::getName).collect(Collectors.toList());
+    return ClusterUtil.toMono(ac.describeTopics(names).all()).flatMap(topic ->
+        ClusterUtil.toMono(ac.describeCluster().nodes()).flatMap(nodes ->
+            ClusterUtil.toMono(
+                ac.describeLogDirs(nodes.stream().map(Node::id).collect(Collectors.toList())).all())
+                .map(log -> {
+                  final List<Tuple3<Integer, TopicPartition, Long>> topicPartitions =
+                      log.entrySet().stream().flatMap(b ->
+                          b.getValue().entrySet().stream().flatMap(topicMap ->
+                              topicMap.getValue().replicaInfos.entrySet().stream()
+                                  .map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size))
+                          )
+                      ).collect(Collectors.toList());
+
+                  final Map<TopicPartition, LongSummaryStatistics> partitionStats =
+                      topicPartitions.stream().collect(
+                          Collectors.groupingBy(
+                              Tuple2::getT2,
+                              Collectors.summarizingLong(Tuple3::getT3)
+                          )
+                      );
+
+                  final Map<String, LongSummaryStatistics> topicStats =
+                      topicPartitions.stream().collect(
+                          Collectors.groupingBy(
+                              t -> t.getT2().topic(),
+                              Collectors.summarizingLong(Tuple3::getT3)
+                          )
+                      );
+
+                  final Map<Integer, LongSummaryStatistics> brokerStats =
+                      topicPartitions.stream().collect(
+                          Collectors.groupingBy(
+                              t -> t.getT1(),
+                              Collectors.summarizingLong(Tuple3::getT3)
+                          )
+                      );
+
+
+                  final LongSummaryStatistics summary =
+                      topicPartitions.stream().collect(Collectors.summarizingLong(Tuple3::getT3));
+
+
+                  final Map<String, InternalTopic> resultTopics = internalTopics.stream().map(e ->
+                      Tuples.of(e.getName(), mergeWithStats(e, topicStats, partitionStats))
+                  ).collect(Collectors.toMap(
+                      Tuple2::getT1,
+                      Tuple2::getT2
+                  ));
+
+                  final Map<Integer, InternalBrokerDiskUsage> resultBrokers =
+                      brokerStats.entrySet().stream().map(e ->
+                          Tuples.of(e.getKey(), InternalBrokerDiskUsage.builder()
+                              .segmentSize(e.getValue().getSum())
+                              .segmentCount(e.getValue().getCount())
+                              .build()
+                          )
+                      ).collect(Collectors.toMap(
+                          Tuple2::getT1,
+                          Tuple2::getT2
+                      ));
+
+                  return InternalSegmentSizeDto.builder()
+                      .clusterMetricsWithSegmentSize(
+                          clusterMetrics.toBuilder()
+                              .segmentSize(summary.getSum())
+                              .segmentCount(summary.getCount())
+                              .internalBrokerDiskUsage(resultBrokers)
+                              .build()
+                      )
+                      .internalTopicWithSegmentSize(resultTopics).build();
+                })
+        )
+    );
+  }
+
+  public List<Metric> getJmxMetric(String clusterName, Node node) {
+    return clustersStorage.getClusterByName(clusterName)
+        .filter(c -> c.getJmxPort() != null)
+        .filter(c -> c.getJmxPort() > 0)
+        .map(c -> jmxClusterUtil.getJmxMetrics(c.getJmxPort(), node.host()))
+        .orElse(Collections.emptyList());
+  }
+
+  private Mono<InternalClusterMetrics> fillJmxMetrics(InternalClusterMetrics internalClusterMetrics,
+                                                      String clusterName, AdminClient ac) {
+    return fillBrokerMetrics(internalClusterMetrics, clusterName, ac)
+        .map(this::calculateClusterMetrics);
+  }
+
+  private Mono<InternalClusterMetrics> fillBrokerMetrics(
+      InternalClusterMetrics internalClusterMetrics, String clusterName, AdminClient ac) {
+    return ClusterUtil.toMono(ac.describeCluster().nodes())
+        .flatMapIterable(nodes -> nodes)
+        .map(broker ->
+            Map.of(broker.id(), InternalBrokerMetrics.builder()
+                .metrics(getJmxMetric(clusterName, broker)).build())
+        )
+        .collectList()
+        .map(s -> internalClusterMetrics.toBuilder()
+            .internalBrokerMetrics(ClusterUtil.toSingleMap(s.stream())).build());
+  }
+
+  private InternalClusterMetrics calculateClusterMetrics(
+      InternalClusterMetrics internalClusterMetrics) {
+    final List<Metric> metrics = internalClusterMetrics.getInternalBrokerMetrics().values().stream()
+        .flatMap(b -> b.getMetrics().stream())
+        .collect(
+            Collectors.groupingBy(
+                Metric::getCanonicalName,
+                Collectors.reducing(jmxClusterUtil::reduceJmxMetrics)
+            )
+        ).values().stream()
+        .filter(Optional::isPresent)
+        .map(Optional::get)
+        .collect(Collectors.toList());
+    final InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder =
+        internalClusterMetrics.toBuilder().metrics(metrics);
+    metricsBuilder.bytesInPerSec(findTopicMetrics(
+        metrics, JmxMetricsName.BytesInPerSec, JmxMetricsValueName.FiveMinuteRate
+    ));
+    metricsBuilder.bytesOutPerSec(findTopicMetrics(
+        metrics, JmxMetricsName.BytesOutPerSec, JmxMetricsValueName.FiveMinuteRate
+    ));
+    return metricsBuilder.build();
+  }
+
+  private Map<String, BigDecimal> findTopicMetrics(List<Metric> metrics, JmxMetricsName metricsName,
+                                                   JmxMetricsValueName valueName) {
+    return metrics.stream().filter(m -> metricsName.name().equals(m.getName()))
+        .filter(m -> m.getParams().containsKey("topic"))
+        .filter(m -> m.getValue().containsKey(valueName.name()))
+        .map(m -> Tuples.of(
+            m.getParams().get("topic"),
+            m.getValue().get(valueName.name())
+        )).collect(Collectors.groupingBy(
+            Tuple2::getT1,
+            Collectors.reducing(BigDecimal.ZERO, Tuple2::getT2, BigDecimal::add)
         ));
-        metricsBuilder.bytesOutPerSec(findTopicMetrics(
-                metrics, JmxMetricsName.BytesOutPerSec, JmxMetricsValueName.FiveMinuteRate
+  }
+
+  public Map<Integer, InternalPartition> getTopicPartitions(KafkaCluster c, InternalTopic topic) {
+    var tps = topic.getPartitions().values().stream()
+        .map(t -> new TopicPartition(topic.getName(), t.getPartition()))
+        .collect(Collectors.toList());
+    Map<Integer, InternalPartition> partitions =
+        topic.getPartitions().values().stream().collect(Collectors.toMap(
+            InternalPartition::getPartition,
+            tp -> tp
         ));
-        return metricsBuilder.build();
-    }
 
-    private Map<String, BigDecimal> findTopicMetrics(List<Metric> metrics, JmxMetricsName metricsName, JmxMetricsValueName valueName) {
-        return metrics.stream().filter(m -> metricsName.name().equals(m.getName()))
-                .filter(m -> m.getParams().containsKey("topic"))
-                .filter(m -> m.getValue().containsKey(valueName.name()))
-                .map(m -> Tuples.of(
-                        m.getParams().get("topic"),
-                        m.getValue().get(valueName.name())
-                )).collect(Collectors.groupingBy(
-                    Tuple2::getT1,
-                    Collectors.reducing(BigDecimal.ZERO, Tuple2::getT2, BigDecimal::add)
-                ));
-    }
-
-    public Map<Integer, InternalPartition> getTopicPartitions(KafkaCluster c, InternalTopic topic )  {
-        var tps = topic.getPartitions().values().stream()
-                .map(t -> new TopicPartition(topic.getName(), t.getPartition()))
-                .collect(Collectors.toList());
-        Map<Integer, InternalPartition> partitions =
-                topic.getPartitions().values().stream().collect(Collectors.toMap(
-                        InternalPartition::getPartition,
-                        tp -> tp
-                ));
-
-        try (var consumer = createConsumer(c)) {
-            final Map<TopicPartition, Long> earliest = consumer.beginningOffsets(tps);
-            final Map<TopicPartition, Long> latest = consumer.endOffsets(tps);
-
-            return tps.stream()
-                    .map( tp -> partitions.get(tp.partition()).toBuilder()
-                            .offsetMin(Optional.ofNullable(earliest.get(tp)).orElse(0L))
-                            .offsetMax(Optional.ofNullable(latest.get(tp)).orElse(0L))
-                            .build()
-                    ).collect(Collectors.toMap(
-                            InternalPartition::getPartition,
-                            tp -> tp
-                    ));
-        } catch (Exception e) {
-            return Collections.emptyMap();
-        }
-    }
-
-    public Mono<Void> deleteTopicMessages(KafkaCluster cluster, Map<TopicPartition, Long> offsets) {
-        var records = offsets.entrySet().stream()
-                .map(entry -> Map.entry(entry.getKey(), RecordsToDelete.beforeOffset(entry.getValue())))
-                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
-        return getOrCreateAdminClient(cluster).map(ExtendedAdminClient::getAdminClient)
-                .map(ac -> ac.deleteRecords(records)).then();
-    }
+    try (var consumer = createConsumer(c)) {
+      final Map<TopicPartition, Long> earliest = consumer.beginningOffsets(tps);
+      final Map<TopicPartition, Long> latest = consumer.endOffsets(tps);
+
+      return tps.stream()
+          .map(tp -> partitions.get(tp.partition()).toBuilder()
+              .offsetMin(Optional.ofNullable(earliest.get(tp)).orElse(0L))
+              .offsetMax(Optional.ofNullable(latest.get(tp)).orElse(0L))
+              .build()
+          ).collect(Collectors.toMap(
+              InternalPartition::getPartition,
+              tp -> tp
+          ));
+    } catch (Exception e) {
+      return Collections.emptyMap();
+    }
+  }
+
+  public Mono<Void> deleteTopicMessages(KafkaCluster cluster, Map<TopicPartition, Long> offsets) {
+    var records = offsets.entrySet().stream()
+        .map(entry -> Map.entry(entry.getKey(), RecordsToDelete.beforeOffset(entry.getValue())))
+        .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
+    return getOrCreateAdminClient(cluster).map(ExtendedAdminClient::getAdminClient)
+        .map(ac -> ac.deleteRecords(records)).then();
+  }
 }
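
A minimal caller sketch for the message-purge API above; kafkaService, cluster, and the topic/offset values are illustrative assumptions rather than part of this change:

    import com.provectus.kafka.ui.model.KafkaCluster;
    import com.provectus.kafka.ui.service.KafkaService;
    import java.util.Map;
    import org.apache.kafka.common.TopicPartition;

    class TopicPurgeSketch {
      // Drop every record below offset 100 in partition 0 of "orders":
      // deleteTopicMessages maps each entry to RecordsToDelete.beforeOffset(...)
      // and delegates to AdminClient#deleteRecords, as in the hunk above.
      void purge(KafkaService kafkaService, KafkaCluster cluster) {
        Map<TopicPartition, Long> offsets = Map.of(new TopicPartition("orders", 0), 100L);
        kafkaService.deleteTopicMessages(cluster, offsets).block();
      }
    }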

+ 5 - 5
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MetricsUpdateService.java

@@ -11,10 +11,10 @@ import reactor.core.publisher.Mono;
 @Log4j2
 public class MetricsUpdateService {
 
-    private final KafkaService kafkaService;
+  private final KafkaService kafkaService;
 
-    public Mono<KafkaCluster> updateMetrics(KafkaCluster kafkaCluster) {
-        log.debug("Start getting metrics for kafkaCluster: {}", kafkaCluster.getName());
-        return kafkaService.getUpdatedCluster(kafkaCluster);
-    }
+  public Mono<KafkaCluster> updateMetrics(KafkaCluster kafkaCluster) {
+    log.debug("Start getting metrics for kafkaCluster: {}", kafkaCluster.getName());
+    return kafkaService.getUpdatedCluster(kafkaCluster);
+  }
 }
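
A hypothetical periodic refresh loop built on updateMetrics above; this is not the project's actual scheduler, and the 30-second interval is an arbitrary illustrative value:

    import com.provectus.kafka.ui.model.KafkaCluster;
    import com.provectus.kafka.ui.service.MetricsUpdateService;
    import java.time.Duration;
    import reactor.core.publisher.Flux;

    class MetricsRefreshSketch {
      // Re-poll cluster metrics on a fixed interval; the emitted KafkaCluster values are ignored here.
      void schedule(MetricsUpdateService metricsUpdateService, KafkaCluster cluster) {
        Flux.interval(Duration.ofSeconds(30))
            .flatMap(tick -> metricsUpdateService.updateMetrics(cluster))
            .subscribe();
      }
    }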

+ 249 - 217
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/SchemaRegistryService.java

@@ -1,15 +1,25 @@
 package com.provectus.kafka.ui.service;
 
+import static org.springframework.http.HttpStatus.NOT_FOUND;
+import static org.springframework.http.HttpStatus.UNPROCESSABLE_ENTITY;
+
 import com.provectus.kafka.ui.exception.DuplicateEntityException;
 import com.provectus.kafka.ui.exception.NotFoundException;
 import com.provectus.kafka.ui.exception.UnprocessableEntityException;
 import com.provectus.kafka.ui.mapper.ClusterMapper;
+import com.provectus.kafka.ui.model.CompatibilityCheckResponse;
+import com.provectus.kafka.ui.model.CompatibilityLevel;
 import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.NewSchemaSubject;
+import com.provectus.kafka.ui.model.SchemaSubject;
+import com.provectus.kafka.ui.model.SchemaType;
 import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityCheck;
 import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityLevel;
 import com.provectus.kafka.ui.model.schemaregistry.InternalNewSchema;
 import com.provectus.kafka.ui.model.schemaregistry.SubjectIdResponse;
-import com.provectus.kafka.ui.model.*;
+import java.util.Formatter;
+import java.util.Objects;
+import java.util.function.Function;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.jetbrains.annotations.NotNull;
@@ -22,249 +32,271 @@ import org.springframework.web.reactive.function.client.WebClient;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
-import java.util.Formatter;
-import java.util.Objects;
-import java.util.function.Function;
-
-import static org.springframework.http.HttpStatus.NOT_FOUND;
-import static org.springframework.http.HttpStatus.UNPROCESSABLE_ENTITY;
-
 @Service
 @Log4j2
 @RequiredArgsConstructor
 public class SchemaRegistryService {
-    public static final String NO_SUCH_SCHEMA_VERSION = "No such schema %s with version %s";
-    public static final String NO_SUCH_SCHEMA = "No such schema %s";
-    public static final String NO_SUCH_CLUSTER = "No such cluster";
+  public static final String NO_SUCH_SCHEMA_VERSION = "No such schema %s with version %s";
+  public static final String NO_SUCH_SCHEMA = "No such schema %s";
+  public static final String NO_SUCH_CLUSTER = "No such cluster";
 
-    private static final String URL_SUBJECTS = "/subjects";
-    private static final String URL_SUBJECT = "/subjects/{schemaName}";
-    private static final String URL_SUBJECT_VERSIONS = "/subjects/{schemaName}/versions";
-    private static final String URL_SUBJECT_BY_VERSION = "/subjects/{schemaName}/versions/{version}";
-    private static final String LATEST = "latest";
+  private static final String URL_SUBJECTS = "/subjects";
+  private static final String URL_SUBJECT = "/subjects/{schemaName}";
+  private static final String URL_SUBJECT_VERSIONS = "/subjects/{schemaName}/versions";
+  private static final String URL_SUBJECT_BY_VERSION = "/subjects/{schemaName}/versions/{version}";
+  private static final String LATEST = "latest";
 
-    private final ClustersStorage clustersStorage;
-    private final ClusterMapper mapper;
-    private final WebClient webClient;
+  private final ClustersStorage clustersStorage;
+  private final ClusterMapper mapper;
+  private final WebClient webClient;
 
-    public Flux<SchemaSubject> getAllLatestVersionSchemas(String clusterName) {
-        var allSubjectNames = getAllSubjectNames(clusterName);
-        return allSubjectNames
-                .flatMapMany(Flux::fromArray)
-                .flatMap(subject -> getLatestSchemaVersionBySubject(clusterName, subject));
-    }
+  public Flux<SchemaSubject> getAllLatestVersionSchemas(String clusterName) {
+    var allSubjectNames = getAllSubjectNames(clusterName);
+    return allSubjectNames
+        .flatMapMany(Flux::fromArray)
+        .flatMap(subject -> getLatestSchemaVersionBySubject(clusterName, subject));
+  }
 
-    public Mono<String[]> getAllSubjectNames(String clusterName) {
-        return clustersStorage.getClusterByName(clusterName)
-                .map(cluster -> webClient.get()
-                        .uri(cluster.getSchemaRegistry() + URL_SUBJECTS)
-                        .retrieve()
-                        .bodyToMono(String[].class)
-                        .doOnError(log::error)
-                )
-                .orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
-    }
+  public Mono<String[]> getAllSubjectNames(String clusterName) {
+    return clustersStorage.getClusterByName(clusterName)
+        .map(cluster -> webClient.get()
+            .uri(cluster.getSchemaRegistry() + URL_SUBJECTS)
+            .retrieve()
+            .bodyToMono(String[].class)
+            .doOnError(log::error)
+        )
+        .orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
+  }
 
-    public Flux<SchemaSubject> getAllVersionsBySubject(String clusterName, String subject) {
-        Flux<Integer> versions = getSubjectVersions(clusterName, subject);
-        return versions.flatMap(version -> getSchemaSubjectByVersion(clusterName, subject, version));
-    }
+  public Flux<SchemaSubject> getAllVersionsBySubject(String clusterName, String subject) {
+    Flux<Integer> versions = getSubjectVersions(clusterName, subject);
+    return versions.flatMap(version -> getSchemaSubjectByVersion(clusterName, subject, version));
+  }
 
-    private Flux<Integer> getSubjectVersions(String clusterName, String schemaName) {
-        return clustersStorage.getClusterByName(clusterName)
-                .map(cluster -> webClient.get()
-                        .uri(cluster.getSchemaRegistry() + URL_SUBJECT_VERSIONS, schemaName)
-                        .retrieve()
-                        .onStatus(NOT_FOUND::equals,
-                                throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA))
-                        ).bodyToFlux(Integer.class)
-                ).orElse(Flux.error(new NotFoundException(NO_SUCH_CLUSTER)));
-    }
+  private Flux<Integer> getSubjectVersions(String clusterName, String schemaName) {
+    return clustersStorage.getClusterByName(clusterName)
+        .map(cluster -> webClient.get()
+            .uri(cluster.getSchemaRegistry() + URL_SUBJECT_VERSIONS, schemaName)
+            .retrieve()
+            .onStatus(NOT_FOUND::equals,
+                throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA))
+            ).bodyToFlux(Integer.class)
+        ).orElse(Flux.error(new NotFoundException(NO_SUCH_CLUSTER)));
+  }
 
-    public Mono<SchemaSubject> getSchemaSubjectByVersion(String clusterName, String schemaName, Integer version) {
-        return this.getSchemaSubject(clusterName, schemaName, String.valueOf(version));
-    }
+  public Mono<SchemaSubject> getSchemaSubjectByVersion(String clusterName, String schemaName,
+                                                       Integer version) {
+    return this.getSchemaSubject(clusterName, schemaName, String.valueOf(version));
+  }
 
-    public Mono<SchemaSubject> getLatestSchemaVersionBySubject(String clusterName, String schemaName) {
-        return this.getSchemaSubject(clusterName, schemaName, LATEST);
-    }
+  public Mono<SchemaSubject> getLatestSchemaVersionBySubject(String clusterName,
+                                                             String schemaName) {
+    return this.getSchemaSubject(clusterName, schemaName, LATEST);
+  }
 
-    private Mono<SchemaSubject> getSchemaSubject(String clusterName, String schemaName, String version) {
-        return clustersStorage.getClusterByName(clusterName)
-                .map(cluster -> webClient.get()
-                        .uri(cluster.getSchemaRegistry() + URL_SUBJECT_BY_VERSION, schemaName, version)
-                        .retrieve()
-                        .onStatus(NOT_FOUND::equals,
-                                throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA_VERSION, schemaName, version))
-                        ).bodyToMono(SchemaSubject.class)
-                        .map(this::withSchemaType)
-                        .zipWith(getSchemaCompatibilityInfoOrGlobal(clusterName, schemaName))
-                        .map(tuple -> {
-                            SchemaSubject schema = tuple.getT1();
-                            String compatibilityLevel = tuple.getT2().getCompatibility().getValue();
-                            schema.setCompatibilityLevel(compatibilityLevel);
-                            return schema;
-                        })
-                )
-                .orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
-    }
+  private Mono<SchemaSubject> getSchemaSubject(String clusterName, String schemaName,
+                                               String version) {
+    return clustersStorage.getClusterByName(clusterName)
+        .map(cluster -> webClient.get()
+            .uri(cluster.getSchemaRegistry() + URL_SUBJECT_BY_VERSION, schemaName, version)
+            .retrieve()
+            .onStatus(NOT_FOUND::equals,
+                throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA_VERSION, schemaName, version))
+            ).bodyToMono(SchemaSubject.class)
+            .map(this::withSchemaType)
+            .zipWith(getSchemaCompatibilityInfoOrGlobal(clusterName, schemaName))
+            .map(tuple -> {
+              SchemaSubject schema = tuple.getT1();
+              String compatibilityLevel = tuple.getT2().getCompatibility().getValue();
+              schema.setCompatibilityLevel(compatibilityLevel);
+              return schema;
+            })
+        )
+        .orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
+  }
 
-    /**
-     * If {@link SchemaSubject#getSchemaType()} is null, then AVRO, otherwise, adds the schema type as is.
-     */
-    @NotNull
-    private SchemaSubject withSchemaType(SchemaSubject s) {
-        SchemaType schemaType = Objects.nonNull(s.getSchemaType()) ? s.getSchemaType() : SchemaType.AVRO;
-        return new SchemaSubject()
-                .schema(s.getSchema())
-                .subject(s.getSubject())
-                .version(s.getVersion())
-                .id(s.getId())
-                .schemaType(schemaType);
-    }
+  /**
+   * If {@link SchemaSubject#getSchemaType()} is null, then AVRO, otherwise,
+   * adds the schema type as is.
+   */
+  @NotNull
+  private SchemaSubject withSchemaType(SchemaSubject s) {
+    SchemaType schemaType =
+        Objects.nonNull(s.getSchemaType()) ? s.getSchemaType() : SchemaType.AVRO;
+    return new SchemaSubject()
+        .schema(s.getSchema())
+        .subject(s.getSubject())
+        .version(s.getVersion())
+        .id(s.getId())
+        .schemaType(schemaType);
+  }
 
-    public Mono<ResponseEntity<Void>> deleteSchemaSubjectByVersion(String clusterName, String schemaName, Integer version) {
-        return this.deleteSchemaSubject(clusterName, schemaName, String.valueOf(version));
-    }
+  public Mono<ResponseEntity<Void>> deleteSchemaSubjectByVersion(String clusterName,
+                                                                 String schemaName,
+                                                                 Integer version) {
+    return this.deleteSchemaSubject(clusterName, schemaName, String.valueOf(version));
+  }
 
-    public Mono<ResponseEntity<Void>> deleteLatestSchemaSubject(String clusterName, String schemaName) {
-        return this.deleteSchemaSubject(clusterName, schemaName, LATEST);
-    }
+  public Mono<ResponseEntity<Void>> deleteLatestSchemaSubject(String clusterName,
+                                                              String schemaName) {
+    return this.deleteSchemaSubject(clusterName, schemaName, LATEST);
+  }
 
-    private Mono<ResponseEntity<Void>> deleteSchemaSubject(String clusterName, String schemaName, String version) {
-        return clustersStorage.getClusterByName(clusterName)
-                .map(cluster -> webClient.delete()
-                        .uri(cluster.getSchemaRegistry() + URL_SUBJECT_BY_VERSION, schemaName, version)
-                        .retrieve()
-                        .onStatus(NOT_FOUND::equals,
-                                throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA_VERSION, schemaName, version))
-                        ).toBodilessEntity()
-                ).orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
-    }
+  private Mono<ResponseEntity<Void>> deleteSchemaSubject(String clusterName, String schemaName,
+                                                         String version) {
+    return clustersStorage.getClusterByName(clusterName)
+        .map(cluster -> webClient.delete()
+            .uri(cluster.getSchemaRegistry() + URL_SUBJECT_BY_VERSION, schemaName, version)
+            .retrieve()
+            .onStatus(NOT_FOUND::equals,
+                throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA_VERSION, schemaName, version))
+            ).toBodilessEntity()
+        ).orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
+  }
 
-    public Mono<ResponseEntity<Void>> deleteSchemaSubjectEntirely(String clusterName, String schemaName) {
-        return clustersStorage.getClusterByName(clusterName)
-                .map(cluster -> webClient.delete()
-                        .uri(cluster.getSchemaRegistry() + URL_SUBJECT, schemaName)
-                        .retrieve()
-                        .onStatus(NOT_FOUND::equals, throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName))
-                        )
-                        .toBodilessEntity())
-                .orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
-    }
+  public Mono<ResponseEntity<Void>> deleteSchemaSubjectEntirely(String clusterName,
+                                                                String schemaName) {
+    return clustersStorage.getClusterByName(clusterName)
+        .map(cluster -> webClient.delete()
+            .uri(cluster.getSchemaRegistry() + URL_SUBJECT, schemaName)
+            .retrieve()
+            .onStatus(NOT_FOUND::equals,
+                throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName))
+            )
+            .toBodilessEntity())
+        .orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
+  }
 
-    /**
-     * Checks whether the provided schema duplicates the previous or not, creates a new schema
-     * and then returns the whole content by requesting its latest version.
-     */
-    public Mono<SchemaSubject> registerNewSchema(String clusterName, Mono<NewSchemaSubject> newSchemaSubject) {
-        return newSchemaSubject
-                .flatMap(schema -> {
-                    SchemaType schemaType = SchemaType.AVRO == schema.getSchemaType() ? null : schema.getSchemaType();
-                    Mono<InternalNewSchema> newSchema = Mono.just(new InternalNewSchema(schema.getSchema(), schemaType));
-                    String subject = schema.getSubject();
-                    return clustersStorage.getClusterByName(clusterName)
-                            .map(KafkaCluster::getSchemaRegistry)
-                            .map(schemaRegistryUrl -> checkSchemaOnDuplicate(subject, newSchema, schemaRegistryUrl)
-                                    .flatMap(s -> submitNewSchema(subject, newSchema, schemaRegistryUrl))
-                                    .flatMap(resp -> getLatestSchemaVersionBySubject(clusterName, subject))
-                            )
-                            .orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
-                });
-    }
+  /**
+   * Checks whether the provided schema duplicates the previous or not, creates a new schema
+   * and then returns the whole content by requesting its latest version.
+   */
+  public Mono<SchemaSubject> registerNewSchema(String clusterName,
+                                               Mono<NewSchemaSubject> newSchemaSubject) {
+    return newSchemaSubject
+        .flatMap(schema -> {
+          SchemaType schemaType =
+              SchemaType.AVRO == schema.getSchemaType() ? null : schema.getSchemaType();
+          Mono<InternalNewSchema> newSchema =
+              Mono.just(new InternalNewSchema(schema.getSchema(), schemaType));
+          String subject = schema.getSubject();
+          return clustersStorage.getClusterByName(clusterName)
+              .map(KafkaCluster::getSchemaRegistry)
+              .map(
+                  schemaRegistryUrl -> checkSchemaOnDuplicate(subject, newSchema, schemaRegistryUrl)
+                      .flatMap(s -> submitNewSchema(subject, newSchema, schemaRegistryUrl))
+                      .flatMap(resp -> getLatestSchemaVersionBySubject(clusterName, subject))
+              )
+              .orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
+        });
+  }
 
-    @NotNull
-    private Mono<SubjectIdResponse> submitNewSchema(String subject, Mono<InternalNewSchema> newSchemaSubject, String schemaRegistryUrl) {
-        return webClient.post()
-                .uri(schemaRegistryUrl + URL_SUBJECT_VERSIONS, subject)
-                .contentType(MediaType.APPLICATION_JSON)
-                .body(BodyInserters.fromPublisher(newSchemaSubject, InternalNewSchema.class))
-                .retrieve()
-                .onStatus(UNPROCESSABLE_ENTITY::equals, r -> Mono.error(new UnprocessableEntityException("Invalid params")))
-                .bodyToMono(SubjectIdResponse.class);
-    }
+  @NotNull
+  private Mono<SubjectIdResponse> submitNewSchema(String subject,
+                                                  Mono<InternalNewSchema> newSchemaSubject,
+                                                  String schemaRegistryUrl) {
+    return webClient.post()
+        .uri(schemaRegistryUrl + URL_SUBJECT_VERSIONS, subject)
+        .contentType(MediaType.APPLICATION_JSON)
+        .body(BodyInserters.fromPublisher(newSchemaSubject, InternalNewSchema.class))
+        .retrieve()
+        .onStatus(UNPROCESSABLE_ENTITY::equals,
+            r -> Mono.error(new UnprocessableEntityException("Invalid params")))
+        .bodyToMono(SubjectIdResponse.class);
+  }
 
-    @NotNull
-    private Mono<SchemaSubject> checkSchemaOnDuplicate(String subject, Mono<InternalNewSchema> newSchemaSubject, String schemaRegistryUrl) {
-        return webClient.post()
-                .uri(schemaRegistryUrl + URL_SUBJECT, subject)
-                .contentType(MediaType.APPLICATION_JSON)
-                .body(BodyInserters.fromPublisher(newSchemaSubject, InternalNewSchema.class))
-                .retrieve()
-                .onStatus(NOT_FOUND::equals, res -> Mono.empty())
-                .onStatus(UNPROCESSABLE_ENTITY::equals, r -> Mono.error(new UnprocessableEntityException("Invalid params")))
-                .bodyToMono(SchemaSubject.class)
-                .filter(s -> Objects.isNull(s.getId()))
-                .switchIfEmpty(Mono.error(new DuplicateEntityException("Such schema already exists")));
-    }
+  @NotNull
+  private Mono<SchemaSubject> checkSchemaOnDuplicate(String subject,
+                                                     Mono<InternalNewSchema> newSchemaSubject,
+                                                     String schemaRegistryUrl) {
+    return webClient.post()
+        .uri(schemaRegistryUrl + URL_SUBJECT, subject)
+        .contentType(MediaType.APPLICATION_JSON)
+        .body(BodyInserters.fromPublisher(newSchemaSubject, InternalNewSchema.class))
+        .retrieve()
+        .onStatus(NOT_FOUND::equals, res -> Mono.empty())
+        .onStatus(UNPROCESSABLE_ENTITY::equals,
+            r -> Mono.error(new UnprocessableEntityException("Invalid params")))
+        .bodyToMono(SchemaSubject.class)
+        .filter(s -> Objects.isNull(s.getId()))
+        .switchIfEmpty(Mono.error(new DuplicateEntityException("Such schema already exists")));
+  }
 
-    @NotNull
-    private Function<ClientResponse, Mono<? extends Throwable>> throwIfNotFoundStatus(String formatted) {
-        return resp -> Mono.error(new NotFoundException(formatted));
-    }
+  @NotNull
+  private Function<ClientResponse, Mono<? extends Throwable>> throwIfNotFoundStatus(
+      String formatted) {
+    return resp -> Mono.error(new NotFoundException(formatted));
+  }
 
-    /**
-     * Updates a compatibility level for a <code>schemaName</code>
-     *
-     * @param schemaName is a schema subject name
-     * @see com.provectus.kafka.ui.model.CompatibilityLevel.CompatibilityEnum
-     */
-    public Mono<Void> updateSchemaCompatibility(String clusterName, String schemaName, Mono<CompatibilityLevel> compatibilityLevel) {
-        return clustersStorage.getClusterByName(clusterName)
-                .map(cluster -> {
-                    String configEndpoint = Objects.isNull(schemaName) ? "/config" : "/config/{schemaName}";
-                    return webClient.put()
-                            .uri(cluster.getSchemaRegistry() + configEndpoint, schemaName)
-                            .contentType(MediaType.APPLICATION_JSON)
-                            .body(BodyInserters.fromPublisher(compatibilityLevel, CompatibilityLevel.class))
-                            .retrieve()
-                            .onStatus(NOT_FOUND::equals,
-                                    throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName)))
-                            .bodyToMono(Void.class);
-                }).orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
-    }
+  /**
+   * Updates a compatibility level for a <code>schemaName</code>.
+   *
+   * @param schemaName is a schema subject name
+   * @see com.provectus.kafka.ui.model.CompatibilityLevel.CompatibilityEnum
+   */
+  public Mono<Void> updateSchemaCompatibility(String clusterName, String schemaName,
+                                              Mono<CompatibilityLevel> compatibilityLevel) {
+    return clustersStorage.getClusterByName(clusterName)
+        .map(cluster -> {
+          String configEndpoint = Objects.isNull(schemaName) ? "/config" : "/config/{schemaName}";
+          return webClient.put()
+              .uri(cluster.getSchemaRegistry() + configEndpoint, schemaName)
+              .contentType(MediaType.APPLICATION_JSON)
+              .body(BodyInserters.fromPublisher(compatibilityLevel, CompatibilityLevel.class))
+              .retrieve()
+              .onStatus(NOT_FOUND::equals,
+                  throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName)))
+              .bodyToMono(Void.class);
+        }).orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
+  }
 
-    public Mono<Void> updateSchemaCompatibility(String clusterName, Mono<CompatibilityLevel> compatibilityLevel) {
-        return updateSchemaCompatibility(clusterName, null, compatibilityLevel);
-    }
+  public Mono<Void> updateSchemaCompatibility(String clusterName,
+                                              Mono<CompatibilityLevel> compatibilityLevel) {
+    return updateSchemaCompatibility(clusterName, null, compatibilityLevel);
+  }
 
-    public Mono<CompatibilityLevel> getSchemaCompatibilityLevel(String clusterName, String schemaName) {
-        return clustersStorage.getClusterByName(clusterName)
-                .map(cluster -> {
-                    String configEndpoint = Objects.isNull(schemaName) ? "/config" : "/config/{schemaName}";
-                    return webClient.get()
-                            .uri(cluster.getSchemaRegistry() + configEndpoint, schemaName)
-                            .retrieve()
-                            .bodyToMono(InternalCompatibilityLevel.class)
-                            .map(mapper::toCompatibilityLevel)
-                            .onErrorResume(error -> Mono.empty());
-                }).orElse(Mono.empty());
-    }
+  public Mono<CompatibilityLevel> getSchemaCompatibilityLevel(String clusterName,
+                                                              String schemaName) {
+    return clustersStorage.getClusterByName(clusterName)
+        .map(cluster -> {
+          String configEndpoint = Objects.isNull(schemaName) ? "/config" : "/config/{schemaName}";
+          return webClient.get()
+              .uri(cluster.getSchemaRegistry() + configEndpoint, schemaName)
+              .retrieve()
+              .bodyToMono(InternalCompatibilityLevel.class)
+              .map(mapper::toCompatibilityLevel)
+              .onErrorResume(error -> Mono.empty());
+        }).orElse(Mono.empty());
+  }
 
-    public Mono<CompatibilityLevel> getGlobalSchemaCompatibilityLevel(String clusterName) {
-        return this.getSchemaCompatibilityLevel(clusterName, null);
-    }
+  public Mono<CompatibilityLevel> getGlobalSchemaCompatibilityLevel(String clusterName) {
+    return this.getSchemaCompatibilityLevel(clusterName, null);
+  }
 
-    private Mono<CompatibilityLevel> getSchemaCompatibilityInfoOrGlobal(String clusterName, String schemaName) {
-        return this.getSchemaCompatibilityLevel(clusterName, schemaName)
-                .switchIfEmpty(this.getGlobalSchemaCompatibilityLevel(clusterName));
-    }
+  private Mono<CompatibilityLevel> getSchemaCompatibilityInfoOrGlobal(String clusterName,
+                                                                      String schemaName) {
+    return this.getSchemaCompatibilityLevel(clusterName, schemaName)
+        .switchIfEmpty(this.getGlobalSchemaCompatibilityLevel(clusterName));
+  }
 
-    public Mono<CompatibilityCheckResponse> checksSchemaCompatibility(String clusterName, String schemaName, Mono<NewSchemaSubject> newSchemaSubject) {
-        return clustersStorage.getClusterByName(clusterName)
-                .map(cluster -> webClient.post()
-                        .uri(cluster.getSchemaRegistry() + "/compatibility/subjects/{schemaName}/versions/latest", schemaName)
-                        .contentType(MediaType.APPLICATION_JSON)
-                        .body(BodyInserters.fromPublisher(newSchemaSubject, NewSchemaSubject.class))
-                        .retrieve()
-                        .onStatus(NOT_FOUND::equals, throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName)))
-                        .bodyToMono(InternalCompatibilityCheck.class)
-                        .map(mapper::toCompatibilityCheckResponse)
-                        .log()
-                ).orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
-    }
+  public Mono<CompatibilityCheckResponse> checksSchemaCompatibility(
+      String clusterName, String schemaName, Mono<NewSchemaSubject> newSchemaSubject) {
+    return clustersStorage.getClusterByName(clusterName)
+        .map(cluster -> webClient.post()
+            .uri(cluster.getSchemaRegistry()
+                + "/compatibility/subjects/{schemaName}/versions/latest", schemaName)
+            .contentType(MediaType.APPLICATION_JSON)
+            .body(BodyInserters.fromPublisher(newSchemaSubject, NewSchemaSubject.class))
+            .retrieve()
+            .onStatus(NOT_FOUND::equals,
+                throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName)))
+            .bodyToMono(InternalCompatibilityCheck.class)
+            .map(mapper::toCompatibilityCheckResponse)
+            .log()
+        ).orElse(Mono.error(new NotFoundException(NO_SUCH_CLUSTER)));
+  }
 
-    public String formatted(String str, Object... args) {
-        return new Formatter().format(str, args).toString();
-    }
+  public String formatted(String str, Object... args) {
+    return new Formatter().format(str, args).toString();
+  }
 }
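
A caller sketch for the lookup path above; the cluster and subject names are illustrative, and the generated SchemaSubject model is assumed to expose getters matching the setters used in the service. Per getSchemaSubject, the returned subject already carries the per-subject (or global fallback) compatibility level:

    import com.provectus.kafka.ui.service.SchemaRegistryService;
    import reactor.core.publisher.Mono;

    class SchemaLookupSketch {
      // Resolve the latest registered version of a subject and summarise it as a string.
      Mono<String> describeLatest(SchemaRegistryService schemaRegistryService) {
        return schemaRegistryService.getLatestSchemaVersionBySubject("local", "orders-value")
            .map(s -> s.getSubject() + " v" + s.getVersion()
                + " (compatibility: " + s.getCompatibilityLevel() + ")");
      }
    }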

+ 24 - 25
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ZookeeperService.java

@@ -1,45 +1,44 @@
 package com.provectus.kafka.ui.service;
 
 import com.provectus.kafka.ui.model.KafkaCluster;
+import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.I0Itec.zkclient.ZkClient;
 import org.springframework.stereotype.Service;
 
-import java.util.Map;
-
 @Service
 @RequiredArgsConstructor
 @Log4j2
 public class ZookeeperService {
 
-    private final Map<String, ZkClient> cachedZkClient = new ConcurrentHashMap<>();
+  private final Map<String, ZkClient> cachedZkClient = new ConcurrentHashMap<>();
 
-    public boolean isZookeeperOnline(KafkaCluster kafkaCluster) {
-        var isConnected = false;
-        var zkClient = getOrCreateZkClient(kafkaCluster);
-        log.debug("Start getting Zookeeper metrics for kafkaCluster: {}", kafkaCluster.getName());
-        if (zkClient != null) {
-            isConnected = isZkClientConnected(zkClient);
-        }
-        return isConnected;
+  public boolean isZookeeperOnline(KafkaCluster kafkaCluster) {
+    var isConnected = false;
+    var zkClient = getOrCreateZkClient(kafkaCluster);
+    log.debug("Start getting Zookeeper metrics for kafkaCluster: {}", kafkaCluster.getName());
+    if (zkClient != null) {
+      isConnected = isZkClientConnected(zkClient);
     }
+    return isConnected;
+  }
 
-    private boolean isZkClientConnected(ZkClient zkClient) {
-        zkClient.getChildren("/brokers/ids");
-        return true;
-    }
+  private boolean isZkClientConnected(ZkClient zkClient) {
+    zkClient.getChildren("/brokers/ids");
+    return true;
+  }
 
-    private ZkClient getOrCreateZkClient (KafkaCluster cluster) {
-        try {
-            return cachedZkClient.computeIfAbsent(
-                cluster.getName(),
-                (n) -> new ZkClient(cluster.getZookeeper(), 1000)
-            );
-        } catch (Exception e) {
-            log.error("Error while creating zookeeper client for cluster {}", cluster.getName());
-            return null;
-        }
+  private ZkClient getOrCreateZkClient(KafkaCluster cluster) {
+    try {
+      return cachedZkClient.computeIfAbsent(
+          cluster.getName(),
+          (n) -> new ZkClient(cluster.getZookeeper(), 1000)
+      );
+    } catch (Exception e) {
+      log.error("Error while creating zookeeper client for cluster {}", cluster.getName());
+      return null;
     }
+  }
 }
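
A small health-probe sketch around isZookeeperOnline; resolving the KafkaCluster from ClustersStorage is assumed to happen elsewhere:

    import com.provectus.kafka.ui.model.KafkaCluster;
    import com.provectus.kafka.ui.service.ZookeeperService;

    class ZookeeperHealthSketch {
      // Returns false when the cached ZkClient could not be created (creation errors are
      // caught and logged inside the service); a failing /brokers/ids read in
      // isZkClientConnected would propagate as an exception rather than return false.
      boolean probe(ZookeeperService zookeeperService, KafkaCluster cluster) {
        return zookeeperService.isZookeeperOnline(cluster);
      }
    }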

+ 229 - 199
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java

@@ -1,9 +1,37 @@
 package com.provectus.kafka.ui.util;
 
+import static com.provectus.kafka.ui.util.KafkaConstants.TOPIC_DEFAULT_CONFIGS;
+import static org.apache.kafka.common.config.TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG;
+
 import com.provectus.kafka.ui.deserialization.RecordDeserializer;
-import com.provectus.kafka.ui.model.*;
+import com.provectus.kafka.ui.model.ConsumerGroup;
+import com.provectus.kafka.ui.model.ConsumerTopicPartitionDetail;
+import com.provectus.kafka.ui.model.ExtendedAdminClient;
+import com.provectus.kafka.ui.model.InternalPartition;
+import com.provectus.kafka.ui.model.InternalReplica;
+import com.provectus.kafka.ui.model.InternalTopic;
+import com.provectus.kafka.ui.model.InternalTopicConfig;
+import com.provectus.kafka.ui.model.ServerStatus;
+import com.provectus.kafka.ui.model.TopicMessage;
+import java.time.Instant;
+import java.time.OffsetDateTime;
+import java.time.ZoneId;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.admin.*;
+import org.apache.kafka.clients.admin.AdminClient;
+import org.apache.kafka.clients.admin.Config;
+import org.apache.kafka.clients.admin.ConfigEntry;
+import org.apache.kafka.clients.admin.ConsumerGroupDescription;
+import org.apache.kafka.clients.admin.MemberDescription;
+import org.apache.kafka.clients.admin.TopicDescription;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
 import org.apache.kafka.common.KafkaFuture;
@@ -14,210 +42,212 @@ import org.apache.kafka.common.record.TimestampType;
 import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.Mono;
 
-import java.time.Instant;
-import java.time.OffsetDateTime;
-import java.time.ZoneId;
-import java.util.*;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-
-import static com.provectus.kafka.ui.util.KafkaConstants.TOPIC_DEFAULT_CONFIGS;
-import static org.apache.kafka.common.config.TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG;
-
 @Slf4j
 public class ClusterUtil {
 
-    private static final String CLUSTER_VERSION_PARAM_KEY = "inter.broker.protocol.version";
-
-    private static final ZoneId UTC_ZONE_ID = ZoneId.of("UTC");
-
-    public static <T> Mono<T> toMono(KafkaFuture<T> future) {
-        return Mono.create(sink -> future.whenComplete((res, ex) -> {
-            if (ex != null) {
-                sink.error(ex);
-            } else {
-                sink.success(res);
-            }
-        }));
+  private static final String CLUSTER_VERSION_PARAM_KEY = "inter.broker.protocol.version";
+
+  private static final ZoneId UTC_ZONE_ID = ZoneId.of("UTC");
+
+  public static <T> Mono<T> toMono(KafkaFuture<T> future) {
+    return Mono.create(sink -> future.whenComplete((res, ex) -> {
+      if (ex != null) {
+        sink.error(ex);
+      } else {
+        sink.success(res);
+      }
+    }));
+  }
+
+  public static Mono<String> toMono(KafkaFuture<Void> future, String topicName) {
+    return Mono.create(sink -> future.whenComplete((res, ex) -> {
+      if (ex != null) {
+        sink.error(ex);
+      } else {
+        sink.success(topicName);
+      }
+    }));
+  }
+
+  public static ConsumerGroup convertToConsumerGroup(ConsumerGroupDescription c) {
+    ConsumerGroup consumerGroup = new ConsumerGroup();
+    consumerGroup.setConsumerGroupId(c.groupId());
+    consumerGroup.setNumConsumers(c.members().size());
+    int numTopics = c.members().stream()
+        .flatMap(m -> m.assignment().topicPartitions().stream().flatMap(t -> Stream.of(t.topic())))
+        .collect(Collectors.toSet()).size();
+    consumerGroup.setNumTopics(numTopics);
+    return consumerGroup;
+  }
+
+  public static List<ConsumerTopicPartitionDetail> convertToConsumerTopicPartitionDetails(
+      MemberDescription consumer,
+      Map<TopicPartition, OffsetAndMetadata> groupOffsets,
+      Map<TopicPartition, Long> endOffsets
+  ) {
+    return consumer.assignment().topicPartitions().stream()
+        .map(tp -> {
+          Long currentOffset = Optional.ofNullable(
+              groupOffsets.get(tp)).map(o -> o.offset()).orElse(0L);
+          Long endOffset = Optional.ofNullable(endOffsets.get(tp)).orElse(0L);
+          ConsumerTopicPartitionDetail cd = new ConsumerTopicPartitionDetail();
+          cd.setConsumerId(consumer.consumerId());
+          cd.setHost(consumer.host());
+          cd.setTopic(tp.topic());
+          cd.setPartition(tp.partition());
+          cd.setCurrentOffset(currentOffset);
+          cd.setEndOffset(endOffset);
+          cd.setMessagesBehind(endOffset - currentOffset);
+          return cd;
+        }).collect(Collectors.toList());
+  }
+
+
+  public static InternalTopicConfig mapToInternalTopicConfig(ConfigEntry configEntry) {
+    InternalTopicConfig.InternalTopicConfigBuilder builder = InternalTopicConfig.builder()
+        .name(configEntry.name())
+        .value(configEntry.value());
+    if (configEntry.name().equals(MESSAGE_FORMAT_VERSION_CONFIG)) {
+      builder.defaultValue(configEntry.value());
+    } else {
+      builder.defaultValue(TOPIC_DEFAULT_CONFIGS.get(configEntry.name()));
     }
-
-    public static Mono<String> toMono(KafkaFuture<Void> future, String topicName) {
-        return Mono.create(sink -> future.whenComplete((res, ex) -> {
-            if (ex != null) {
-                sink.error(ex);
-            } else {
-                sink.success(topicName);
-            }
-        }));
-    }
-
-    public static ConsumerGroup convertToConsumerGroup(ConsumerGroupDescription c) {
-        ConsumerGroup consumerGroup = new ConsumerGroup();
-        consumerGroup.setConsumerGroupId(c.groupId());
-        consumerGroup.setNumConsumers(c.members().size());
-        int numTopics = c.members().stream().flatMap(m -> m.assignment().topicPartitions().stream().flatMap(t -> Stream.of(t.topic()))).collect(Collectors.toSet()).size();
-        consumerGroup.setNumTopics(numTopics);
-        return consumerGroup;
-    }
-
-    public static List<ConsumerTopicPartitionDetail> convertToConsumerTopicPartitionDetails(
-            MemberDescription consumer,
-            Map<TopicPartition, OffsetAndMetadata> groupOffsets,
-            Map<TopicPartition, Long> endOffsets
-    ) {
-        return consumer.assignment().topicPartitions().stream()
-                .map(tp -> {
-                    Long currentOffset = Optional.ofNullable(
-                            groupOffsets.get(tp)).map(o -> o.offset()).orElse(0L);
-                    Long endOffset = Optional.ofNullable(endOffsets.get(tp)).orElse(0L);
-                    ConsumerTopicPartitionDetail cd = new ConsumerTopicPartitionDetail();
-                    cd.setConsumerId(consumer.consumerId());
-                    cd.setHost(consumer.host());
-                    cd.setTopic(tp.topic());
-                    cd.setPartition(tp.partition());
-                    cd.setCurrentOffset(currentOffset);
-                    cd.setEndOffset(endOffset);
-                    cd.setMessagesBehind(endOffset - currentOffset);
-                    return cd;
-                }).collect(Collectors.toList());
+    return builder.build();
+  }
+
+  public static InternalTopic mapToInternalTopic(TopicDescription topicDescription) {
+    var topic = InternalTopic.builder();
+    topic.internal(topicDescription.isInternal());
+    topic.name(topicDescription.name());
+
+    List<InternalPartition> partitions = topicDescription.partitions().stream().map(
+        partition -> {
+          var partitionDto = InternalPartition.builder();
+          partitionDto.leader(partition.leader().id());
+          partitionDto.partition(partition.partition());
+          partitionDto.inSyncReplicasCount(partition.isr().size());
+          partitionDto.replicasCount(partition.replicas().size());
+          List<InternalReplica> replicas = partition.replicas().stream().map(
+              r -> new InternalReplica(r.id(), partition.leader().id() != r.id(),
+                  partition.isr().contains(r)))
+              .collect(Collectors.toList());
+          partitionDto.replicas(replicas);
+          return partitionDto.build();
+        })
+        .collect(Collectors.toList());
+
+    int urpCount = partitions.stream()
+        .flatMap(partition -> partition.getReplicas().stream())
+        .filter(p -> !p.isInSync()).mapToInt(e -> 1)
+        .sum();
+
+    int inSyncReplicasCount = partitions.stream()
+        .mapToInt(InternalPartition::getInSyncReplicasCount)
+        .sum();
+
+    int replicasCount = partitions.stream()
+        .mapToInt(InternalPartition::getReplicasCount)
+        .sum();
+
+    topic.partitions(partitions.stream().collect(Collectors.toMap(
+        InternalPartition::getPartition,
+        t -> t
+    )));
+    topic.replicas(replicasCount);
+    topic.partitionCount(topicDescription.partitions().size());
+    topic.inSyncReplicas(inSyncReplicasCount);
+
+    topic.replicationFactor(
+        topicDescription.partitions().size() > 0
+            ? topicDescription.partitions().get(0).replicas().size()
+            : 0
+    );
+
+    topic.underReplicatedPartitions(urpCount);
+
+    return topic.build();
+  }
+
+  public static int convertToIntServerStatus(ServerStatus serverStatus) {
+    return serverStatus.equals(ServerStatus.ONLINE) ? 1 : 0;
+  }
+
+  public static TopicMessage mapToTopicMessage(ConsumerRecord<Bytes, Bytes> consumerRecord,
+                                               RecordDeserializer recordDeserializer) {
+    Map<String, String> headers = new HashMap<>();
+    consumerRecord.headers().iterator()
+        .forEachRemaining(header -> headers.put(header.key(), new String(header.value())));
+
+    TopicMessage topicMessage = new TopicMessage();
+
+    OffsetDateTime timestamp =
+        OffsetDateTime.ofInstant(Instant.ofEpochMilli(consumerRecord.timestamp()), UTC_ZONE_ID);
+    TopicMessage.TimestampTypeEnum timestampType =
+        mapToTimestampType(consumerRecord.timestampType());
+    topicMessage.setPartition(consumerRecord.partition());
+    topicMessage.setOffset(consumerRecord.offset());
+    topicMessage.setTimestamp(timestamp);
+    topicMessage.setTimestampType(timestampType);
+    if (consumerRecord.key() != null) {
+      topicMessage.setKey(consumerRecord.key().toString());
     }
-
-
-    public static InternalTopicConfig mapToInternalTopicConfig(ConfigEntry configEntry) {
-        InternalTopicConfig.InternalTopicConfigBuilder builder = InternalTopicConfig.builder()
-                .name(configEntry.name())
-                .value(configEntry.value());
-        if (configEntry.name().equals(MESSAGE_FORMAT_VERSION_CONFIG)) {
-            builder.defaultValue(configEntry.value());
-        } else {
-            builder.defaultValue(TOPIC_DEFAULT_CONFIGS.get(configEntry.name()));
-        }
-        return builder.build();
+    topicMessage.setHeaders(headers);
+    Object parsedValue = recordDeserializer.deserialize(consumerRecord);
+    topicMessage.setContent(parsedValue);
+
+    return topicMessage;
+  }
+
+  private static TopicMessage.TimestampTypeEnum mapToTimestampType(TimestampType timestampType) {
+    switch (timestampType) {
+      case CREATE_TIME:
+        return TopicMessage.TimestampTypeEnum.CREATE_TIME;
+      case LOG_APPEND_TIME:
+        return TopicMessage.TimestampTypeEnum.LOG_APPEND_TIME;
+      case NO_TIMESTAMP_TYPE:
+        return TopicMessage.TimestampTypeEnum.NO_TIMESTAMP_TYPE;
+      default:
+        throw new IllegalArgumentException("Unknown timestampType: " + timestampType);
     }
-
-    public static InternalTopic mapToInternalTopic(TopicDescription topicDescription) {
-        var topic = InternalTopic.builder();
-        topic.internal(topicDescription.isInternal());
-        topic.name(topicDescription.name());
-
-        List<InternalPartition> partitions = topicDescription.partitions().stream().map(
-                partition -> {
-                    var partitionDto = InternalPartition.builder();
-                    partitionDto.leader(partition.leader().id());
-                    partitionDto.partition(partition.partition());
-                    partitionDto.inSyncReplicasCount(partition.isr().size());
-                    partitionDto.replicasCount(partition.replicas().size());
-                    List<InternalReplica> replicas = partition.replicas().stream().map(
-                            r -> new InternalReplica(r.id(), partition.leader().id() != r.id(), partition.isr().contains(r)))
-                            .collect(Collectors.toList());
-                    partitionDto.replicas(replicas);
-                    return partitionDto.build();
-                })
-                .collect(Collectors.toList());
-
-        int urpCount = partitions.stream()
-                .flatMap(partition -> partition.getReplicas().stream())
-                .filter(p -> !p.isInSync()).mapToInt(e -> 1)
-                .sum();
-
-        int inSyncReplicasCount = partitions.stream()
-                .mapToInt(InternalPartition::getInSyncReplicasCount)
-                .sum();
-
-        int replicasCount = partitions.stream()
-                .mapToInt(InternalPartition::getReplicasCount)
-                .sum();
-
-        topic.partitions(partitions.stream().collect(Collectors.toMap(
-                InternalPartition::getPartition,
-                t -> t
-        )));
-        topic.replicas(replicasCount);
-        topic.partitionCount(topicDescription.partitions().size());
-        topic.inSyncReplicas(inSyncReplicasCount);
-
-        topic.replicationFactor(
-                topicDescription.partitions().size() > 0 ?
-                        topicDescription.partitions().get(0).replicas().size() : 0
-        );
-
-        topic.underReplicatedPartitions(urpCount);
-
-        return topic.build();
-    }
-
-    public static int convertToIntServerStatus(ServerStatus serverStatus) {
-        return serverStatus.equals(ServerStatus.ONLINE) ? 1 : 0;
+  }
+
+  public static Mono<Set<ExtendedAdminClient.SupportedFeature>> getSupportedFeatures(
+      AdminClient adminClient) {
+    return ClusterUtil.toMono(adminClient.describeCluster().controller())
+        .map(Node::id)
+        .map(id -> Collections
+            .singletonList(new ConfigResource(ConfigResource.Type.BROKER, id.toString())))
+        .map(brokerCR -> adminClient.describeConfigs(brokerCR).all())
+        .flatMap(ClusterUtil::toMono)
+        .map(ClusterUtil::getSupportedUpdateFeature)
+        .map(Collections::singleton);
+  }
+
+  private static ExtendedAdminClient.SupportedFeature getSupportedUpdateFeature(
+      Map<ConfigResource, Config> configs) {
+    String version = configs.values().stream()
+        .map(Config::entries)
+        .flatMap(Collection::stream)
+        .filter(entry -> entry.name().contains(CLUSTER_VERSION_PARAM_KEY))
+        .findFirst().orElseThrow().value();
+    try {
+      final String[] parts = version.split("\\.");
+      if (parts.length > 2) {
+        version = parts[0] + "." + parts[1];
+      }
+      return Float.parseFloat(version.split("-")[0]) <= 2.3f
+          ? ExtendedAdminClient.SupportedFeature.ALTER_CONFIGS :
+          ExtendedAdminClient.SupportedFeature.INCREMENTAL_ALTER_CONFIGS;
+    } catch (Exception e) {
+      log.error("Conversion clusterVersion {} to float value failed", version);
+      throw e;
     }
+  }
 
-    public static TopicMessage mapToTopicMessage(ConsumerRecord<Bytes, Bytes> consumerRecord, RecordDeserializer recordDeserializer) {
-        OffsetDateTime timestamp = OffsetDateTime.ofInstant(Instant.ofEpochMilli(consumerRecord.timestamp()), UTC_ZONE_ID);
-        TopicMessage.TimestampTypeEnum timestampType = mapToTimestampType(consumerRecord.timestampType());
-        Map<String, String> headers = new HashMap<>();
-        consumerRecord.headers().iterator()
-                .forEachRemaining(header -> headers.put(header.key(), new String(header.value())));
-
-        TopicMessage topicMessage = new TopicMessage();
-
-        topicMessage.setPartition(consumerRecord.partition());
-        topicMessage.setOffset(consumerRecord.offset());
-        topicMessage.setTimestamp(timestamp);
-        topicMessage.setTimestampType(timestampType);
-        if (consumerRecord.key() != null) {
-            topicMessage.setKey(consumerRecord.key().toString());
-        }
-        topicMessage.setHeaders(headers);
-        Object parsedValue = recordDeserializer.deserialize(consumerRecord);
-        topicMessage.setContent(parsedValue);
-
-        return topicMessage;
-    }
-
-    private static TopicMessage.TimestampTypeEnum mapToTimestampType(TimestampType timestampType) {
-        switch (timestampType) {
-            case CREATE_TIME:
-                return TopicMessage.TimestampTypeEnum.CREATE_TIME;
-            case LOG_APPEND_TIME:
-                return TopicMessage.TimestampTypeEnum.LOG_APPEND_TIME;
-            case NO_TIMESTAMP_TYPE:
-                return TopicMessage.TimestampTypeEnum.NO_TIMESTAMP_TYPE;
-            default:
-                throw new IllegalArgumentException("Unknown timestampType: " + timestampType);
-        }
-    }
-
-    public static Mono<Set<ExtendedAdminClient.SupportedFeature>> getSupportedFeatures(AdminClient adminClient) {
-        return ClusterUtil.toMono(adminClient.describeCluster().controller())
-                .map(Node::id)
-                .map(id -> Collections.singletonList(new ConfigResource(ConfigResource.Type.BROKER, id.toString())))
-                .map(brokerCR -> adminClient.describeConfigs(brokerCR).all())
-                .flatMap(ClusterUtil::toMono)
-                .map(ClusterUtil::getSupportedUpdateFeature)
-                .map(Collections::singleton);
-    }
-
-    private static ExtendedAdminClient.SupportedFeature getSupportedUpdateFeature(Map<ConfigResource, Config> configs) {
-        String version = configs.values().stream()
-                .map(Config::entries)
-                .flatMap(Collection::stream)
-                .filter(entry -> entry.name().contains(CLUSTER_VERSION_PARAM_KEY))
-                .findFirst().orElseThrow().value();
-        try {
-            final String[] parts = version.split("\\.");
-            if (parts.length>2) {
-              version = parts[0] + "." + parts[1];
-            }        
-            return Float.parseFloat(version.split("-")[0]) <= 2.3f
-                    ? ExtendedAdminClient.SupportedFeature.ALTER_CONFIGS : ExtendedAdminClient.SupportedFeature.INCREMENTAL_ALTER_CONFIGS;
-        } catch (Exception e) {
-            log.error("Conversion clusterVersion {} to float value failed", version);
-            throw e;
-        }
-    }
-
-    public static <T, R> Map<T, R> toSingleMap (Stream<Map<T, R>> streamOfMaps) {
-        return streamOfMaps.reduce((map1, map2) -> Stream.concat(map1.entrySet().stream(), map2.entrySet().stream())
-                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))).orElseThrow();
-    }
+  public static <T, R> Map<T, R> toSingleMap(Stream<Map<T, R>> streamOfMaps) {
+    return streamOfMaps
+        .reduce((map1, map2) -> Stream.concat(map1.entrySet().stream(), map2.entrySet().stream())
+            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))).orElseThrow();
+  }
 
 }
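
For context on getSupportedUpdateFeature above: the broker config entry whose name contains "version" is trimmed to its major.minor part and compared against 2.3 to choose between the legacy alterConfigs API and the newer incrementalAlterConfigs API. A minimal standalone sketch of that rule follows; the class name and sample version strings are illustrative only and are not part of this change.

// Illustrative sketch only: mirrors the major.minor parsing in getSupportedUpdateFeature.
public class SupportedFeatureSketch {

  enum SupportedFeature { ALTER_CONFIGS, INCREMENTAL_ALTER_CONFIGS }

  static SupportedFeature featureFor(String clusterVersion) {
    // Trim "2.3.1" to "2.3"; suffixes such as "2.6-IV0" are cut at the first dash.
    String[] parts = clusterVersion.split("\\.");
    String version = parts.length > 2 ? parts[0] + "." + parts[1] : clusterVersion;
    return Float.parseFloat(version.split("-")[0]) <= 2.3f
        ? SupportedFeature.ALTER_CONFIGS
        : SupportedFeature.INCREMENTAL_ALTER_CONFIGS;
  }

  public static void main(String[] args) {
    System.out.println(featureFor("2.3.1"));   // ALTER_CONFIGS
    System.out.println(featureFor("2.6-IV0")); // INCREMENTAL_ALTER_CONFIGS
  }
}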

+ 112 - 96
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxClusterUtil.java

@@ -1,122 +1,138 @@
 package com.provectus.kafka.ui.util;
 
 import com.provectus.kafka.ui.model.Metric;
-import lombok.RequiredArgsConstructor;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.pool2.KeyedObjectPool;
-import org.springframework.stereotype.Component;
-
-import javax.management.*;
-import javax.management.remote.JMXConnector;
 import java.io.IOException;
 import java.math.BigDecimal;
 import java.net.MalformedURLException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Hashtable;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
+import javax.management.AttributeNotFoundException;
+import javax.management.InstanceNotFoundException;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanException;
+import javax.management.MBeanServerConnection;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import javax.management.ReflectionException;
+import javax.management.remote.JMXConnector;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.pool2.KeyedObjectPool;
+import org.springframework.stereotype.Component;
 
 @Component
 @Slf4j
 @RequiredArgsConstructor
 public class JmxClusterUtil {
 
-    private final KeyedObjectPool<String, JMXConnector> pool;
+  private static final String JMX_URL = "service:jmx:rmi:///jndi/rmi://";
+  private static final String JMX_SERVICE_TYPE = "jmxrmi";
+  private static final String KAFKA_SERVER_PARAM = "kafka.server";
+  private static final String NAME_METRIC_FIELD = "name";
+  private final KeyedObjectPool<String, JMXConnector> pool;
 
-    private static final String JMX_URL = "service:jmx:rmi:///jndi/rmi://";
-    private static final String JMX_SERVICE_TYPE = "jmxrmi";
-    private static final String KAFKA_SERVER_PARAM = "kafka.server";
-    private static final String NAME_METRIC_FIELD = "name";
-
-    public List<Metric> getJmxMetrics(int jmxPort, String jmxHost) {
-        String jmxUrl = JMX_URL + jmxHost + ":" + jmxPort + "/" + JMX_SERVICE_TYPE;
-        List<Metric> result = new ArrayList<>();
-        JMXConnector srv = null;
-        try {
-            srv = pool.borrowObject(jmxUrl);
-            MBeanServerConnection msc = srv.getMBeanServerConnection();
-            var jmxMetrics = msc.queryNames(null, null).stream().filter(q -> q.getCanonicalName().startsWith(KAFKA_SERVER_PARAM)).collect(Collectors.toList());
-            for (ObjectName jmxMetric : jmxMetrics) {
-                final Hashtable<String, String> params = jmxMetric.getKeyPropertyList();
-                Metric metric = new Metric();
-                metric.setName(params.get(NAME_METRIC_FIELD));
-                metric.setCanonicalName(jmxMetric.getCanonicalName());
-                metric.setParams(params);
-                metric.setValue(getJmxMetric(jmxMetric.getCanonicalName(), msc, srv, jmxUrl));
-                result.add(metric);
-            }
-            pool.returnObject(jmxUrl, srv);
-        } catch (IOException ioe) {
-            log.error("Cannot get jmxMetricsNames, {}", jmxUrl, ioe);
-            closeConnectionExceptionally(jmxUrl, srv);
-        } catch (Exception e) {
-            log.error("Cannot get JmxConnection from pool, {}", jmxUrl, e);
-            closeConnectionExceptionally(jmxUrl, srv);
-        }
-        return result;
+  public List<Metric> getJmxMetrics(int jmxPort, String jmxHost) {
+    String jmxUrl = JMX_URL + jmxHost + ":" + jmxPort + "/" + JMX_SERVICE_TYPE;
+    List<Metric> result = new ArrayList<>();
+    JMXConnector srv = null;
+    try {
+      srv = pool.borrowObject(jmxUrl);
+      MBeanServerConnection msc = srv.getMBeanServerConnection();
+      var jmxMetrics = msc.queryNames(null, null).stream()
+          .filter(q -> q.getCanonicalName().startsWith(KAFKA_SERVER_PARAM))
+          .collect(Collectors.toList());
+      for (ObjectName jmxMetric : jmxMetrics) {
+        final Hashtable<String, String> params = jmxMetric.getKeyPropertyList();
+        Metric metric = new Metric();
+        metric.setName(params.get(NAME_METRIC_FIELD));
+        metric.setCanonicalName(jmxMetric.getCanonicalName());
+        metric.setParams(params);
+        metric.setValue(getJmxMetric(jmxMetric.getCanonicalName(), msc, srv, jmxUrl));
+        result.add(metric);
+      }
+      pool.returnObject(jmxUrl, srv);
+    } catch (IOException ioe) {
+      log.error("Cannot get jmxMetricsNames, {}", jmxUrl, ioe);
+      closeConnectionExceptionally(jmxUrl, srv);
+    } catch (Exception e) {
+      log.error("Cannot get JmxConnection from pool, {}", jmxUrl, e);
+      closeConnectionExceptionally(jmxUrl, srv);
     }
+    return result;
+  }
 
 
-
-    private Map<String, BigDecimal> getJmxMetric(String canonicalName, MBeanServerConnection msc, JMXConnector srv, String jmxUrl) {
-        Map<String, BigDecimal> resultAttr = new HashMap<>();
-        try {
-            ObjectName name = new ObjectName(canonicalName);
-            var attrNames = msc.getMBeanInfo(name).getAttributes();
-            for (MBeanAttributeInfo attrName : attrNames) {
-                var value = msc.getAttribute(name, attrName.getName());
-                if (value instanceof Number) {
-                    if (!(value instanceof Double) || !((Double) value).isInfinite())
-                    resultAttr.put(attrName.getName(), new BigDecimal(value.toString()));
-                }
-            }
-        } catch (MalformedURLException url) {
-            log.error("Cannot create JmxServiceUrl from {}", jmxUrl);
-            closeConnectionExceptionally(jmxUrl, srv);
-        } catch (IOException io) {
-            log.error("Cannot connect to KafkaJmxServer with url {}", jmxUrl);
-            closeConnectionExceptionally(jmxUrl, srv);
-        } catch (MBeanException | AttributeNotFoundException | InstanceNotFoundException | ReflectionException e) {
-            log.error("Cannot find attribute", e);
-            closeConnectionExceptionally(jmxUrl, srv);
-        } catch (MalformedObjectNameException objectNameE) {
-            log.error("Cannot create objectName", objectNameE);
-            closeConnectionExceptionally(jmxUrl, srv);
-        } catch (Exception e) {
-            log.error("Error while retrieving connection {} from pool", jmxUrl);
-            closeConnectionExceptionally(jmxUrl, srv);
+  private Map<String, BigDecimal> getJmxMetric(String canonicalName, MBeanServerConnection msc,
+                                               JMXConnector srv, String jmxUrl) {
+    Map<String, BigDecimal> resultAttr = new HashMap<>();
+    try {
+      ObjectName name = new ObjectName(canonicalName);
+      var attrNames = msc.getMBeanInfo(name).getAttributes();
+      for (MBeanAttributeInfo attrName : attrNames) {
+        var value = msc.getAttribute(name, attrName.getName());
+        if (value instanceof Number) {
+          if (!(value instanceof Double) || !((Double) value).isInfinite()) {
+            resultAttr.put(attrName.getName(), new BigDecimal(value.toString()));
+          }
         }
-        return resultAttr;
+      }
+    } catch (MalformedURLException url) {
+      log.error("Cannot create JmxServiceUrl from {}", jmxUrl);
+      closeConnectionExceptionally(jmxUrl, srv);
+    } catch (IOException io) {
+      log.error("Cannot connect to KafkaJmxServer with url {}", jmxUrl);
+      closeConnectionExceptionally(jmxUrl, srv);
+    } catch (MBeanException | AttributeNotFoundException
+        | InstanceNotFoundException | ReflectionException e) {
+      log.error("Cannot find attribute", e);
+      closeConnectionExceptionally(jmxUrl, srv);
+    } catch (MalformedObjectNameException objectNameE) {
+      log.error("Cannot create objectName", objectNameE);
+      closeConnectionExceptionally(jmxUrl, srv);
+    } catch (Exception e) {
+      log.error("Error while retrieving connection {} from pool", jmxUrl);
+      closeConnectionExceptionally(jmxUrl, srv);
     }
+    return resultAttr;
+  }
 
-    private void closeConnectionExceptionally(String url, JMXConnector srv) {
-        try {
-            pool.invalidateObject(url, srv);
-        } catch (Exception e) {
-            log.error("Cannot invalidate object in pool, {}", url);
-        }
+  private void closeConnectionExceptionally(String url, JMXConnector srv) {
+    try {
+      pool.invalidateObject(url, srv);
+    } catch (Exception e) {
+      log.error("Cannot invalidate object in pool, {}", url);
     }
+  }
 
-    public Metric reduceJmxMetrics (Metric metric1, Metric metric2) {
-        var result = new Metric();
-        Map<String, BigDecimal> value = Stream.concat(
-                metric1.getValue().entrySet().stream(),
-                metric2.getValue().entrySet().stream()
-        ).collect(Collectors.groupingBy(
-                Map.Entry::getKey,
-                Collectors.reducing(BigDecimal.ZERO, Map.Entry::getValue, BigDecimal::add)
-        ));
-        result.setName(metric1.getName());
-        result.setCanonicalName(metric1.getCanonicalName());
-        result.setParams(metric1.getParams());
-        result.setValue(value);
-        return result;
-    }
+  public Metric reduceJmxMetrics(Metric metric1, Metric metric2) {
+    var result = new Metric();
+    Map<String, BigDecimal> value = Stream.concat(
+        metric1.getValue().entrySet().stream(),
+        metric2.getValue().entrySet().stream()
+    ).collect(Collectors.groupingBy(
+        Map.Entry::getKey,
+        Collectors.reducing(BigDecimal.ZERO, Map.Entry::getValue, BigDecimal::add)
+    ));
+    result.setName(metric1.getName());
+    result.setCanonicalName(metric1.getCanonicalName());
+    result.setParams(metric1.getParams());
+    result.setValue(value);
+    return result;
+  }
 
-    private boolean isWellKnownMetric(Metric metric) {
-        final Optional<String> param = Optional.ofNullable(metric.getParams().get(NAME_METRIC_FIELD)).filter(p ->
-                Arrays.stream(JmxMetricsName.values()).map(Enum::name)
-                        .anyMatch(n -> n.equals(p))
+  private boolean isWellKnownMetric(Metric metric) {
+    final Optional<String> param =
+        Optional.ofNullable(metric.getParams().get(NAME_METRIC_FIELD)).filter(p ->
+            Arrays.stream(JmxMetricsName.values()).map(Enum::name)
+                .anyMatch(n -> n.equals(p))
         );
-        return metric.getCanonicalName().contains(KAFKA_SERVER_PARAM) && param.isPresent();
-    }
+    return metric.getCanonicalName().contains(KAFKA_SERVER_PARAM) && param.isPresent();
+  }
 }
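
reduceJmxMetrics above folds the numeric attribute maps of the same metric reported by different brokers into one cluster-wide map by summing values per key. A self-contained sketch of that merge step is shown below; the class name and the sample values are made up for illustration.

// Illustrative sketch only: the per-key BigDecimal summing used by reduceJmxMetrics.
import java.math.BigDecimal;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class JmxMetricMergeSketch {

  static Map<String, BigDecimal> merge(Map<String, BigDecimal> broker0,
                                       Map<String, BigDecimal> broker1) {
    return Stream.concat(broker0.entrySet().stream(), broker1.entrySet().stream())
        .collect(Collectors.groupingBy(
            Map.Entry::getKey,
            Collectors.reducing(BigDecimal.ZERO, Map.Entry::getValue, BigDecimal::add)));
  }

  public static void main(String[] args) {
    // Count and OneMinuteRate from two brokers are added into cluster-wide totals.
    System.out.println(merge(
        Map.of("Count", new BigDecimal("5"), "OneMinuteRate", new BigDecimal("0.4")),
        Map.of("Count", new BigDecimal("7"), "OneMinuteRate", new BigDecimal("0.6"))));
  }
}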

+ 27 - 27
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxMetricsName.java

@@ -1,31 +1,31 @@
 package com.provectus.kafka.ui.util;
 
 public enum JmxMetricsName {
-    MessagesInPerSec,
-    BytesInPerSec,
-    ReplicationBytesInPerSec,
-    RequestsPerSec,
-    ErrorsPerSec,
-    MessageConversionsPerSec,
-    BytesOutPerSec,
-    ReplicationBytesOutPerSec,
-    NoKeyCompactedTopicRecordsPerSec,
-    InvalidMagicNumberRecordsPerSec,
-    InvalidMessageCrcRecordsPerSec,
-    InvalidOffsetOrSequenceRecordsPerSec,
-    UncleanLeaderElectionsPerSec,
-    IsrShrinksPerSec,
-    IsrExpandsPerSec,
-    ReassignmentBytesOutPerSec,
-    ReassignmentBytesInPerSec,
-    ProduceMessageConversionsPerSec,
-    FailedFetchRequestsPerSec,
-    ZooKeeperSyncConnectsPerSec,
-    BytesRejectedPerSec,
-    ZooKeeperAuthFailuresPerSec,
-    TotalFetchRequestsPerSec,
-    FailedIsrUpdatesPerSec,
-    IncrementalFetchSessionEvictionsPerSec,
-    FetchMessageConversionsPerSec,
-    FailedProduceRequestsPerSec
+  MessagesInPerSec,
+  BytesInPerSec,
+  ReplicationBytesInPerSec,
+  RequestsPerSec,
+  ErrorsPerSec,
+  MessageConversionsPerSec,
+  BytesOutPerSec,
+  ReplicationBytesOutPerSec,
+  NoKeyCompactedTopicRecordsPerSec,
+  InvalidMagicNumberRecordsPerSec,
+  InvalidMessageCrcRecordsPerSec,
+  InvalidOffsetOrSequenceRecordsPerSec,
+  UncleanLeaderElectionsPerSec,
+  IsrShrinksPerSec,
+  IsrExpandsPerSec,
+  ReassignmentBytesOutPerSec,
+  ReassignmentBytesInPerSec,
+  ProduceMessageConversionsPerSec,
+  FailedFetchRequestsPerSec,
+  ZooKeeperSyncConnectsPerSec,
+  BytesRejectedPerSec,
+  ZooKeeperAuthFailuresPerSec,
+  TotalFetchRequestsPerSec,
+  FailedIsrUpdatesPerSec,
+  IncrementalFetchSessionEvictionsPerSec,
+  FetchMessageConversionsPerSec,
+  FailedProduceRequestsPerSec
 }

+ 6 - 6
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxMetricsValueName.java

@@ -1,9 +1,9 @@
 package com.provectus.kafka.ui.util;
 
-public enum  JmxMetricsValueName {
-    Count,
-    OneMinuteRate,
-    FifteenMinuteRate,
-    FiveMinuteRate,
-    MeanRate
+public enum JmxMetricsValueName {
+  Count,
+  OneMinuteRate,
+  FifteenMinuteRate,
+  FiveMinuteRate,
+  MeanRate
 }

+ 19 - 20
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/JmxPoolFactory.java

@@ -1,34 +1,33 @@
 package com.provectus.kafka.ui.util;
 
+import java.io.IOException;
+import javax.management.remote.JMXConnector;
+import javax.management.remote.JMXConnectorFactory;
+import javax.management.remote.JMXServiceURL;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.pool2.BaseKeyedPooledObjectFactory;
 import org.apache.commons.pool2.PooledObject;
 import org.apache.commons.pool2.impl.DefaultPooledObject;
 
-import javax.management.remote.JMXConnector;
-import javax.management.remote.JMXConnectorFactory;
-import javax.management.remote.JMXServiceURL;
-import java.io.IOException;
-
 @Slf4j
 public class JmxPoolFactory extends BaseKeyedPooledObjectFactory<String, JMXConnector> {
 
-    @Override
-    public JMXConnector create(String s) throws Exception {
-        return JMXConnectorFactory.connect(new JMXServiceURL(s));
-    }
+  @Override
+  public JMXConnector create(String s) throws Exception {
+    return JMXConnectorFactory.connect(new JMXServiceURL(s));
+  }
 
-    @Override
-    public PooledObject<JMXConnector> wrap(JMXConnector jmxConnector) {
-        return new DefaultPooledObject<>(jmxConnector);
-    }
+  @Override
+  public PooledObject<JMXConnector> wrap(JMXConnector jmxConnector) {
+    return new DefaultPooledObject<>(jmxConnector);
+  }
 
-    @Override
-    public void destroyObject(String key, PooledObject<JMXConnector> p) {
-        try {
-            p.getObject().close();
-        } catch (IOException e) {
-            log.error("Cannot close connection with {}", key);
-        }
+  @Override
+  public void destroyObject(String key, PooledObject<JMXConnector> p) {
+    try {
+      p.getObject().close();
+    } catch (IOException e) {
+      log.error("Cannot close connection with {}", key);
     }
+  }
 }

+ 55 - 31
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/KafkaConstants.java

@@ -1,40 +1,64 @@
 package com.provectus.kafka.ui.util;
 
+import static org.apache.kafka.common.config.TopicConfig.CLEANUP_POLICY_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.CLEANUP_POLICY_DELETE;
+import static org.apache.kafka.common.config.TopicConfig.COMPRESSION_TYPE_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.DELETE_RETENTION_MS_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.FILE_DELETE_DELAY_MS_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.FLUSH_MS_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.INDEX_INTERVAL_BYTES_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.MAX_COMPACTION_LAG_MS_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.MAX_MESSAGE_BYTES_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.MESSAGE_DOWNCONVERSION_ENABLE_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.PREALLOCATE_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.RETENTION_BYTES_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.RETENTION_MS_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.SEGMENT_BYTES_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.SEGMENT_INDEX_BYTES_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.SEGMENT_JITTER_MS_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.SEGMENT_MS_CONFIG;
+import static org.apache.kafka.common.config.TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG;
+
 import java.util.AbstractMap;
 import java.util.Map;
 
-import static org.apache.kafka.common.config.TopicConfig.*;
-
 public final class KafkaConstants {
 
-    private KafkaConstants() {
-    }
+  public static final Map<String, String> TOPIC_DEFAULT_CONFIGS = Map.ofEntries(
+      new AbstractMap.SimpleEntry<>(CLEANUP_POLICY_CONFIG, CLEANUP_POLICY_DELETE),
+      new AbstractMap.SimpleEntry<>(COMPRESSION_TYPE_CONFIG, "producer"),
+      new AbstractMap.SimpleEntry<>(DELETE_RETENTION_MS_CONFIG, "86400000"),
+      new AbstractMap.SimpleEntry<>(FILE_DELETE_DELAY_MS_CONFIG, "60000"),
+      new AbstractMap.SimpleEntry<>(FLUSH_MESSAGES_INTERVAL_CONFIG, "9223372036854775807"),
+      new AbstractMap.SimpleEntry<>(FLUSH_MS_CONFIG, "9223372036854775807"),
+      new AbstractMap.SimpleEntry<>("follower.replication.throttled.replicas", ""),
+      new AbstractMap.SimpleEntry<>(INDEX_INTERVAL_BYTES_CONFIG, "4096"),
+      new AbstractMap.SimpleEntry<>("leader.replication.throttled.replicas", ""),
+      new AbstractMap.SimpleEntry<>(MAX_COMPACTION_LAG_MS_CONFIG, "9223372036854775807"),
+      new AbstractMap.SimpleEntry<>(MAX_MESSAGE_BYTES_CONFIG, "1000012"),
+      new AbstractMap.SimpleEntry<>(MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG,
+          "9223372036854775807"),
+      new AbstractMap.SimpleEntry<>(MESSAGE_TIMESTAMP_TYPE_CONFIG, "CreateTime"),
+      new AbstractMap.SimpleEntry<>(MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.5"),
+      new AbstractMap.SimpleEntry<>(MIN_COMPACTION_LAG_MS_CONFIG, "0"),
+      new AbstractMap.SimpleEntry<>(MIN_IN_SYNC_REPLICAS_CONFIG, "1"),
+      new AbstractMap.SimpleEntry<>(PREALLOCATE_CONFIG, "false"),
+      new AbstractMap.SimpleEntry<>(RETENTION_BYTES_CONFIG, "-1"),
+      new AbstractMap.SimpleEntry<>(RETENTION_MS_CONFIG, "604800000"),
+      new AbstractMap.SimpleEntry<>(SEGMENT_BYTES_CONFIG, "1073741824"),
+      new AbstractMap.SimpleEntry<>(SEGMENT_INDEX_BYTES_CONFIG, "10485760"),
+      new AbstractMap.SimpleEntry<>(SEGMENT_JITTER_MS_CONFIG, "0"),
+      new AbstractMap.SimpleEntry<>(SEGMENT_MS_CONFIG, "604800000"),
+      new AbstractMap.SimpleEntry<>(UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "false"),
+      new AbstractMap.SimpleEntry<>(MESSAGE_DOWNCONVERSION_ENABLE_CONFIG, "true")
+  );
 
-    public static final Map<String, String> TOPIC_DEFAULT_CONFIGS = Map.ofEntries(
-            new AbstractMap.SimpleEntry<>(CLEANUP_POLICY_CONFIG, CLEANUP_POLICY_DELETE),
-            new AbstractMap.SimpleEntry<>(COMPRESSION_TYPE_CONFIG, "producer"),
-            new AbstractMap.SimpleEntry<>(DELETE_RETENTION_MS_CONFIG, "86400000"),
-            new AbstractMap.SimpleEntry<>(FILE_DELETE_DELAY_MS_CONFIG, "60000"),
-            new AbstractMap.SimpleEntry<>(FLUSH_MESSAGES_INTERVAL_CONFIG, "9223372036854775807"),
-            new AbstractMap.SimpleEntry<>(FLUSH_MS_CONFIG, "9223372036854775807"),
-            new AbstractMap.SimpleEntry<>("follower.replication.throttled.replicas", ""),
-            new AbstractMap.SimpleEntry<>(INDEX_INTERVAL_BYTES_CONFIG, "4096"),
-            new AbstractMap.SimpleEntry<>("leader.replication.throttled.replicas", ""),
-            new AbstractMap.SimpleEntry<>(MAX_COMPACTION_LAG_MS_CONFIG, "9223372036854775807"),
-            new AbstractMap.SimpleEntry<>(MAX_MESSAGE_BYTES_CONFIG, "1000012"),
-            new AbstractMap.SimpleEntry<>(MESSAGE_TIMESTAMP_DIFFERENCE_MAX_MS_CONFIG, "9223372036854775807"),
-            new AbstractMap.SimpleEntry<>(MESSAGE_TIMESTAMP_TYPE_CONFIG, "CreateTime"),
-            new AbstractMap.SimpleEntry<>(MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.5"),
-            new AbstractMap.SimpleEntry<>(MIN_COMPACTION_LAG_MS_CONFIG, "0"),
-            new AbstractMap.SimpleEntry<>(MIN_IN_SYNC_REPLICAS_CONFIG, "1"),
-            new AbstractMap.SimpleEntry<>(PREALLOCATE_CONFIG, "false"),
-            new AbstractMap.SimpleEntry<>(RETENTION_BYTES_CONFIG, "-1"),
-            new AbstractMap.SimpleEntry<>(RETENTION_MS_CONFIG, "604800000"),
-            new AbstractMap.SimpleEntry<>(SEGMENT_BYTES_CONFIG, "1073741824"),
-            new AbstractMap.SimpleEntry<>(SEGMENT_INDEX_BYTES_CONFIG, "10485760"),
-            new AbstractMap.SimpleEntry<>(SEGMENT_JITTER_MS_CONFIG, "0"),
-            new AbstractMap.SimpleEntry<>(SEGMENT_MS_CONFIG, "604800000"),
-            new AbstractMap.SimpleEntry<>(UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "false"),
-            new AbstractMap.SimpleEntry<>(MESSAGE_DOWNCONVERSION_ENABLE_CONFIG, "true")
-    );
+  private KafkaConstants() {
+  }
 }
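
KafkaConstants.TOPIC_DEFAULT_CONFIGS above is the fallback table consulted when a topic config entry has no broker-side default (see mapToInternalTopicConfig earlier in this diff). A short usage sketch follows; it assumes the class is on the classpath and the printed values come straight from the map above.

// Illustrative sketch only: default lookup against KafkaConstants.TOPIC_DEFAULT_CONFIGS.
import com.provectus.kafka.ui.util.KafkaConstants;

public class TopicDefaultsSketch {
  public static void main(String[] args) {
    // retention.ms defaults to 7 days (604800000 ms); retention.bytes to unlimited (-1).
    System.out.println(KafkaConstants.TOPIC_DEFAULT_CONFIGS.get("retention.ms"));    // 604800000
    System.out.println(KafkaConstants.TOPIC_DEFAULT_CONFIGS.get("retention.bytes")); // -1
  }
}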

+ 39 - 39
kafka-ui-api/src/test/java/com/provectus/kafka/ui/AbstractBaseTest.java

@@ -17,44 +17,44 @@ import org.testcontainers.utility.DockerImageName;
 @SpringBootTest
 @ActiveProfiles("test")
 public abstract class AbstractBaseTest {
-    public static String LOCAL = "local";
-    public static String SECOND_LOCAL = "secondLocal";
-
-    private static final String CONFLUENT_PLATFORM_VERSION = "5.5.0";
-
-    public static final KafkaContainer kafka = new KafkaContainer(DockerImageName.parse("confluentinc/cp-kafka").withTag(CONFLUENT_PLATFORM_VERSION))
-            .withNetwork(Network.SHARED);
-
-    public static final SchemaRegistryContainer schemaRegistry = new SchemaRegistryContainer(CONFLUENT_PLATFORM_VERSION)
-            .withKafka(kafka)
-            .dependsOn(kafka);
-
-    public static final KafkaConnectContainer kafkaConnect = new KafkaConnectContainer(CONFLUENT_PLATFORM_VERSION)
-            .withKafka(kafka)
-            .dependsOn(kafka)
-            .dependsOn(schemaRegistry);
-
-    static {
-        kafka.start();
-        schemaRegistry.start();
-        kafkaConnect.start();
-    }
-
-    public static class Initializer implements ApplicationContextInitializer<ConfigurableApplicationContext> {
-        @Override
-        public void initialize(@NotNull ConfigurableApplicationContext context) {
-            System.setProperty("kafka.clusters.0.name", LOCAL);
-            System.setProperty("kafka.clusters.0.bootstrapServers", kafka.getBootstrapServers());
-            System.setProperty("kafka.clusters.0.schemaRegistry", schemaRegistry.getTarget());
-            System.setProperty("kafka.clusters.0.kafkaConnect.0.name", "kafka-connect");
-            System.setProperty("kafka.clusters.0.kafkaConnect.0.address", kafkaConnect.getTarget());
-
-            System.setProperty("kafka.clusters.1.name", SECOND_LOCAL);
-            System.setProperty("kafka.clusters.1.readOnly", "true");
-            System.setProperty("kafka.clusters.1.bootstrapServers", kafka.getBootstrapServers());
-            System.setProperty("kafka.clusters.1.schemaRegistry", schemaRegistry.getTarget());
-            System.setProperty("kafka.clusters.1.kafkaConnect.0.name", "kafka-connect");
-            System.setProperty("kafka.clusters.1.kafkaConnect.0.address", kafkaConnect.getTarget());
-        }
+  private static final String CONFLUENT_PLATFORM_VERSION = "5.5.0";
+  public static final KafkaContainer kafka = new KafkaContainer(
+      DockerImageName.parse("confluentinc/cp-kafka").withTag(CONFLUENT_PLATFORM_VERSION))
+      .withNetwork(Network.SHARED);
+  public static final SchemaRegistryContainer schemaRegistry =
+      new SchemaRegistryContainer(CONFLUENT_PLATFORM_VERSION)
+          .withKafka(kafka)
+          .dependsOn(kafka);
+  public static final KafkaConnectContainer kafkaConnect =
+      new KafkaConnectContainer(CONFLUENT_PLATFORM_VERSION)
+          .withKafka(kafka)
+          .dependsOn(kafka)
+          .dependsOn(schemaRegistry);
+  public static String LOCAL = "local";
+  public static String SECOND_LOCAL = "secondLocal";
+
+  static {
+    kafka.start();
+    schemaRegistry.start();
+    kafkaConnect.start();
+  }
+
+  public static class Initializer
+      implements ApplicationContextInitializer<ConfigurableApplicationContext> {
+    @Override
+    public void initialize(@NotNull ConfigurableApplicationContext context) {
+      System.setProperty("kafka.clusters.0.name", LOCAL);
+      System.setProperty("kafka.clusters.0.bootstrapServers", kafka.getBootstrapServers());
+      System.setProperty("kafka.clusters.0.schemaRegistry", schemaRegistry.getTarget());
+      System.setProperty("kafka.clusters.0.kafkaConnect.0.name", "kafka-connect");
+      System.setProperty("kafka.clusters.0.kafkaConnect.0.address", kafkaConnect.getTarget());
+
+      System.setProperty("kafka.clusters.1.name", SECOND_LOCAL);
+      System.setProperty("kafka.clusters.1.readOnly", "true");
+      System.setProperty("kafka.clusters.1.bootstrapServers", kafka.getBootstrapServers());
+      System.setProperty("kafka.clusters.1.schemaRegistry", schemaRegistry.getTarget());
+      System.setProperty("kafka.clusters.1.kafkaConnect.0.name", "kafka-connect");
+      System.setProperty("kafka.clusters.1.kafkaConnect.0.address", kafkaConnect.getTarget());
     }
+  }
 }

+ 284 - 264
kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConnectServiceTests.java

@@ -1,6 +1,19 @@
 package com.provectus.kafka.ui;
 
-import com.provectus.kafka.ui.model.*;
+import static java.util.function.Predicate.not;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import com.provectus.kafka.ui.model.Connector;
+import com.provectus.kafka.ui.model.ConnectorPlugin;
+import com.provectus.kafka.ui.model.ConnectorPluginConfig;
+import com.provectus.kafka.ui.model.ConnectorPluginConfigValidationResponse;
+import com.provectus.kafka.ui.model.ConnectorPluginConfigValue;
+import com.provectus.kafka.ui.model.ConnectorStatus;
+import com.provectus.kafka.ui.model.NewConnector;
+import com.provectus.kafka.ui.model.TaskId;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
 import lombok.extern.log4j.Log4j2;
 import org.junit.jupiter.api.AfterEach;
 import org.junit.jupiter.api.BeforeEach;
@@ -11,293 +24,300 @@ import org.springframework.core.ParameterizedTypeReference;
 import org.springframework.test.context.ContextConfiguration;
 import org.springframework.test.web.reactive.server.WebTestClient;
 
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-
-import static java.util.function.Predicate.not;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
 @ContextConfiguration(initializers = {AbstractBaseTest.Initializer.class})
 @Log4j2
 @AutoConfigureWebTestClient(timeout = "60000")
 public class KafkaConnectServiceTests extends AbstractBaseTest {
-    private final String connectName = "kafka-connect";
-    private final String connectorName = UUID.randomUUID().toString();
-    private final Map<String, Object> config = Map.of(
-            "name", connectorName,
-            "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-            "tasks.max", "1",
-            "topics", "output-topic",
-            "file", "/tmp/test"
-    );
+  private final String connectName = "kafka-connect";
+  private final String connectorName = UUID.randomUUID().toString();
+  private final Map<String, Object> config = Map.of(
+      "name", connectorName,
+      "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+      "tasks.max", "1",
+      "topics", "output-topic",
+      "file", "/tmp/test"
+  );
 
-    @Autowired
-    private WebTestClient webTestClient;
+  @Autowired
+  private WebTestClient webTestClient;
 
 
-    @BeforeEach
-    public void setUp() {
-        webTestClient.post()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL, connectName)
-                .bodyValue(new NewConnector()
-                        .name(connectorName)
-                        .config(Map.of(
-                                "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-                                "tasks.max", "1",
-                                "topics", "output-topic",
-                                "file", "/tmp/test"
-                        ))
-                )
-                .exchange()
-                .expectStatus().isOk();
-    }
+  @BeforeEach
+  public void setUp() {
+    webTestClient.post()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL, connectName)
+        .bodyValue(new NewConnector()
+            .name(connectorName)
+            .config(Map.of(
+                "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+                "tasks.max", "1",
+                "topics", "output-topic",
+                "file", "/tmp/test"
+            ))
+        )
+        .exchange()
+        .expectStatus().isOk();
+  }
 
-    @AfterEach
-    public void tearDown() {
-        webTestClient.delete()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}", LOCAL, connectName, connectorName)
-                .exchange()
-                .expectStatus().isOk();
-    }
+  @AfterEach
+  public void tearDown() {
+    webTestClient.delete()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}", LOCAL,
+            connectName, connectorName)
+        .exchange()
+        .expectStatus().isOk();
+  }
 
-    @Test
-    public void shouldListConnectors() {
-        webTestClient.get()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL, connectName)
-                .exchange()
-                .expectStatus().isOk()
-                .expectBody()
-                .jsonPath(String.format("$[?(@ == '%s')]", connectorName))
-                .exists();
-    }
+  @Test
+  public void shouldListConnectors() {
+    webTestClient.get()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL, connectName)
+        .exchange()
+        .expectStatus().isOk()
+        .expectBody()
+        .jsonPath(String.format("$[?(@ == '%s')]", connectorName))
+        .exists();
+  }
 
-    @Test
-    public void shouldReturnNotFoundForNonExistingCluster() {
-        webTestClient.get()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", "nonExistingCluster", connectName)
-                .exchange()
-                .expectStatus().isNotFound();
-    }
+  @Test
+  public void shouldReturnNotFoundForNonExistingCluster() {
+    webTestClient.get()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", "nonExistingCluster",
+            connectName)
+        .exchange()
+        .expectStatus().isNotFound();
+  }
 
-    @Test
-    public void shouldReturnNotFoundForNonExistingConnectName() {
-        webTestClient.get()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL, "nonExistingConnect")
-                .exchange()
-                .expectStatus().isNotFound();
-    }
+  @Test
+  public void shouldReturnNotFoundForNonExistingConnectName() {
+    webTestClient.get()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL,
+            "nonExistingConnect")
+        .exchange()
+        .expectStatus().isNotFound();
+  }
 
-    @Test
-    public void shouldRetrieveConnector() {
-        Connector expected = (Connector) new Connector()
-                .status(new ConnectorStatus()
-                        .state(ConnectorStatus.StateEnum.RUNNING)
-                        .workerId("kafka-connect:8083"))
-                .tasks(List.of(new TaskId()
-                        .connector(connectorName)
-                        .task(0)))
-                .type(Connector.TypeEnum.SINK)
-                .name(connectorName)
-                .config(config);
-        webTestClient.get()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}", LOCAL, connectName, connectorName)
-                .exchange()
-                .expectStatus().isOk()
-                .expectBody(Connector.class)
-                .value(connector -> assertEquals(expected, connector));
-    }
+  @Test
+  public void shouldRetrieveConnector() {
+    Connector expected = (Connector) new Connector()
+        .status(new ConnectorStatus()
+            .state(ConnectorStatus.StateEnum.RUNNING)
+            .workerId("kafka-connect:8083"))
+        .tasks(List.of(new TaskId()
+            .connector(connectorName)
+            .task(0)))
+        .type(Connector.TypeEnum.SINK)
+        .name(connectorName)
+        .config(config);
+    webTestClient.get()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}", LOCAL,
+            connectName, connectorName)
+        .exchange()
+        .expectStatus().isOk()
+        .expectBody(Connector.class)
+        .value(connector -> assertEquals(expected, connector));
+  }
 
-    @Test
-    public void shouldUpdateConfig() {
-        webTestClient.put()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}/config", LOCAL, connectName, connectorName)
-                .bodyValue(Map.of(
-                        "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-                        "tasks.max", "1",
-                        "topics", "another-topic",
-                        "file", "/tmp/new"
-                        )
-                )
-                .exchange()
-                .expectStatus().isOk();
+  @Test
+  public void shouldUpdateConfig() {
+    webTestClient.put()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}/config",
+            LOCAL, connectName, connectorName)
+        .bodyValue(Map.of(
+            "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+            "tasks.max", "1",
+            "topics", "another-topic",
+            "file", "/tmp/new"
+            )
+        )
+        .exchange()
+        .expectStatus().isOk();
 
-        webTestClient.get()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}/config", LOCAL, connectName, connectorName)
-                .exchange()
-                .expectStatus().isOk()
-                .expectBody(new ParameterizedTypeReference<Map<String, Object>>() {
-                })
-                .isEqualTo(Map.of(
-                        "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-                        "tasks.max", "1",
-                        "topics", "another-topic",
-                        "file", "/tmp/new",
-                        "name", connectorName
-                ));
-    }
+    webTestClient.get()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}/config",
+            LOCAL, connectName, connectorName)
+        .exchange()
+        .expectStatus().isOk()
+        .expectBody(new ParameterizedTypeReference<Map<String, Object>>() {
+        })
+        .isEqualTo(Map.of(
+            "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+            "tasks.max", "1",
+            "topics", "another-topic",
+            "file", "/tmp/new",
+            "name", connectorName
+        ));
+  }
 
-    @Test
-    public void shouldReturn400WhenConnectReturns400ForInvalidConfigCreate() {
-        var connectorName = UUID.randomUUID().toString();
-        webTestClient.post()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL, connectName)
-                .bodyValue(Map.of(
-                        "name", connectorName,
-                        "config", Map.of(
-                                "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-                                "tasks.max", "invalid number",
-                                "topics", "another-topic",
-                                "file", "/tmp/test"
-                        ))
-                )
-                .exchange()
-                .expectStatus().isBadRequest();
+  @Test
+  public void shouldReturn400WhenConnectReturns400ForInvalidConfigCreate() {
+    var connectorName = UUID.randomUUID().toString();
+    webTestClient.post()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL, connectName)
+        .bodyValue(Map.of(
+            "name", connectorName,
+            "config", Map.of(
+                "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+                "tasks.max", "invalid number",
+                "topics", "another-topic",
+                "file", "/tmp/test"
+            ))
+        )
+        .exchange()
+        .expectStatus().isBadRequest();
 
-        webTestClient.get()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL, connectName)
-                .exchange()
-                .expectStatus().isOk()
-                .expectBody()
-                .jsonPath(String.format("$[?(@ == '%s')]", connectorName))
-                .doesNotExist();
-    }
+    webTestClient.get()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL, connectName)
+        .exchange()
+        .expectStatus().isOk()
+        .expectBody()
+        .jsonPath(String.format("$[?(@ == '%s')]", connectorName))
+        .doesNotExist();
+  }
 
-    @Test
-    public void shouldReturn400WhenConnectReturns500ForInvalidConfigCreate() {
-        var connectorName = UUID.randomUUID().toString();
-        webTestClient.post()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL, connectName)
-                .bodyValue(Map.of(
-                        "name", connectorName,
-                        "config", Map.of(
-                                "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector"
-                        ))
-                )
-                .exchange()
-                .expectStatus().isBadRequest();
+  @Test
+  public void shouldReturn400WhenConnectReturns500ForInvalidConfigCreate() {
+    var connectorName = UUID.randomUUID().toString();
+    webTestClient.post()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL, connectName)
+        .bodyValue(Map.of(
+            "name", connectorName,
+            "config", Map.of(
+                "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector"
+            ))
+        )
+        .exchange()
+        .expectStatus().isBadRequest();
 
-        webTestClient.get()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL, connectName)
-                .exchange()
-                .expectStatus().isOk()
-                .expectBody()
-                .jsonPath(String.format("$[?(@ == '%s')]", connectorName))
-                .doesNotExist();
-    }
+    webTestClient.get()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors", LOCAL, connectName)
+        .exchange()
+        .expectStatus().isOk()
+        .expectBody()
+        .jsonPath(String.format("$[?(@ == '%s')]", connectorName))
+        .doesNotExist();
+  }
 
 
-    @Test
-    public void shouldReturn400WhenConnectReturns400ForInvalidConfigUpdate() {
-        webTestClient.put()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}/config", LOCAL, connectName, connectorName)
-                .bodyValue(Map.of(
-                        "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-                        "tasks.max", "invalid number",
-                        "topics", "another-topic",
-                        "file", "/tmp/test"
-                        )
-                )
-                .exchange()
-                .expectStatus().isBadRequest();
+  @Test
+  public void shouldReturn400WhenConnectReturns400ForInvalidConfigUpdate() {
+    webTestClient.put()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}/config",
+            LOCAL, connectName, connectorName)
+        .bodyValue(Map.of(
+            "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+            "tasks.max", "invalid number",
+            "topics", "another-topic",
+            "file", "/tmp/test"
+            )
+        )
+        .exchange()
+        .expectStatus().isBadRequest();
 
-        webTestClient.get()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}/config", LOCAL, connectName, connectorName)
-                .exchange()
-                .expectStatus().isOk()
-                .expectBody(new ParameterizedTypeReference<Map<String, Object>>() {
-                })
-                .isEqualTo(Map.of(
-                        "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-                        "tasks.max", "1",
-                        "topics", "output-topic",
-                        "file", "/tmp/test",
-                        "name", connectorName
-                ));
-    }
+    webTestClient.get()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}/config",
+            LOCAL, connectName, connectorName)
+        .exchange()
+        .expectStatus().isOk()
+        .expectBody(new ParameterizedTypeReference<Map<String, Object>>() {
+        })
+        .isEqualTo(Map.of(
+            "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+            "tasks.max", "1",
+            "topics", "output-topic",
+            "file", "/tmp/test",
+            "name", connectorName
+        ));
+  }
 
-    @Test
-    public void shouldReturn400WhenConnectReturns500ForInvalidConfigUpdate() {
-        webTestClient.put()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}/config", LOCAL, connectName, connectorName)
-                .bodyValue(Map.of(
-                        "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector"
-                        )
-                )
-                .exchange()
-                .expectStatus().isBadRequest();
+  @Test
+  public void shouldReturn400WhenConnectReturns500ForInvalidConfigUpdate() {
+    webTestClient.put()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}/config",
+            LOCAL, connectName, connectorName)
+        .bodyValue(Map.of(
+            "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector"
+            )
+        )
+        .exchange()
+        .expectStatus().isBadRequest();
 
-        webTestClient.get()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}/config", LOCAL, connectName, connectorName)
-                .exchange()
-                .expectStatus().isOk()
-                .expectBody(new ParameterizedTypeReference<Map<String, Object>>() {
-                })
-                .isEqualTo(Map.of(
-                        "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-                        "tasks.max", "1",
-                        "topics", "output-topic",
-                        "file", "/tmp/test",
-                        "name", connectorName
-                ));
-    }
+    webTestClient.get()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/connectors/{connectorName}/config",
+            LOCAL, connectName, connectorName)
+        .exchange()
+        .expectStatus().isOk()
+        .expectBody(new ParameterizedTypeReference<Map<String, Object>>() {
+        })
+        .isEqualTo(Map.of(
+            "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+            "tasks.max", "1",
+            "topics", "output-topic",
+            "file", "/tmp/test",
+            "name", connectorName
+        ));
+  }
 
-    @Test
-    public void shouldRetrieveConnectorPlugins() {
-        webTestClient.get()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/plugins", LOCAL, connectName)
-                .exchange()
-                .expectStatus().isOk()
-                .expectBodyList(ConnectorPlugin.class)
-                .value(plugins -> assertEquals(14, plugins.size()));
-    }
+  @Test
+  public void shouldRetrieveConnectorPlugins() {
+    webTestClient.get()
+        .uri("/api/clusters/{clusterName}/connects/{connectName}/plugins", LOCAL, connectName)
+        .exchange()
+        .expectStatus().isOk()
+        .expectBodyList(ConnectorPlugin.class)
+        .value(plugins -> assertEquals(14, plugins.size()));
+  }
 
-    @Test
-    public void shouldSuccessfullyValidateConnectorPluginConfiguration() {
-        var pluginName = "FileStreamSinkConnector";
-        webTestClient.put()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/plugins/{pluginName}/config/validate", LOCAL, connectName, pluginName)
-                .bodyValue(Map.of(
-                        "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-                        "tasks.max", "1",
-                        "topics", "output-topic",
-                        "file", "/tmp/test",
-                        "name", connectorName
-                        )
-                )
-                .exchange()
-                .expectStatus().isOk()
-                .expectBody(ConnectorPluginConfigValidationResponse.class)
-                .value(response -> assertEquals(0, response.getErrorCount()));
-    }
+  @Test
+  public void shouldSuccessfullyValidateConnectorPluginConfiguration() {
+    var pluginName = "FileStreamSinkConnector";
+    var path =
+        "/api/clusters/{clusterName}/connects/{connectName}/plugins/{pluginName}/config/validate";
+    webTestClient.put()
+        .uri(path, LOCAL, connectName, pluginName)
+        .bodyValue(Map.of(
+            "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+            "tasks.max", "1",
+            "topics", "output-topic",
+            "file", "/tmp/test",
+            "name", connectorName
+            )
+        )
+        .exchange()
+        .expectStatus().isOk()
+        .expectBody(ConnectorPluginConfigValidationResponse.class)
+        .value(response -> assertEquals(0, response.getErrorCount()));
+  }
 
-    @Test
-    public void shouldValidateAndReturnErrorsOfConnectorPluginConfiguration() {
-        var pluginName = "FileStreamSinkConnector";
-        webTestClient.put()
-                .uri("/api/clusters/{clusterName}/connects/{connectName}/plugins/{pluginName}/config/validate", LOCAL, connectName, pluginName)
-                .bodyValue(Map.of(
-                        "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
-                        "tasks.max", "0",
-                        "topics", "output-topic",
-                        "file", "/tmp/test",
-                        "name", connectorName
-                        )
-                )
-                .exchange()
-                .expectStatus().isOk()
-                .expectBody(ConnectorPluginConfigValidationResponse.class)
-                .value(response -> {
-                    assertEquals(1, response.getErrorCount());
-                    var error = response.getConfigs().stream()
-                            .map(ConnectorPluginConfig::getValue)
-                            .map(ConnectorPluginConfigValue::getErrors)
-                            .filter(not(List::isEmpty))
-                            .findFirst().get();
-                    assertEquals(
-                            "Invalid value 0 for configuration tasks.max: Value must be at least 1",
-                            error.get(0)
-                    );
-                });
-    }
+  @Test
+  public void shouldValidateAndReturnErrorsOfConnectorPluginConfiguration() {
+    var pluginName = "FileStreamSinkConnector";
+    var path =
+        "/api/clusters/{clusterName}/connects/{connectName}/plugins/{pluginName}/config/validate";
+    webTestClient.put()
+        .uri(path, LOCAL, connectName, pluginName)
+        .bodyValue(Map.of(
+            "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
+            "tasks.max", "0",
+            "topics", "output-topic",
+            "file", "/tmp/test",
+            "name", connectorName
+            )
+        )
+        .exchange()
+        .expectStatus().isOk()
+        .expectBody(ConnectorPluginConfigValidationResponse.class)
+        .value(response -> {
+          assertEquals(1, response.getErrorCount());
+          var error = response.getConfigs().stream()
+              .map(ConnectorPluginConfig::getValue)
+              .map(ConnectorPluginConfigValue::getErrors)
+              .filter(not(List::isEmpty))
+              .findFirst().get();
+          assertEquals(
+              "Invalid value 0 for configuration tasks.max: Value must be at least 1",
+              error.get(0)
+          );
+        });
+  }
 }
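
Note: the two validation tests above go through kafka-ui's proxy endpoint, which forwards the request to the Kafka Connect worker's own connector-plugin validation API. For comparison, a minimal sketch of calling the worker directly is shown below (a fragment, imports omitted); connectUrl and the connector name are assumptions, not part of this commit — in these tests the worker address would come from KafkaConnectContainer#getTarget().

    // Hedged sketch: PUT /connector-plugins/{plugin}/config/validate on the Connect worker itself.
    String validationJson = WebClient.create(connectUrl)
        .put()
        .uri("/connector-plugins/{pluginName}/config/validate", "FileStreamSinkConnector")
        .contentType(MediaType.APPLICATION_JSON)
        .bodyValue(Map.of(
            "connector.class", "org.apache.kafka.connect.file.FileStreamSinkConnector",
            "tasks.max", "1",
            "topics", "output-topic",
            "file", "/tmp/test",
            "name", "test-connector"))
        .retrieve()
        .bodyToMono(String.class)
        .block(); // response carries error_count and per-field errors, as asserted above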

+ 52 - 53
kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java

@@ -3,6 +3,9 @@ package com.provectus.kafka.ui;
 import com.provectus.kafka.ui.model.TopicFormData;
 import com.provectus.kafka.ui.model.TopicMessage;
 import com.provectus.kafka.ui.producer.KafkaTestProducer;
+import java.util.Map;
+import java.util.UUID;
+import java.util.stream.Stream;
 import lombok.extern.log4j.Log4j2;
 import org.junit.jupiter.api.Test;
 import org.springframework.beans.factory.annotation.Autowired;
@@ -10,70 +13,66 @@ import org.springframework.boot.test.autoconfigure.web.reactive.AutoConfigureWeb
 import org.springframework.test.context.ContextConfiguration;
 import org.springframework.test.web.reactive.server.WebTestClient;
 
-import java.util.Map;
-import java.util.UUID;
-import java.util.stream.Stream;
-
 @ContextConfiguration(initializers = {AbstractBaseTest.Initializer.class})
 @Log4j2
 @AutoConfigureWebTestClient(timeout = "60000")
 public class KafkaConsumerTests extends AbstractBaseTest {
 
-    @Autowired
-    private WebTestClient webTestClient;
+  @Autowired
+  private WebTestClient webTestClient;
 
 
-    @Test
-    public void shouldDeleteRecords() {
-        var topicName = UUID.randomUUID().toString();
-        webTestClient.post()
-                .uri("/api/clusters/{clusterName}/topics", LOCAL)
-                .bodyValue(new TopicFormData()
-                        .name(topicName)
-                        .partitions(1)
-                        .replicationFactor(1)
-                        .configs(Map.of())
-                )
-                .exchange()
-                .expectStatus()
-                .isOk();
+  @Test
+  public void shouldDeleteRecords() {
+    var topicName = UUID.randomUUID().toString();
+    webTestClient.post()
+        .uri("/api/clusters/{clusterName}/topics", LOCAL)
+        .bodyValue(new TopicFormData()
+            .name(topicName)
+            .partitions(1)
+            .replicationFactor(1)
+            .configs(Map.of())
+        )
+        .exchange()
+        .expectStatus()
+        .isOk();
 
-        try(KafkaTestProducer<String, String> producer = KafkaTestProducer.forKafka(kafka)) {
-            Stream.of("one", "two", "three", "four")
-                    .forEach(value -> producer.send(topicName, value));
-        }
+    try (KafkaTestProducer<String, String> producer = KafkaTestProducer.forKafka(kafka)) {
+      Stream.of("one", "two", "three", "four")
+          .forEach(value -> producer.send(topicName, value));
+    }
 
-        webTestClient.get()
-                .uri("/api/clusters/{clusterName}/topics/{topicName}/messages", LOCAL, topicName)
-                .exchange()
-                .expectStatus()
-                .isOk()
-                .expectBodyList(TopicMessage.class)
-                .hasSize(4);
+    webTestClient.get()
+        .uri("/api/clusters/{clusterName}/topics/{topicName}/messages", LOCAL, topicName)
+        .exchange()
+        .expectStatus()
+        .isOk()
+        .expectBodyList(TopicMessage.class)
+        .hasSize(4);
 
-        webTestClient.delete()
-                .uri("/api/clusters/{clusterName}/topics/{topicName}/messages", LOCAL, topicName)
-                .exchange()
-                .expectStatus()
-                .isOk();
+    webTestClient.delete()
+        .uri("/api/clusters/{clusterName}/topics/{topicName}/messages", LOCAL, topicName)
+        .exchange()
+        .expectStatus()
+        .isOk();
 
-        webTestClient.get()
-                .uri("/api/clusters/{clusterName}/topics/{topicName}/messages", LOCAL, topicName)
-                .exchange()
-                .expectStatus()
-                .isOk()
-                .expectBodyList(TopicMessage.class)
-                .hasSize(0);
-    }
+    webTestClient.get()
+        .uri("/api/clusters/{clusterName}/topics/{topicName}/messages", LOCAL, topicName)
+        .exchange()
+        .expectStatus()
+        .isOk()
+        .expectBodyList(TopicMessage.class)
+        .hasSize(0);
+  }
 
-    @Test
-    public void shouldReturn404ForNonExistingTopic() {
-        var topicName = UUID.randomUUID().toString();
+  @Test
+  public void shouldReturn404ForNonExistingTopic() {
+    var topicName = UUID.randomUUID().toString();
 
-        webTestClient.delete()
-                .uri("/api/clusters/{clusterName}/topics/{topicName}/messages", LOCAL, topicName)
-                .exchange()
-                .expectStatus()
-                .isNotFound();
-    }
+    webTestClient.delete()
+        .uri("/api/clusters/{clusterName}/topics/{topicName}/messages", LOCAL, topicName)
+        .exchange()
+        .expectStatus()
+        .isNotFound();
+  }
 }

+ 75 - 76
kafka-ui-api/src/test/java/com/provectus/kafka/ui/ReadOnlyModeTests.java

@@ -1,6 +1,8 @@
 package com.provectus.kafka.ui;
 
 import com.provectus.kafka.ui.model.TopicFormData;
+import java.util.Map;
+import java.util.UUID;
 import lombok.extern.log4j.Log4j2;
 import org.junit.jupiter.api.Test;
 import org.springframework.beans.factory.annotation.Autowired;
@@ -9,89 +11,86 @@ import org.springframework.http.HttpStatus;
 import org.springframework.test.context.ContextConfiguration;
 import org.springframework.test.web.reactive.server.WebTestClient;
 
-import java.util.Map;
-import java.util.UUID;
-
 @ContextConfiguration(initializers = {AbstractBaseTest.Initializer.class})
 @Log4j2
 @AutoConfigureWebTestClient(timeout = "60000")
 public class ReadOnlyModeTests extends AbstractBaseTest {
 
-    @Autowired
-    private WebTestClient webTestClient;
+  @Autowired
+  private WebTestClient webTestClient;
 
-    @Test
-    public void shouldCreateTopicForNonReadonlyCluster() {
-        var topicName = UUID.randomUUID().toString();
-        webTestClient.post()
-                .uri("/api/clusters/{clusterName}/topics", LOCAL)
-                .bodyValue(new TopicFormData()
-                        .name(topicName)
-                        .partitions(1)
-                        .replicationFactor(1)
-                        .configs(Map.of())
-                )
-                .exchange()
-                .expectStatus()
-                .isOk();
-    }
+  @Test
+  public void shouldCreateTopicForNonReadonlyCluster() {
+    var topicName = UUID.randomUUID().toString();
+    webTestClient.post()
+        .uri("/api/clusters/{clusterName}/topics", LOCAL)
+        .bodyValue(new TopicFormData()
+            .name(topicName)
+            .partitions(1)
+            .replicationFactor(1)
+            .configs(Map.of())
+        )
+        .exchange()
+        .expectStatus()
+        .isOk();
+  }
 
-    @Test
-    public void shouldNotCreateTopicForReadonlyCluster() {
-        var topicName = UUID.randomUUID().toString();
-        webTestClient.post()
-                .uri("/api/clusters/{clusterName}/topics", SECOND_LOCAL)
-                .bodyValue(new TopicFormData()
-                        .name(topicName)
-                        .partitions(1)
-                        .replicationFactor(1)
-                        .configs(Map.of())
-                )
-                .exchange()
-                .expectStatus()
-                .isEqualTo(HttpStatus.METHOD_NOT_ALLOWED);
-    }
+  @Test
+  public void shouldNotCreateTopicForReadonlyCluster() {
+    var topicName = UUID.randomUUID().toString();
+    webTestClient.post()
+        .uri("/api/clusters/{clusterName}/topics", SECOND_LOCAL)
+        .bodyValue(new TopicFormData()
+            .name(topicName)
+            .partitions(1)
+            .replicationFactor(1)
+            .configs(Map.of())
+        )
+        .exchange()
+        .expectStatus()
+        .isEqualTo(HttpStatus.METHOD_NOT_ALLOWED);
+  }
 
-    @Test
-    public void shouldUpdateTopicForNonReadonlyCluster() {
-        var topicName = UUID.randomUUID().toString();
-        webTestClient.post()
-                .uri("/api/clusters/{clusterName}/topics", LOCAL)
-                .bodyValue(new TopicFormData()
-                        .name(topicName)
-                        .partitions(1)
-                        .replicationFactor(1)
-                        .configs(Map.of())
-                )
-                .exchange()
-                .expectStatus()
-                .isOk();
-        webTestClient.patch()
-                .uri("/api/clusters/{clusterName}/topics/{topicName}", LOCAL, topicName)
-                .bodyValue(new TopicFormData()
-                        .name(topicName)
-                        .partitions(2)
-                        .replicationFactor(1)
-                        .configs(Map.of())
-                )
-                .exchange()
-                .expectStatus()
-                .isOk();
-    }
+  @Test
+  public void shouldUpdateTopicForNonReadonlyCluster() {
+    var topicName = UUID.randomUUID().toString();
+    webTestClient.post()
+        .uri("/api/clusters/{clusterName}/topics", LOCAL)
+        .bodyValue(new TopicFormData()
+            .name(topicName)
+            .partitions(1)
+            .replicationFactor(1)
+            .configs(Map.of())
+        )
+        .exchange()
+        .expectStatus()
+        .isOk();
+    webTestClient.patch()
+        .uri("/api/clusters/{clusterName}/topics/{topicName}", LOCAL, topicName)
+        .bodyValue(new TopicFormData()
+            .name(topicName)
+            .partitions(2)
+            .replicationFactor(1)
+            .configs(Map.of())
+        )
+        .exchange()
+        .expectStatus()
+        .isOk();
+  }
 
-    @Test
-    public void shouldNotUpdateTopicForReadonlyCluster() {
-        var topicName = UUID.randomUUID().toString();
-        webTestClient.patch()
-                .uri("/api/clusters/{clusterName}/topics/{topicName}", SECOND_LOCAL, topicName)
-                .bodyValue(new TopicFormData()
-                        .name(topicName)
-                        .partitions(1)
-                        .replicationFactor(1)
-                        .configs(Map.of())
-                )
-                .exchange()
-                .expectStatus()
-                .isEqualTo(HttpStatus.METHOD_NOT_ALLOWED);
-    }
+  @Test
+  public void shouldNotUpdateTopicForReadonlyCluster() {
+    var topicName = UUID.randomUUID().toString();
+    webTestClient.patch()
+        .uri("/api/clusters/{clusterName}/topics/{topicName}", SECOND_LOCAL, topicName)
+        .bodyValue(new TopicFormData()
+            .name(topicName)
+            .partitions(1)
+            .replicationFactor(1)
+            .configs(Map.of())
+        )
+        .exchange()
+        .expectStatus()
+        .isEqualTo(HttpStatus.METHOD_NOT_ALLOWED);
+  }
 }
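
The two negative tests rely on the second cluster being configured as read-only rather than on anything in the test class itself: mutating requests against such a cluster are rejected with 405 METHOD_NOT_ALLOWED by the application's read-only mode filter. A minimal sketch of the relevant configuration follows; the property keys are an assumption based on ClustersProperties and are not shown in this diff.

    kafka.clusters.1.name=<value of SECOND_LOCAL>
    kafka.clusters.1.readOnly=true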

+ 225 - 213
kafka-ui-api/src/test/java/com/provectus/kafka/ui/SchemaRegistryServiceTests.java

@@ -4,6 +4,8 @@ import com.provectus.kafka.ui.model.CompatibilityLevel;
 import com.provectus.kafka.ui.model.NewSchemaSubject;
 import com.provectus.kafka.ui.model.SchemaSubject;
 import com.provectus.kafka.ui.model.SchemaType;
+import java.util.List;
+import java.util.UUID;
 import lombok.extern.log4j.Log4j2;
 import lombok.val;
 import org.junit.jupiter.api.Assertions;
@@ -19,221 +21,231 @@ import org.springframework.test.web.reactive.server.WebTestClient;
 import org.springframework.web.reactive.function.BodyInserters;
 import reactor.core.publisher.Mono;
 
-import java.util.List;
-import java.util.UUID;
-
 @ContextConfiguration(initializers = {AbstractBaseTest.Initializer.class})
 @Log4j2
 @AutoConfigureWebTestClient(timeout = "10000")
 class SchemaRegistryServiceTests extends AbstractBaseTest {
-    @Autowired
-    WebTestClient webTestClient;
-    String subject;
-
-    @BeforeEach
-    public void setUpBefore() {
-        this.subject = UUID.randomUUID().toString();
-    }
-
-    @Test
-    public void should404WhenGetAllSchemasForUnknownCluster() {
-        webTestClient
-                .get()
-                .uri("/api/clusters/unknown-cluster/schemas")
-                .exchange()
-                .expectStatus().isNotFound();
-    }
-
-    @Test
-    public void shouldReturn404WhenGetLatestSchemaByNonExistingSubject() {
-        String unknownSchema = "unknown-schema";
-        webTestClient
-                .get()
-                .uri("/api/clusters/{clusterName}/schemas/{subject}/latest", LOCAL, unknownSchema)
-                .exchange()
-                .expectStatus().isNotFound();
-    }
-
-    /**
-     * It should create a new schema w/o submitting a schemaType field to Schema Registry
-     */
-    @Test
-    void shouldBeBadRequestIfNoSchemaType() {
-        String schema = "{\"subject\":\"%s\",\"schema\":\"{\\\"type\\\": \\\"string\\\"}\"}";
-
-        webTestClient
-                .post()
-                .uri("/api/clusters/{clusterName}/schemas", LOCAL)
-                .contentType(MediaType.APPLICATION_JSON)
-                .body(BodyInserters.fromValue(String.format(schema, subject)))
-                .exchange()
-                .expectStatus().isBadRequest();
-    }
-
-    @Test
-    void shouldReturn409WhenSchemaDuplicatesThePreviousVersion() {
-        String schema = "{\"subject\":\"%s\",\"schemaType\":\"AVRO\",\"schema\":\"{\\\"type\\\": \\\"string\\\"}\"}";
-
-        webTestClient
-                .post()
-                .uri("/api/clusters/{clusterName}/schemas", LOCAL)
-                .contentType(MediaType.APPLICATION_JSON)
-                .body(BodyInserters.fromValue(String.format(schema, subject)))
-                .exchange()
-                .expectStatus().isEqualTo(HttpStatus.OK);
-
-        webTestClient
-                .post()
-                .uri("/api/clusters/{clusterName}/schemas", LOCAL)
-                .contentType(MediaType.APPLICATION_JSON)
-                .body(BodyInserters.fromValue(String.format(schema, subject)))
-                .exchange()
-                .expectStatus().isEqualTo(HttpStatus.CONFLICT);
-    }
-
-    @Test
-    void shouldCreateNewProtobufSchema() {
-        String schema = "syntax = \"proto3\";\n\nmessage MyRecord {\n  int32 id = 1;\n  string name = 2;\n}\n";
-        NewSchemaSubject requestBody = new NewSchemaSubject()
-                .schemaType(SchemaType.PROTOBUF)
-                .subject(subject)
-                .schema(schema);
-        SchemaSubject actual = webTestClient
-                .post()
-                .uri("/api/clusters/{clusterName}/schemas", LOCAL)
-                .contentType(MediaType.APPLICATION_JSON)
-                .body(BodyInserters.fromPublisher(Mono.just(requestBody), NewSchemaSubject.class))
-                .exchange()
-                .expectStatus()
-                .isOk()
-                .expectBody(SchemaSubject.class)
-                .returnResult()
-                .getResponseBody();
-
-        Assertions.assertNotNull(actual);
-        Assertions.assertEquals(CompatibilityLevel.CompatibilityEnum.BACKWARD.name(), actual.getCompatibilityLevel());
-        Assertions.assertEquals("1", actual.getVersion());
-        Assertions.assertEquals(SchemaType.PROTOBUF, actual.getSchemaType());
-        Assertions.assertEquals(schema, actual.getSchema());
-    }
-
-    @Test
-    public void shouldReturnBackwardAsGlobalCompatibilityLevelByDefault() {
-        webTestClient
-                .get()
-                .uri("/api/clusters/{clusterName}/schemas/compatibility", LOCAL)
-                .exchange()
-                .expectStatus().isOk()
-                .expectBody(CompatibilityLevel.class)
-                .consumeWith(result -> {
-                    CompatibilityLevel responseBody = result.getResponseBody();
-                    Assertions.assertNotNull(responseBody);
-                    Assertions.assertEquals(CompatibilityLevel.CompatibilityEnum.BACKWARD, responseBody.getCompatibility());
-                });
-    }
-
-    @Test
-    public void shouldReturnNotEmptyResponseWhenGetAllSchemas() {
-        createNewSubjectAndAssert(subject);
-
-        webTestClient
-                .get()
-                .uri("/api/clusters/{clusterName}/schemas", LOCAL)
-                .exchange()
-                .expectStatus().isOk()
-                .expectBodyList(SchemaSubject.class)
-                .consumeWith(result -> {
-                    List<SchemaSubject> responseBody = result.getResponseBody();
-                    log.info("Response of test schemas: {}", responseBody);
-                    Assertions.assertNotNull(responseBody);
-                    Assertions.assertFalse(responseBody.isEmpty());
-
-                    SchemaSubject actualSchemaSubject = responseBody.stream()
-                            .filter(schemaSubject -> subject.equals(schemaSubject.getSubject()))
-                            .findFirst()
-                            .orElseThrow();
-                    Assertions.assertNotNull(actualSchemaSubject.getId());
-                    Assertions.assertNotNull(actualSchemaSubject.getVersion());
-                    Assertions.assertNotNull(actualSchemaSubject.getCompatibilityLevel());
-                    Assertions.assertEquals("\"string\"", actualSchemaSubject.getSchema());
-                });
-    }
-
-    @Test
-    public void shouldOkWhenCreateNewSchemaThenGetAndUpdateItsCompatibilityLevel() {
-        createNewSubjectAndAssert(subject);
-
-        //Get the created schema and check its items
-        webTestClient
-                .get()
-                .uri("/api/clusters/{clusterName}/schemas/{subject}/latest", LOCAL, subject)
-                .exchange()
-                .expectStatus().isOk()
-                .expectBodyList(SchemaSubject.class)
-                .consumeWith(listEntityExchangeResult -> {
-                    val expectedCompatibility = CompatibilityLevel.CompatibilityEnum.BACKWARD;
-                    assertSchemaWhenGetLatest(subject, listEntityExchangeResult, expectedCompatibility);
-                });
-
-        //Now let's change compatibility level of this schema to FULL whereas the global level should be BACKWARD
-        webTestClient.put()
-                .uri("/api/clusters/{clusterName}/schemas/{subject}/compatibility", LOCAL, subject)
-                .contentType(MediaType.APPLICATION_JSON)
-                .body(BodyInserters.fromValue("{\"compatibility\":\"FULL\"}"))
-                .exchange()
-                .expectStatus().isOk();
-
-        //Get one more time to check the schema compatibility level is changed to FULL
-        webTestClient
-                .get()
-                .uri("/api/clusters/{clusterName}/schemas/{subject}/latest", LOCAL, subject)
-                .exchange()
-                .expectStatus().isOk()
-                .expectBodyList(SchemaSubject.class)
-                .consumeWith(listEntityExchangeResult -> {
-                    val expectedCompatibility = CompatibilityLevel.CompatibilityEnum.FULL;
-                    assertSchemaWhenGetLatest(subject, listEntityExchangeResult, expectedCompatibility);
-                });
-    }
-
-    private void createNewSubjectAndAssert(String subject) {
-        webTestClient
-                .post()
-                .uri("/api/clusters/{clusterName}/schemas", LOCAL)
-                .contentType(MediaType.APPLICATION_JSON)
-                .body(BodyInserters.fromValue(
-                    String.format(
-                        "{\"subject\":\"%s\",\"schemaType\":\"AVRO\",\"schema\":\"{\\\"type\\\": \\\"string\\\"}\"}",
-                        subject
-                    )
-                ))
-                .exchange()
-                .expectStatus().isOk()
-                .expectBody(SchemaSubject.class)
-                .consumeWith(this::assertResponseBodyWhenCreateNewSchema);
-    }
-
-    private void assertSchemaWhenGetLatest(String subject, EntityExchangeResult<List<SchemaSubject>> listEntityExchangeResult, CompatibilityLevel.CompatibilityEnum expectedCompatibility) {
-        List<SchemaSubject> responseBody = listEntityExchangeResult.getResponseBody();
-        Assertions.assertNotNull(responseBody);
-        Assertions.assertEquals(1, responseBody.size());
-        SchemaSubject actualSchema = responseBody.get(0);
-        Assertions.assertNotNull(actualSchema);
-        Assertions.assertEquals(subject, actualSchema.getSubject());
-        Assertions.assertEquals("\"string\"", actualSchema.getSchema());
-
-        Assertions.assertNotNull(actualSchema.getCompatibilityLevel());
-        Assertions.assertEquals(SchemaType.AVRO, actualSchema.getSchemaType());
-        Assertions.assertEquals(expectedCompatibility.name(), actualSchema.getCompatibilityLevel());
-    }
-
-    private void assertResponseBodyWhenCreateNewSchema(EntityExchangeResult<SchemaSubject> exchangeResult) {
-        SchemaSubject responseBody = exchangeResult.getResponseBody();
-        Assertions.assertNotNull(responseBody);
-        Assertions.assertEquals("1", responseBody.getVersion());
-        Assertions.assertNotNull(responseBody.getSchema());
-        Assertions.assertNotNull(responseBody.getSubject());
-        Assertions.assertNotNull(responseBody.getCompatibilityLevel());
-        Assertions.assertEquals(SchemaType.AVRO, responseBody.getSchemaType());
-    }
+  @Autowired
+  WebTestClient webTestClient;
+  String subject;
+
+  @BeforeEach
+  public void setUpBefore() {
+    this.subject = UUID.randomUUID().toString();
+  }
+
+  @Test
+  public void should404WhenGetAllSchemasForUnknownCluster() {
+    webTestClient
+        .get()
+        .uri("/api/clusters/unknown-cluster/schemas")
+        .exchange()
+        .expectStatus().isNotFound();
+  }
+
+  @Test
+  public void shouldReturn404WhenGetLatestSchemaByNonExistingSubject() {
+    String unknownSchema = "unknown-schema";
+    webTestClient
+        .get()
+        .uri("/api/clusters/{clusterName}/schemas/{subject}/latest", LOCAL, unknownSchema)
+        .exchange()
+        .expectStatus().isNotFound();
+  }
+
+  /**
+   * Creating a new schema without submitting a schemaType field should be rejected as a bad request.
+   */
+  @Test
+  void shouldBeBadRequestIfNoSchemaType() {
+    String schema = "{\"subject\":\"%s\",\"schema\":\"{\\\"type\\\": \\\"string\\\"}\"}";
+
+    webTestClient
+        .post()
+        .uri("/api/clusters/{clusterName}/schemas", LOCAL)
+        .contentType(MediaType.APPLICATION_JSON)
+        .body(BodyInserters.fromValue(String.format(schema, subject)))
+        .exchange()
+        .expectStatus().isBadRequest();
+  }
+
+  @Test
+  void shouldReturn409WhenSchemaDuplicatesThePreviousVersion() {
+    String schema =
+        "{\"subject\":\"%s\",\"schemaType\":\"AVRO\",\"schema\":"
+            + "\"{\\\"type\\\": \\\"string\\\"}\"}";
+
+    webTestClient
+        .post()
+        .uri("/api/clusters/{clusterName}/schemas", LOCAL)
+        .contentType(MediaType.APPLICATION_JSON)
+        .body(BodyInserters.fromValue(String.format(schema, subject)))
+        .exchange()
+        .expectStatus().isEqualTo(HttpStatus.OK);
+
+    webTestClient
+        .post()
+        .uri("/api/clusters/{clusterName}/schemas", LOCAL)
+        .contentType(MediaType.APPLICATION_JSON)
+        .body(BodyInserters.fromValue(String.format(schema, subject)))
+        .exchange()
+        .expectStatus().isEqualTo(HttpStatus.CONFLICT);
+  }
+
+  @Test
+  void shouldCreateNewProtobufSchema() {
+    String schema =
+        "syntax = \"proto3\";\n\nmessage MyRecord {\n  int32 id = 1;\n  string name = 2;\n}\n";
+    NewSchemaSubject requestBody = new NewSchemaSubject()
+        .schemaType(SchemaType.PROTOBUF)
+        .subject(subject)
+        .schema(schema);
+    SchemaSubject actual = webTestClient
+        .post()
+        .uri("/api/clusters/{clusterName}/schemas", LOCAL)
+        .contentType(MediaType.APPLICATION_JSON)
+        .body(BodyInserters.fromPublisher(Mono.just(requestBody), NewSchemaSubject.class))
+        .exchange()
+        .expectStatus()
+        .isOk()
+        .expectBody(SchemaSubject.class)
+        .returnResult()
+        .getResponseBody();
+
+    Assertions.assertNotNull(actual);
+    Assertions.assertEquals(CompatibilityLevel.CompatibilityEnum.BACKWARD.name(),
+        actual.getCompatibilityLevel());
+    Assertions.assertEquals("1", actual.getVersion());
+    Assertions.assertEquals(SchemaType.PROTOBUF, actual.getSchemaType());
+    Assertions.assertEquals(schema, actual.getSchema());
+  }
+
+  @Test
+  public void shouldReturnBackwardAsGlobalCompatibilityLevelByDefault() {
+    webTestClient
+        .get()
+        .uri("/api/clusters/{clusterName}/schemas/compatibility", LOCAL)
+        .exchange()
+        .expectStatus().isOk()
+        .expectBody(CompatibilityLevel.class)
+        .consumeWith(result -> {
+          CompatibilityLevel responseBody = result.getResponseBody();
+          Assertions.assertNotNull(responseBody);
+          Assertions.assertEquals(CompatibilityLevel.CompatibilityEnum.BACKWARD,
+              responseBody.getCompatibility());
+        });
+  }
+
+  @Test
+  public void shouldReturnNotEmptyResponseWhenGetAllSchemas() {
+    createNewSubjectAndAssert(subject);
+
+    webTestClient
+        .get()
+        .uri("/api/clusters/{clusterName}/schemas", LOCAL)
+        .exchange()
+        .expectStatus().isOk()
+        .expectBodyList(SchemaSubject.class)
+        .consumeWith(result -> {
+          List<SchemaSubject> responseBody = result.getResponseBody();
+          log.info("Response of test schemas: {}", responseBody);
+          Assertions.assertNotNull(responseBody);
+          Assertions.assertFalse(responseBody.isEmpty());
+
+          SchemaSubject actualSchemaSubject = responseBody.stream()
+              .filter(schemaSubject -> subject.equals(schemaSubject.getSubject()))
+              .findFirst()
+              .orElseThrow();
+          Assertions.assertNotNull(actualSchemaSubject.getId());
+          Assertions.assertNotNull(actualSchemaSubject.getVersion());
+          Assertions.assertNotNull(actualSchemaSubject.getCompatibilityLevel());
+          Assertions.assertEquals("\"string\"", actualSchemaSubject.getSchema());
+        });
+  }
+
+  @Test
+  public void shouldOkWhenCreateNewSchemaThenGetAndUpdateItsCompatibilityLevel() {
+    createNewSubjectAndAssert(subject);
+
+    //Get the created schema and check its items
+    webTestClient
+        .get()
+        .uri("/api/clusters/{clusterName}/schemas/{subject}/latest", LOCAL, subject)
+        .exchange()
+        .expectStatus().isOk()
+        .expectBodyList(SchemaSubject.class)
+        .consumeWith(listEntityExchangeResult -> {
+          val expectedCompatibility =
+              CompatibilityLevel.CompatibilityEnum.BACKWARD;
+          assertSchemaWhenGetLatest(subject, listEntityExchangeResult, expectedCompatibility);
+        });
+
+    // Now let's change compatibility level of this schema to FULL whereas the global
+    // level should be BACKWARD
+
+    webTestClient.put()
+        .uri("/api/clusters/{clusterName}/schemas/{subject}/compatibility", LOCAL, subject)
+        .contentType(MediaType.APPLICATION_JSON)
+        .body(BodyInserters.fromValue("{\"compatibility\":\"FULL\"}"))
+        .exchange()
+        .expectStatus().isOk();
+
+    //Get one more time to check the schema compatibility level is changed to FULL
+    webTestClient
+        .get()
+        .uri("/api/clusters/{clusterName}/schemas/{subject}/latest", LOCAL, subject)
+        .exchange()
+        .expectStatus().isOk()
+        .expectBodyList(SchemaSubject.class)
+        .consumeWith(listEntityExchangeResult -> {
+          val expectedCompatibility =
+              CompatibilityLevel.CompatibilityEnum.FULL;
+          assertSchemaWhenGetLatest(subject, listEntityExchangeResult, expectedCompatibility);
+        });
+  }
+
+  private void createNewSubjectAndAssert(String subject) {
+    webTestClient
+        .post()
+        .uri("/api/clusters/{clusterName}/schemas", LOCAL)
+        .contentType(MediaType.APPLICATION_JSON)
+        .body(BodyInserters.fromValue(
+            String.format(
+                "{\"subject\":\"%s\",\"schemaType\":\"AVRO\",\"schema\":"
+                    + "\"{\\\"type\\\": \\\"string\\\"}\"}",
+                subject
+            )
+        ))
+        .exchange()
+        .expectStatus().isOk()
+        .expectBody(SchemaSubject.class)
+        .consumeWith(this::assertResponseBodyWhenCreateNewSchema);
+  }
+
+  private void assertSchemaWhenGetLatest(
+      String subject, EntityExchangeResult<List<SchemaSubject>> listEntityExchangeResult,
+      CompatibilityLevel.CompatibilityEnum expectedCompatibility) {
+    List<SchemaSubject> responseBody = listEntityExchangeResult.getResponseBody();
+    Assertions.assertNotNull(responseBody);
+    Assertions.assertEquals(1, responseBody.size());
+    SchemaSubject actualSchema = responseBody.get(0);
+    Assertions.assertNotNull(actualSchema);
+    Assertions.assertEquals(subject, actualSchema.getSubject());
+    Assertions.assertEquals("\"string\"", actualSchema.getSchema());
+
+    Assertions.assertNotNull(actualSchema.getCompatibilityLevel());
+    Assertions.assertEquals(SchemaType.AVRO, actualSchema.getSchemaType());
+    Assertions.assertEquals(expectedCompatibility.name(), actualSchema.getCompatibilityLevel());
+  }
+
+  private void assertResponseBodyWhenCreateNewSchema(
+      EntityExchangeResult<SchemaSubject> exchangeResult) {
+    SchemaSubject responseBody = exchangeResult.getResponseBody();
+    Assertions.assertNotNull(responseBody);
+    Assertions.assertEquals("1", responseBody.getVersion());
+    Assertions.assertNotNull(responseBody.getSchema());
+    Assertions.assertNotNull(responseBody.getSubject());
+    Assertions.assertNotNull(responseBody.getCompatibilityLevel());
+    Assertions.assertEquals(SchemaType.AVRO, responseBody.getSchemaType());
+  }
 }
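
For readability, the escaped format string used in shouldReturn409WhenSchemaDuplicatesThePreviousVersion and createNewSubjectAndAssert expands to the following request body once %s is replaced with the generated subject:

    {"subject":"<subject>","schemaType":"AVRO","schema":"{\"type\": \"string\"}"}

The registered schema is therefore the bare Avro type "string", which is why the assertions expect the stored schema text to be the quoted literal "\"string\"".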

+ 38 - 39
kafka-ui-api/src/test/java/com/provectus/kafka/ui/container/KafkaConnectContainer.java

@@ -1,48 +1,47 @@
 package com.provectus.kafka.ui.container;
 
+import java.time.Duration;
 import org.testcontainers.containers.GenericContainer;
 import org.testcontainers.containers.KafkaContainer;
 import org.testcontainers.containers.Network;
 import org.testcontainers.containers.wait.strategy.Wait;
 
-import java.time.Duration;
-
 public class KafkaConnectContainer extends GenericContainer<KafkaConnectContainer> {
-    private static final int CONNECT_PORT = 8083;
-
-    public KafkaConnectContainer(String version) {
-        super("confluentinc/cp-kafka-connect:" + version);
-        addExposedPort(CONNECT_PORT);
-        waitStrategy = Wait.forHttp("/")
-                .withStartupTimeout(Duration.ofMinutes(5));
-    }
-
-
-    public KafkaConnectContainer withKafka(KafkaContainer kafka) {
-        String bootstrapServers = kafka.getNetworkAliases().get(0) + ":9092";
-        return withKafka(kafka.getNetwork(), bootstrapServers);
-    }
-
-    public KafkaConnectContainer withKafka(Network network, String bootstrapServers) {
-        withNetwork(network);
-        withEnv("CONNECT_BOOTSTRAP_SERVERS", "PLAINTEXT://" + bootstrapServers);
-        withEnv("CONNECT_GROUP_ID", "connect-group");
-        withEnv("CONNECT_CONFIG_STORAGE_TOPIC", "_connect_configs");
-        withEnv("CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR", "1");
-        withEnv("CONNECT_OFFSET_STORAGE_TOPIC", "_connect_offset");
-        withEnv("CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR", "1");
-        withEnv("CONNECT_STATUS_STORAGE_TOPIC", "_connect_status");
-        withEnv("CONNECT_STATUS_STORAGE_REPLICATION_FACTOR", "1");
-        withEnv("CONNECT_KEY_CONVERTER", "org.apache.kafka.connect.storage.StringConverter");
-        withEnv("CONNECT_VALUE_CONVERTER", "org.apache.kafka.connect.storage.StringConverter");
-        withEnv("CONNECT_INTERNAL_KEY_CONVERTER", "org.apache.kafka.connect.json.JsonConverter");
-        withEnv("CONNECT_INTERNAL_VALUE_CONVERTER", "org.apache.kafka.connect.json.JsonConverter");
-        withEnv("CONNECT_REST_ADVERTISED_HOST_NAME", "kafka-connect");
-        withEnv("CONNECT_PLUGIN_PATH", "/usr/share/java,/usr/share/confluent-hub-components");
-        return self();
-    }
-
-    public String getTarget() {
-        return "http://" + getContainerIpAddress() + ":" + getMappedPort(CONNECT_PORT);
-    }
+  private static final int CONNECT_PORT = 8083;
+
+  public KafkaConnectContainer(String version) {
+    super("confluentinc/cp-kafka-connect:" + version);
+    addExposedPort(CONNECT_PORT);
+    waitStrategy = Wait.forHttp("/")
+        .withStartupTimeout(Duration.ofMinutes(5));
+  }
+
+
+  public KafkaConnectContainer withKafka(KafkaContainer kafka) {
+    String bootstrapServers = kafka.getNetworkAliases().get(0) + ":9092";
+    return withKafka(kafka.getNetwork(), bootstrapServers);
+  }
+
+  public KafkaConnectContainer withKafka(Network network, String bootstrapServers) {
+    withNetwork(network);
+    withEnv("CONNECT_BOOTSTRAP_SERVERS", "PLAINTEXT://" + bootstrapServers);
+    withEnv("CONNECT_GROUP_ID", "connect-group");
+    withEnv("CONNECT_CONFIG_STORAGE_TOPIC", "_connect_configs");
+    withEnv("CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR", "1");
+    withEnv("CONNECT_OFFSET_STORAGE_TOPIC", "_connect_offset");
+    withEnv("CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR", "1");
+    withEnv("CONNECT_STATUS_STORAGE_TOPIC", "_connect_status");
+    withEnv("CONNECT_STATUS_STORAGE_REPLICATION_FACTOR", "1");
+    withEnv("CONNECT_KEY_CONVERTER", "org.apache.kafka.connect.storage.StringConverter");
+    withEnv("CONNECT_VALUE_CONVERTER", "org.apache.kafka.connect.storage.StringConverter");
+    withEnv("CONNECT_INTERNAL_KEY_CONVERTER", "org.apache.kafka.connect.json.JsonConverter");
+    withEnv("CONNECT_INTERNAL_VALUE_CONVERTER", "org.apache.kafka.connect.json.JsonConverter");
+    withEnv("CONNECT_REST_ADVERTISED_HOST_NAME", "kafka-connect");
+    withEnv("CONNECT_PLUGIN_PATH", "/usr/share/java,/usr/share/confluent-hub-components");
+    return self();
+  }
+
+  public String getTarget() {
+    return "http://" + getContainerIpAddress() + ":" + getMappedPort(CONNECT_PORT);
+  }
 }

+ 19 - 19
kafka-ui-api/src/test/java/com/provectus/kafka/ui/container/SchemaRegistryContainer.java

@@ -5,27 +5,27 @@ import org.testcontainers.containers.KafkaContainer;
 import org.testcontainers.containers.Network;
 
 public class SchemaRegistryContainer extends GenericContainer<SchemaRegistryContainer> {
-    private static final int SCHEMA_PORT = 8081;
+  private static final int SCHEMA_PORT = 8081;
 
-    public SchemaRegistryContainer(String version) {
-        super("confluentinc/cp-schema-registry:" + version);
-        withExposedPorts(8081);
-    }
+  public SchemaRegistryContainer(String version) {
+    super("confluentinc/cp-schema-registry:" + version);
+    withExposedPorts(8081);
+  }
 
-    public SchemaRegistryContainer withKafka(KafkaContainer kafka) {
-        String bootstrapServers = kafka.getNetworkAliases().get(0) + ":9092";
-        return withKafka(kafka.getNetwork(), bootstrapServers);
-    }
+  public SchemaRegistryContainer withKafka(KafkaContainer kafka) {
+    String bootstrapServers = kafka.getNetworkAliases().get(0) + ":9092";
+    return withKafka(kafka.getNetwork(), bootstrapServers);
+  }
 
-    public SchemaRegistryContainer withKafka(Network network, String bootstrapServers) {
-        withNetwork(network);
-        withEnv("SCHEMA_REGISTRY_HOST_NAME", "schema-registry");
-        withEnv("SCHEMA_REGISTRY_LISTENERS", "http://0.0.0.0:" + SCHEMA_PORT);
-        withEnv("SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS", "PLAINTEXT://" + bootstrapServers);
-        return self();
-    }
+  public SchemaRegistryContainer withKafka(Network network, String bootstrapServers) {
+    withNetwork(network);
+    withEnv("SCHEMA_REGISTRY_HOST_NAME", "schema-registry");
+    withEnv("SCHEMA_REGISTRY_LISTENERS", "http://0.0.0.0:" + SCHEMA_PORT);
+    withEnv("SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS", "PLAINTEXT://" + bootstrapServers);
+    return self();
+  }
 
-    public String getTarget() {
-        return "http://" + getContainerIpAddress() + ":" + getMappedPort(SCHEMA_PORT);
-    }
+  public String getTarget() {
+    return "http://" + getContainerIpAddress() + ":" + getMappedPort(SCHEMA_PORT);
+  }
 }
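
KafkaConnectContainer and SchemaRegistryContainer follow the same pattern: join the broker's network, point the service at the broker via CONNECT_BOOTSTRAP_SERVERS / SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS, and expose the externally reachable base URL through getTarget(). A minimal wiring sketch is shown below; the Confluent image version, variable names, and exact KafkaContainer constructor form are assumptions (they depend on the Testcontainers version in use) and are not taken from this commit.

    // Hedged sketch: shared network plus both auxiliary containers wired to the broker.
    Network network = Network.newNetwork();
    KafkaContainer kafka = new KafkaContainer(
        DockerImageName.parse("confluentinc/cp-kafka:5.2.1")).withNetwork(network);
    SchemaRegistryContainer schemaRegistry = new SchemaRegistryContainer("5.2.1").withKafka(kafka);
    KafkaConnectContainer kafkaConnect = new KafkaConnectContainer("5.2.1").withKafka(kafka);
    kafka.start();
    schemaRegistry.start();
    kafkaConnect.start();
    String schemaRegistryUrl = schemaRegistry.getTarget(); // e.g. http://localhost:<mapped port>
    String connectUrl = kafkaConnect.getTarget();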

+ 24 - 21
kafka-ui-api/src/test/java/com/provectus/kafka/ui/deserialization/SchemaRegistryRecordDeserializerTest.java

@@ -1,34 +1,37 @@
 package com.provectus.kafka.ui.deserialization;
 
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.provectus.kafka.ui.model.KafkaCluster;
+import java.util.Map;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.common.utils.Bytes;
 import org.junit.jupiter.api.Test;
 
-import java.util.Map;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-
 class SchemaRegistryRecordDeserializerTest {
 
-    private final SchemaRegistryRecordDeserializer deserializer = new SchemaRegistryRecordDeserializer(
-            KafkaCluster.builder()
-                    .schemaNameTemplate("%s-value")
-                    .build(),
-            new ObjectMapper()
-    );
+  private final SchemaRegistryRecordDeserializer deserializer =
+      new SchemaRegistryRecordDeserializer(
+          KafkaCluster.builder()
+              .schemaNameTemplate("%s-value")
+              .build(),
+          new ObjectMapper()
+      );
 
-    @Test
-    public void shouldDeserializeStringValue() {
-        var value = "test";
-        var deserializedRecord = deserializer.deserialize(new ConsumerRecord<>("topic", 1, 0, Bytes.wrap("key".getBytes()), Bytes.wrap(value.getBytes())));
-        assertEquals(value, deserializedRecord);
-    }
+  @Test
+  public void shouldDeserializeStringValue() {
+    var value = "test";
+    var deserializedRecord = deserializer.deserialize(
+        new ConsumerRecord<>("topic", 1, 0, Bytes.wrap("key".getBytes()),
+            Bytes.wrap(value.getBytes())));
+    assertEquals(value, deserializedRecord);
+  }
 
-    @Test
-    public void shouldDeserializeNullValueRecordToEmptyMap() {
-        var deserializedRecord = deserializer.deserialize(new ConsumerRecord<>("topic", 1, 0, Bytes.wrap("key".getBytes()), null));
-        assertEquals(Map.of(), deserializedRecord);
-    }
+  @Test
+  public void shouldDeserializeNullValueRecordToEmptyMap() {
+    var deserializedRecord = deserializer
+        .deserialize(new ConsumerRecord<>("topic", 1, 0, Bytes.wrap("key".getBytes()), null));
+    assertEquals(Map.of(), deserializedRecord);
+  }
 }

+ 20 - 21
kafka-ui-api/src/test/java/com/provectus/kafka/ui/producer/KafkaTestProducer.java

@@ -1,35 +1,34 @@
 package com.provectus.kafka.ui.producer;
 
+import java.util.Map;
 import org.apache.kafka.clients.producer.KafkaProducer;
 import org.apache.kafka.clients.producer.ProducerConfig;
 import org.apache.kafka.clients.producer.ProducerRecord;
 import org.apache.kafka.common.serialization.StringSerializer;
 import org.testcontainers.containers.KafkaContainer;
 
-import java.util.Map;
-
 public class KafkaTestProducer<KeyT, ValueT> implements AutoCloseable {
-    private final KafkaProducer<KeyT, ValueT> producer;
+  private final KafkaProducer<KeyT, ValueT> producer;
 
-    private KafkaTestProducer(KafkaProducer<KeyT, ValueT> producer) {
-        this.producer = producer;
-    }
+  private KafkaTestProducer(KafkaProducer<KeyT, ValueT> producer) {
+    this.producer = producer;
+  }
 
-    public static KafkaTestProducer<String, String> forKafka(KafkaContainer kafkaContainer) {
-        return new KafkaTestProducer<>(new KafkaProducer<>(Map.of(
-                ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaContainer.getBootstrapServers(),
-                ProducerConfig.CLIENT_ID_CONFIG, "KafkaTestProducer",
-                ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class,
-                ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class
-        )));
-    }
+  public static KafkaTestProducer<String, String> forKafka(KafkaContainer kafkaContainer) {
+    return new KafkaTestProducer<>(new KafkaProducer<>(Map.of(
+        ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaContainer.getBootstrapServers(),
+        ProducerConfig.CLIENT_ID_CONFIG, "KafkaTestProducer",
+        ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class,
+        ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class
+    )));
+  }
 
-    public void send(String topic, ValueT value) {
-        producer.send(new ProducerRecord<>(topic, value));
-    }
+  public void send(String topic, ValueT value) {
+    producer.send(new ProducerRecord<>(topic, value));
+  }
 
-    @Override
-    public void close() {
-        producer.close();
-    }
+  @Override
+  public void close() {
+    producer.close();
+  }
 }

+ 73 - 75
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ClusterServiceTest.java

@@ -1,17 +1,12 @@
 package com.provectus.kafka.ui.service;
 
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.Mockito.when;
+
 import com.provectus.kafka.ui.mapper.ClusterMapper;
 import com.provectus.kafka.ui.model.InternalTopic;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.Topic;
-import org.junit.jupiter.api.Test;
-import org.junit.jupiter.api.extension.ExtendWith;
-import org.mapstruct.factory.Mappers;
-import org.mockito.InjectMocks;
-import org.mockito.Mock;
-import org.mockito.Spy;
-import org.mockito.junit.jupiter.MockitoExtension;
-
 import java.util.Map;
 import java.util.Objects;
 import java.util.Optional;
@@ -19,83 +14,86 @@ import java.util.UUID;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
-
-import static org.assertj.core.api.Assertions.assertThat;
-import static org.mockito.Mockito.when;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mapstruct.factory.Mappers;
+import org.mockito.InjectMocks;
+import org.mockito.Mock;
+import org.mockito.Spy;
+import org.mockito.junit.jupiter.MockitoExtension;
 
 @ExtendWith(MockitoExtension.class)
 class ClusterServiceTest {
-    @InjectMocks
-    private ClusterService clusterService;
-
-    @Mock
-    private ClustersStorage clustersStorage;
-    @Spy
-    private final ClusterMapper clusterMapper = Mappers.getMapper(ClusterMapper.class);
+  @Spy
+  private final ClusterMapper clusterMapper = Mappers.getMapper(ClusterMapper.class);
+  @InjectMocks
+  private ClusterService clusterService;
+  @Mock
+  private ClustersStorage clustersStorage;
 
-    @Test
-    public void shouldListFirst20Topics() {
-        var topicName = UUID.randomUUID().toString();
+  @Test
+  public void shouldListFirst20Topics() {
+    var topicName = UUID.randomUUID().toString();
 
-        when(clustersStorage.getClusterByName(topicName))
-                .thenReturn(Optional.of(KafkaCluster.builder()
-                        .topics(
-                                IntStream.rangeClosed(1, 100).boxed()
-                                        .map(Objects::toString)
-                                        .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
-                                                .partitions(Map.of())
-                                                .name(e)
-                                                .build()))
-                        )
-                        .build()));
+    when(clustersStorage.getClusterByName(topicName))
+        .thenReturn(Optional.of(KafkaCluster.builder()
+            .topics(
+                IntStream.rangeClosed(1, 100).boxed()
+                    .map(Objects::toString)
+                    .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
+                        .partitions(Map.of())
+                        .name(e)
+                        .build()))
+            )
+            .build()));
 
-        var topics = clusterService.getTopics(topicName, Optional.empty(), Optional.empty());
-        assertThat(topics.getPageCount()).isEqualTo(5);
-        assertThat(topics.getTopics()).hasSize(20);
-        assertThat(topics.getTopics()).map(Topic::getName).isSorted();
-    }
+    var topics = clusterService.getTopics(topicName, Optional.empty(), Optional.empty());
+    assertThat(topics.getPageCount()).isEqualTo(5);
+    assertThat(topics.getTopics()).hasSize(20);
+    assertThat(topics.getTopics()).map(Topic::getName).isSorted();
+  }
 
-    @Test
-    public void shouldCalculateCorrectPageCountForNonDivisiblePageSize() {
-        var topicName = UUID.randomUUID().toString();
+  @Test
+  public void shouldCalculateCorrectPageCountForNonDivisiblePageSize() {
+    var topicName = UUID.randomUUID().toString();
 
-        when(clustersStorage.getClusterByName(topicName))
-                .thenReturn(Optional.of(KafkaCluster.builder()
-                        .topics(
-                                IntStream.rangeClosed(1, 100).boxed()
-                                        .map(Objects::toString)
-                                        .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
-                                                .partitions(Map.of())
-                                                .name(e)
-                                                .build()))
-                        )
-                        .build()));
+    when(clustersStorage.getClusterByName(topicName))
+        .thenReturn(Optional.of(KafkaCluster.builder()
+            .topics(
+                IntStream.rangeClosed(1, 100).boxed()
+                    .map(Objects::toString)
+                    .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
+                        .partitions(Map.of())
+                        .name(e)
+                        .build()))
+            )
+            .build()));
 
-        var topics = clusterService.getTopics(topicName, Optional.of(4), Optional.of(33));
-        assertThat(topics.getPageCount()).isEqualTo(4);
-        assertThat(topics.getTopics()).hasSize(1)
-                .first().extracting(Topic::getName).isEqualTo("99");
-    }
+    var topics = clusterService.getTopics(topicName, Optional.of(4), Optional.of(33));
+    assertThat(topics.getPageCount()).isEqualTo(4);
+    assertThat(topics.getTopics()).hasSize(1)
+        .first().extracting(Topic::getName).isEqualTo("99");
+  }
 
-    @Test
-    public void shouldCorrectlyHandleNonPositivePageNumberAndPageSize() {
-        var topicName = UUID.randomUUID().toString();
+  @Test
+  public void shouldCorrectlyHandleNonPositivePageNumberAndPageSize() {
+    var topicName = UUID.randomUUID().toString();
 
-        when(clustersStorage.getClusterByName(topicName))
-                .thenReturn(Optional.of(KafkaCluster.builder()
-                        .topics(
-                                IntStream.rangeClosed(1, 100).boxed()
-                                        .map(Objects::toString)
-                                        .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
-                                                .partitions(Map.of())
-                                                .name(e)
-                                                .build()))
-                        )
-                        .build()));
+    when(clustersStorage.getClusterByName(topicName))
+        .thenReturn(Optional.of(KafkaCluster.builder()
+            .topics(
+                IntStream.rangeClosed(1, 100).boxed()
+                    .map(Objects::toString)
+                    .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
+                        .partitions(Map.of())
+                        .name(e)
+                        .build()))
+            )
+            .build()));
 
-        var topics = clusterService.getTopics(topicName, Optional.of(0), Optional.of(-1));
-        assertThat(topics.getPageCount()).isEqualTo(5);
-        assertThat(topics.getTopics()).hasSize(20);
-        assertThat(topics.getTopics()).map(Topic::getName).isSorted();
-    }
+    var topics = clusterService.getTopics(topicName, Optional.of(0), Optional.of(-1));
+    assertThat(topics.getPageCount()).isEqualTo(5);
+    assertThat(topics.getTopics()).hasSize(20);
+    assertThat(topics.getTopics()).map(Topic::getName).isSorted();
+  }
 }
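
For reference, the paging assertions follow directly from the stubbed data: 100 topics with the default page size of 20 give 5 pages; with a requested page size of 33 there are ceil(100 / 33) = 4 pages of sizes 33 + 33 + 33 + 1, and because topic names are sorted lexicographically ("1", "10", "100", "11", ..., "99"), the single topic left on page 4 is "99". A non-positive page number and page size fall back to the same defaults exercised by the first test.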