
Merge branch 'master' into checks/447-creating-topic-via-api

Bogdan Volodarskiy 4 years ago
parent commit b2f3561405
100 changed files with 5873 additions and 21579 deletions
  1. + 1 - 1  .github/workflows/backend.yml
  2. + 4 - 1  README.md
  3. + 1 - 1  charts/kafka-ui/templates/NOTES.txt
  4. + 4 - 2  charts/kafka-ui/templates/deployment.yaml
  5. + 22 - 14  charts/kafka-ui/templates/ingress.yaml
  6. + 23 - 4  charts/kafka-ui/values.yaml
  7. + 3 - 0  docker/jaas/client.properties
  8. + 14 - 0  docker/jaas/kafka_server.conf
  9. + 52 - 0  docker/kafka-ui-sasl.yaml
  10. + 48 - 0  guides/SSO.md
  11. + 1 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java
  12. + 3 - 3  kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java
  13. + 6 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ClustersController.java
  14. + 70 - 4  kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
  15. + 33 - 7  kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
  16. + 23 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java
  17. + 0 - 46  kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/ProtobufFileRecordDeserializer.java
  18. + 0 - 9  kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/RecordDeserializer.java
  19. + 0 - 232  kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/SchemaRegistryRecordDeserializer.java
  20. + 0 - 19  kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/SimpleRecordDeserializer.java
  21. + 103 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
  22. + 49 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java
  23. + 2 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java
  24. + 12 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/IllegalEntityStateException.java
  25. + 13 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/NotFoundException.java
  26. + 4 - 4  kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java
  27. + 1 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ExtendedAdminClient.java
  28. + 34 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalConsumerGroup.java
  29. + 3 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java
  30. + 11 - 6  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/DeserializationService.java
  31. + 110 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/ProtobufFileRecordSerDe.java
  32. + 26 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/RecordSerDe.java
  33. + 45 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/SimpleRecordSerDe.java
  34. + 23 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageFormatter.java
  35. + 40 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageReader.java
  36. + 8 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageFormat.java
  37. + 5 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageFormatter.java
  38. + 32 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageReader.java
  39. + 23 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/ProtobufMessageFormatter.java
  40. + 44 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/ProtobufMessageReader.java
  41. + 288 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java
  42. + 11 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/StringMessageFormatter.java
  43. + 127 - 39  kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java
  44. + 5 - 3  kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersMetricsScheduler.java
  45. + 24 - 167  kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java
  46. + 290 - 47  kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java
  47. + 169 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/OffsetsResetService.java
  48. + 168 - 97  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java
  49. + 146 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java
  50. + 120 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeekBackward.java
  51. + 61 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeekForward.java
  52. + 21 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ArrayFieldSchema.java
  53. + 137 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java
  54. + 24 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/EnumJsonType.java
  55. + 8 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/FieldSchema.java
  56. + 65 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/JsonSchema.java
  57. + 7 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/JsonSchemaConverter.java
  58. + 41 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/JsonType.java
  59. + 22 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/MapFieldSchema.java
  60. + 46 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ObjectFieldSchema.java
  61. + 27 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/OneOfFieldSchema.java
  62. + 134 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ProtobufSchemaConverter.java
  63. + 18 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/RefFieldSchema.java
  64. + 17 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/SimpleFieldSchema.java
  65. + 21 - 0  kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/SimpleJsonType.java
  66. + 12 - 13  kafka-ui-api/src/main/resources/application-local.yml
  67. + 9 - 10  kafka-ui-api/src/main/resources/application-sdp.yml
  68. + 4 - 4  kafka-ui-api/src/test/java/com/provectus/kafka/ui/AbstractBaseTest.java
  69. + 99 - 0  kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerGroupTests.java
  70. + 51 - 0  kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java
  71. + 8 - 1  kafka-ui-api/src/test/java/com/provectus/kafka/ui/container/SchemaRegistryContainer.java
  72. + 8 - 8  kafka-ui-api/src/test/java/com/provectus/kafka/ui/serde/SchemaRegistryRecordDeserializerTest.java
  73. + 64 - 10  kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ClusterServiceTest.java
  74. + 219 - 0  kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsResetServiceTest.java
  75. + 0 - 119  kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsSeekTest.java
  76. + 203 - 31  kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java
  77. + 360 - 0  kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java
  78. + 196 - 0  kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/OffsetsSeekTest.java
  79. + 91 - 0  kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java
  80. + 63 - 0  kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/ProtobufSchemaConverterTest.java
  81. + 370 - 48  kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml
  82. + 3 - 0  kafka-ui-e2e-checks/.env.ci
  83. + 2 - 2  kafka-ui-e2e-checks/.env.example
  84. + 10 - 2  kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/BaseTest.java
  85. + 5 - 1  kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/screenshots/Screenshooter.java
  86. + 1 - 1  kafka-ui-react-app/.nvmrc
  87. + 10 - 0  kafka-ui-react-app/README.md
  88. + 22 - 20583  kafka-ui-react-app/package-lock.json
  89. + 20 - 20  kafka-ui-react-app/package.json
  90. + 88 - 0  kafka-ui-react-app/src/components/Brokers/__test__/Brokers.spec.tsx
  91. + 8 - 0  kafka-ui-react-app/src/components/Brokers/__test__/BrokersContainer.spec.tsx
  92. + 731 - 0  kafka-ui-react-app/src/components/Brokers/__test__/__snapshots__/Brokers.spec.tsx.snap
  93. + 1 - 1  kafka-ui-react-app/src/components/Cluster/__tests__/Cluster.spec.tsx
  94. + 44 - 8  kafka-ui-react-app/src/components/ConsumerGroups/Details/Details.tsx
  95. + 8 - 2  kafka-ui-react-app/src/components/ConsumerGroups/Details/DetailsContainer.ts
  96. + 2 - 2  kafka-ui-react-app/src/components/ConsumerGroups/Details/ListItem.tsx
  97. + 102 - 0  kafka-ui-react-app/src/components/ConsumerGroups/Details/__tests__/Details.spec.tsx
  98. + 150 - 0  kafka-ui-react-app/src/components/ConsumerGroups/Details/__tests__/__snapshots__/Details.spec.tsx.snap
  99. + 6 - 3  kafka-ui-react-app/src/components/ConsumerGroups/List/List.tsx
  100. + 10 - 4  kafka-ui-react-app/src/components/ConsumerGroups/List/ListItem.tsx

+ 1 - 1
.github/workflows/backend.yml

@@ -20,7 +20,7 @@ jobs:
       - name: Set the values
         id: step_one
         run: |
-           cat "./kafka-ui-e2e-checks/.env.example" >> "./kafka-ui-e2e-checks/.env"
+           cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
       - name: pull docker
         id: step_four
         run: |

+ 4 - 1
README.md

@@ -108,7 +108,7 @@ To read more please follow to [chart documentation](charts/kafka-ui/README.md)
 
 # Guides
 
-To be done
+- [SSO configuration](guides/SSO.md)
 
 ## Connecting to a Secure Broker
 
@@ -162,5 +162,8 @@ For example, if you want to use an environment variable to set the `name` parame
 |`KAFKA_CLUSTERS_0_SCHEMANAMETEMPLATE`  |How keys are saved to schemaRegistry
 |`KAFKA_CLUSTERS_0_JMXPORT`        	|Open jmxPosrts of a broker
 |`KAFKA_CLUSTERS_0_READONLY`        	|Enable read only mode. Default: false
+|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME` |Given name for the Kafka Connect cluster
+|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS` |Address of the Kafka Connect service endpoint
 |`LOGGING_LEVEL_ROOT`        	| Setting log level (all, debug, info, warn, error, fatal, off). Default: debug
 |`LOGGING_LEVEL_COM_PROVECTUS`        	|Setting log level (all, debug, info, warn, error, fatal, off). Default: debug
+|`SERVER_PORT` |Port for the embedded server. Default `8080`
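
The two Kafka Connect variables and `SERVER_PORT` are plain environment variables, so they slot into a Docker Compose `environment` block next to the existing cluster settings. A minimal sketch, assuming a single cluster named `local` and a Connect worker at `http://kafka-connect:8083` (both values are illustrative, not part of this commit):

``` yaml
# Hypothetical docker-compose excerpt showing the variables documented above
services:
  kafka-ui:
    image: provectuslabs/kafka-ui:latest
    ports:
      - 8081:8081
    environment:
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
      # Register a Kafka Connect cluster by display name and REST endpoint
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect:8083
      # Move the embedded server off the default 8080
      SERVER_PORT: 8081
```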

+ 1 - 1
charts/kafka-ui/templates/NOTES.txt

@@ -17,5 +17,5 @@
 {{- else if contains "ClusterIP" .Values.service.type }}
   export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kafka-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
   echo "Visit http://127.0.0.1:8080 to use your application"
-  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080
 {{- end }}

+ 4 - 2
charts/kafka-ui/templates/deployment.yaml

@@ -52,14 +52,16 @@ spec:
               protocol: TCP
           livenessProbe:
             httpGet:
-              path: /
+              {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/" | urlParse }}
+              path: {{ get $contextPath "path" }}
               port: http
             initialDelaySeconds: 60
             periodSeconds: 30
             timeoutSeconds: 10
           readinessProbe:
             httpGet:
-              path: /
+              {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/" | urlParse }}
+              path: {{ get $contextPath "path" }}
               port: http
             initialDelaySeconds: 60
             periodSeconds: 30
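
For context, both probes now derive their path from `SERVER_SERVLET_CONTEXT_PATH` instead of hard-coding `/`. A hedged sketch of the values input that drives this (the `/kafkaui` value is illustrative):

``` yaml
# Hypothetical values.yaml excerpt: with this set, the template above renders the
# liveness/readiness probe path as /kafkaui/ instead of /
envs:
  config:
    SERVER_SERVLET_CONTEXT_PATH: /kafkaui
```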

+ 22 - 14
charts/kafka-ui/templates/ingress.yaml

@@ -16,26 +16,34 @@ metadata:
     {{- toYaml . | nindent 4 }}
   {{- end }}
 spec:
-  {{- if .Values.ingress.tls }}
+  {{- if .Values.ingress.tls.enabled }}
   tls:
-    {{- range .Values.ingress.tls }}
     - hosts:
-        {{- range .hosts }}
-        - {{ . | quote }}
-        {{- end }}
-      secretName: {{ .secretName }}
-    {{- end }}
+        - {{ .Values.ingress.host }}
+      secretName: {{ .Values.ingress.tls.secretName }}
   {{- end }}
   rules:
-    {{- range .Values.ingress.hosts }}
-    - host: {{ .host | quote }}
-      http:
+    - http:
         paths:
-          {{- range .paths }}
-          - path: {{ . }}
+          {{- range .Values.ingress.precedingPaths }}
+          - path: {{ .path }}
             backend:
+              serviceName: {{ .serviceName }}
+              servicePort: {{ .servicePort }}
+          {{- end }}
+          - backend:
               serviceName: {{ $fullName }}
               servicePort: {{ $svcPort }}
+{{- if .Values.ingress.path }}
+            path: {{ .Values.ingress.path }}
+{{- end }}
+          {{- range .Values.ingress.succeedingPaths }}
+          - path: {{ .path }}
+            backend:
+              serviceName: {{ .serviceName }}
+              servicePort: {{ .servicePort }}
           {{- end }}
-    {{- end }}
-  {{- end }}
+{{- if .Values.ingress.host }}
+      host: {{ .Values.ingress.host }}
+{{- end }}
+  {{- end }}

+ 23 - 4
charts/kafka-ui/values.yaml

@@ -44,13 +44,32 @@ service:
   # if you want to force a specific nodePort. Must be use with service.type=NodePort
   # nodePort:
 
+# Ingress configuration
 ingress:
+  # Enable ingress resource
   enabled: false
+
+  # Annotations for the Ingress
   annotations: {}
-  hosts:
-    - host: chart-example.local
-      paths: []
-  tls: []
+
+  # The path for the Ingress
+  path: ""
+
+  # The hostname for the Ingress
+  host: ""
+
+  # configs for Ingress TLS
+  tls:
+    # Enable TLS termination for the Ingress
+    enabled: false
+    # the name of a pre-created Secret containing a TLS private key and certificate
+    secretName: ""
+
+  # HTTP paths to add to the Ingress before the default path
+  precedingPaths: []
+
+  # Http paths to add to the Ingress after the default path
+  succeedingPaths: []
 
 resources: {}
   # limits:
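
The comments above describe the reshaped ingress block: a single `host`/`path` pair plus `tls.enabled`/`tls.secretName` replace the old `hosts`/`tls` lists, and `precedingPaths`/`succeedingPaths` allow extra backends around the default one. A minimal sketch of an override file using the new keys (hostname, secret name, and the oauth2-proxy backend are placeholders):

``` yaml
# Hypothetical override values for the restructured ingress section
ingress:
  enabled: true
  host: kafka-ui.example.com
  path: /
  tls:
    enabled: true
    secretName: kafka-ui-tls
  # Paths matched before the kafka-ui backend, e.g. an auth proxy
  precedingPaths:
    - path: /oauth2
      serviceName: oauth2-proxy
      servicePort: 4180
  succeedingPaths: []
```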

+ 3 - 0
docker/jaas/client.properties

@@ -0,0 +1,3 @@
+sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";
+security.protocol=SASL_PLAINTEXT
+sasl.mechanism=PLAIN

+ 14 - 0
docker/jaas/kafka_server.conf

@@ -0,0 +1,14 @@
+KafkaServer {
+    org.apache.kafka.common.security.plain.PlainLoginModule required
+    username="admin"
+    password="admin-secret"
+    user_admin="admin-secret"
+    user_enzo="cisternino";
+};
+
+KafkaClient {
+    org.apache.kafka.common.security.plain.PlainLoginModule required
+    user_admin="admin-secret";
+};
+
+Client {};

+ 52 - 0
docker/kafka-ui-sasl.yaml

@@ -0,0 +1,52 @@
+---
+version: '2'
+services:
+
+  kafka-ui:
+    container_name: kafka-ui
+    image: provectuslabs/kafka-ui:latest
+    ports:
+      - 8080:8080
+    depends_on:
+      - zookeeper
+      - kafka
+    environment:
+      KAFKA_CLUSTERS_0_NAME: local
+#      SERVER_SERVLET_CONTEXT_PATH: "/kafkaui"
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
+      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper:2181
+      KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_PLAINTEXT
+      KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM: PLAIN
+      KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";'
+  zookeeper:
+    image: confluentinc/cp-zookeeper:5.2.4
+    environment:
+      ZOOKEEPER_CLIENT_PORT: 2181
+      ZOOKEEPER_TICK_TIME: 2000
+    ports:
+      - 2181:2181
+
+  kafka:
+    image: wurstmeister/kafka:latest
+    hostname: kafka
+    container_name: kafka
+    depends_on:
+      - zookeeper
+    ports:
+      - '9092:9092'
+    environment:
+      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
+      KAFKA_LISTENERS: SASL_PLAINTEXT://kafka:9092
+      KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://kafka:9092
+      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
+      ALLOW_PLAINTEXT_LISTENER: 'yes'
+      KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas/kafka_server.conf"
+      KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
+      KAFKA_INTER_BROKER_LISTENER_NAME: SASL_PLAINTEXT
+      KAFKA_SASL_ENABLED_MECHANISMS: PLAIN
+      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
+      KAFKA_SECURITY_PROTOCOL: SASL_PLAINTEXT
+      KAFKA_SUPER_USERS: User:admin,User:enzo
+      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true'
+    volumes:
+      - ./jaas:/etc/kafka/jaas

+ 48 - 0
guides/SSO.md

@@ -0,0 +1,48 @@
+# How to configure SSO
+SSO additionally requires TLS to be configured for the application. In this example we will use a self-signed certificate; if you use a CA-signed certificate, skip step 1.
+#### Step 1
+In this step we will generate a self-signed PKCS12 keypair.
+``` bash
+mkdir cert
+keytool -genkeypair -alias ui-for-apache-kafka -keyalg RSA -keysize 2048 \
+  -storetype PKCS12 -keystore cert/ui-for-apache-kafka.p12 -validity 3650
+```
+#### Step 2
+Create a new application in any SSO provider; we will continue with [Auth0](https://auth0.com).
+
+<img src="https://github.com/provectus/kafka-ui/raw/images/images/sso-new-app.png" width="70%"/>
+
+After that you need to provide callback URLs; in our case we will use `https://127.0.0.1:8080/login/oauth2/code/auth0`.
+
+<img src="https://github.com/provectus/kafka-ui/raw/images/images/sso-configuration.png" width="70%"/>
+
+These are the main parameters required for enabling SSO:
+
+<img src="https://github.com/provectus/kafka-ui/raw/images/images/sso-parameters.png" width="70%"/>
+
+#### Step 3
+To launch UI for Apache Kafka with TLS and SSO enabled, run the following:
+``` bash
+docker run -p 8080:8080 -v `pwd`/cert:/opt/cert -e AUTH_ENABLED=true \
+  -e SECURITY_BASIC_ENABLED=true \
+  -e SERVER_SSL_KEY_STORE_TYPE=PKCS12 \
+  -e SERVER_SSL_KEY_STORE=/opt/cert/ui-for-apache-kafka.p12 \
+  -e SERVER_SSL_KEY_STORE_PASSWORD=123456 \
+  -e SERVER_SSL_KEY_ALIAS=ui-for-apache-kafka \
+  -e SERVER_SSL_ENABLED=true \
+  -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
+  -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTSECRET=YXfRjmodifiedTujnkVr7zuW9ECCAK4TcnCio-i \
+  -e SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI=https://dev-a63ggcut.auth0.com/ \
+  -e TRUST_STORE=/opt/cert/ui-for-apache-kafka.p12 \
+  -e TRUST_STORE_PASSWORD=123456 \
+provectuslabs/kafka-ui:0.1.0
+```
+With a trusted CA-signed SSL certificate and SSL termination outside of the application, we can pass only the SSO-related environment variables:
+``` bash
+docker run -p 8080:8080 -v `pwd`/cert:/opt/cert -e AUTH_ENABLED=true \
+  -e SECURITY_BASIC_ENABLED=true \
+  -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
+  -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTSECRET=YXfRjmodifiedTujnkVr7zuW9ECCAK4TcnCio-i \
+  -e SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI=https://dev-a63ggcut.auth0.com/ \
+provectuslabs/kafka-ui:0.1.0
+```

+ 1 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java

@@ -21,6 +21,7 @@ public class ClustersProperties {
     String zookeeper;
     String schemaRegistry;
     String schemaNameTemplate = "%s-value";
+    String keySchemaNameTemplate = "%s-key";
     String protobufFile;
     String protobufMessageName;
     List<ConnectCluster> kafkaConnect;

+ 3 - 3
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/CustomWebFilter.java

@@ -22,13 +22,13 @@ public class CustomWebFilter implements WebFilter {
     String contextPath = serverProperties.getServlet().getContextPath() != null 
         ? serverProperties.getServlet().getContextPath() : "";
 
-    if (exchange.getRequest().getURI().getPath().equals(contextPath + "/")
-        || exchange.getRequest().getURI().getPath().startsWith(contextPath + "/ui")) {
+    final String path = exchange.getRequest().getURI().getPath().replaceAll("/$", "");
+    if (path.equals(contextPath) || path.startsWith(contextPath + "/ui")) {
       return chain.filter(
           exchange.mutate().request(exchange.getRequest().mutate().path("/index.html").build())
               .build()
       );
-    } else if (exchange.getRequest().getURI().getPath().startsWith(contextPath)) {
+    } else if (path.startsWith(contextPath)) {
       return chain.filter(
           exchange.mutate().request(exchange.getRequest().mutate().contextPath(contextPath).build())
               .build()

+ 6 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ClustersController.java

@@ -39,4 +39,10 @@ public class ClustersController implements ClustersApi {
   public Mono<ResponseEntity<Flux<Cluster>>> getClusters(ServerWebExchange exchange) {
     return Mono.just(ResponseEntity.ok(Flux.fromIterable(clusterService.getClusters())));
   }
+
+  @Override
+  public Mono<ResponseEntity<Cluster>> updateClusterInfo(String clusterName,
+                                                         ServerWebExchange exchange) {
+    return clusterService.updateCluster(clusterName).map(ResponseEntity::ok);
+  }
 }

+ 70 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java

@@ -1,13 +1,23 @@
 package com.provectus.kafka.ui.controller;
 
+import static java.util.stream.Collectors.toMap;
+
 import com.provectus.kafka.ui.api.ConsumerGroupsApi;
+import com.provectus.kafka.ui.exception.ClusterNotFoundException;
+import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.model.ConsumerGroup;
 import com.provectus.kafka.ui.model.ConsumerGroupDetails;
-import com.provectus.kafka.ui.model.TopicConsumerGroups;
+import com.provectus.kafka.ui.model.ConsumerGroupOffsetsReset;
+import com.provectus.kafka.ui.model.PartitionOffset;
 import com.provectus.kafka.ui.service.ClusterService;
+import com.provectus.kafka.ui.service.ClustersStorage;
+import com.provectus.kafka.ui.service.OffsetsResetService;
+import java.util.Map;
+import java.util.Optional;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.springframework.http.ResponseEntity;
+import org.springframework.util.CollectionUtils;
 import org.springframework.web.bind.annotation.RestController;
 import org.springframework.web.server.ServerWebExchange;
 import reactor.core.publisher.Flux;
@@ -18,6 +28,15 @@ import reactor.core.publisher.Mono;
 @Log4j2
 public class ConsumerGroupsController implements ConsumerGroupsApi {
   private final ClusterService clusterService;
+  private final OffsetsResetService offsetsResetService;
+  private final ClustersStorage clustersStorage;
+
+  @Override
+  public Mono<ResponseEntity<Void>> deleteConsumerGroup(String clusterName, String id,
+                                                        ServerWebExchange exchange) {
+    return clusterService.deleteConsumerGroupById(clusterName, id)
+        .map(ResponseEntity::ok);
+  }
 
   @Override
   public Mono<ResponseEntity<ConsumerGroupDetails>> getConsumerGroup(
@@ -37,9 +56,56 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {
   }
 
   @Override
-  public Mono<ResponseEntity<TopicConsumerGroups>> getTopicConsumerGroups(
+  public Mono<ResponseEntity<Flux<ConsumerGroup>>> getTopicConsumerGroups(
       String clusterName, String topicName, ServerWebExchange exchange) {
-    return clusterService.getTopicConsumerGroupDetail(clusterName, topicName)
-        .map(ResponseEntity::ok);
+    return clusterService.getConsumerGroups(clusterName, Optional.of(topicName))
+        .map(Flux::fromIterable)
+        .map(ResponseEntity::ok)
+        .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
   }
+
+
+  @Override
+  public Mono<ResponseEntity<Void>> resetConsumerGroupOffsets(String clusterName, String group,
+                                                              Mono<ConsumerGroupOffsetsReset>
+                                                                  consumerGroupOffsetsReset,
+                                                              ServerWebExchange exchange) {
+    return consumerGroupOffsetsReset.map(reset -> {
+      var cluster =
+          clustersStorage.getClusterByName(clusterName).orElseThrow(ClusterNotFoundException::new);
+
+      switch (reset.getResetType()) {
+        case EARLIEST:
+          offsetsResetService
+              .resetToEarliest(cluster, group, reset.getTopic(), reset.getPartitions());
+          break;
+        case LATEST:
+          offsetsResetService
+              .resetToLatest(cluster, group, reset.getTopic(), reset.getPartitions());
+          break;
+        case TIMESTAMP:
+          if (reset.getResetToTimestamp() == null) {
+            throw new ValidationException(
+                "resetToTimestamp is required when TIMESTAMP reset type used");
+          }
+          offsetsResetService
+              .resetToTimestamp(cluster, group, reset.getTopic(), reset.getPartitions(),
+                  reset.getResetToTimestamp());
+          break;
+        case OFFSET:
+          if (CollectionUtils.isEmpty(reset.getPartitionsOffsets())) {
+            throw new ValidationException(
+                "partitionsOffsets is required when OFFSET reset type used");
+          }
+          Map<Integer, Long> offsets = reset.getPartitionsOffsets().stream()
+              .collect(toMap(PartitionOffset::getPartition, PartitionOffset::getOffset));
+          offsetsResetService.resetToOffsets(cluster, group, reset.getTopic(), offsets);
+          break;
+        default:
+          throw new ValidationException("Unknown resetType " + reset.getResetType());
+      }
+      return ResponseEntity.ok().build();
+    });
+  }
+
 }

+ 33 - 7
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java

@@ -2,8 +2,11 @@ package com.provectus.kafka.ui.controller;
 
 import com.provectus.kafka.ui.api.MessagesApi;
 import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.CreateTopicMessage;
+import com.provectus.kafka.ui.model.SeekDirection;
 import com.provectus.kafka.ui.model.SeekType;
 import com.provectus.kafka.ui.model.TopicMessage;
+import com.provectus.kafka.ui.model.TopicMessageSchema;
 import com.provectus.kafka.ui.service.ClusterService;
 import java.util.Collections;
 import java.util.List;
@@ -13,6 +16,7 @@ import javax.validation.Valid;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.apache.commons.lang3.tuple.Pair;
+import org.apache.kafka.common.TopicPartition;
 import org.springframework.http.ResponseEntity;
 import org.springframework.web.bind.annotation.RestController;
 import org.springframework.web.server.ServerWebExchange;
@@ -40,28 +44,50 @@ public class MessagesController implements MessagesApi {
   @Override
   public Mono<ResponseEntity<Flux<TopicMessage>>> getTopicMessages(
       String clusterName, String topicName, @Valid SeekType seekType, @Valid List<String> seekTo,
-      @Valid Integer limit, @Valid String q, ServerWebExchange exchange) {
-    return parseConsumerPosition(seekType, seekTo)
+      @Valid Integer limit, @Valid String q, @Valid SeekDirection seekDirection,
+      ServerWebExchange exchange) {
+    return parseConsumerPosition(topicName, seekType, seekTo, seekDirection)
         .map(consumerPosition -> ResponseEntity
             .ok(clusterService.getMessages(clusterName, topicName, consumerPosition, q, limit)));
   }
 
-  private Mono<ConsumerPosition> parseConsumerPosition(SeekType seekType, List<String> seekTo) {
+  @Override
+  public Mono<ResponseEntity<TopicMessageSchema>> getTopicSchema(
+      String clusterName, String topicName, ServerWebExchange exchange) {
+    return Mono.just(clusterService.getTopicSchema(clusterName, topicName))
+        .map(ResponseEntity::ok);
+  }
+
+  @Override
+  public Mono<ResponseEntity<Void>> sendTopicMessages(
+      String clusterName, String topicName, @Valid Mono<CreateTopicMessage> createTopicMessage,
+      ServerWebExchange exchange) {
+    return createTopicMessage.flatMap(msg ->
+        clusterService.sendMessage(clusterName, topicName, msg)
+    ).map(ResponseEntity::ok);
+  }
+
+
+  private Mono<ConsumerPosition> parseConsumerPosition(
+      String topicName, SeekType seekType, List<String> seekTo,  SeekDirection seekDirection) {
     return Mono.justOrEmpty(seekTo)
         .defaultIfEmpty(Collections.emptyList())
         .flatMapIterable(Function.identity())
         .map(p -> {
-          String[] splited = p.split("::");
-          if (splited.length != 2) {
+          String[] split = p.split("::");
+          if (split.length != 2) {
             throw new IllegalArgumentException(
                 "Wrong seekTo argument format. See API docs for details");
           }
 
-          return Pair.of(Integer.parseInt(splited[0]), Long.parseLong(splited[1]));
+          return Pair.of(
+              new TopicPartition(topicName, Integer.parseInt(split[0])),
+              Long.parseLong(split[1])
+          );
         })
         .collectMap(Pair::getKey, Pair::getValue)
         .map(positions -> new ConsumerPosition(seekType != null ? seekType : SeekType.BEGINNING,
-            positions));
+            positions, seekDirection));
   }
 
 }

+ 23 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/TopicsController.java

@@ -1,6 +1,10 @@
 package com.provectus.kafka.ui.controller;
 
 import com.provectus.kafka.ui.api.TopicsApi;
+import com.provectus.kafka.ui.model.PartitionsIncrease;
+import com.provectus.kafka.ui.model.PartitionsIncreaseResponse;
+import com.provectus.kafka.ui.model.ReplicationFactorChange;
+import com.provectus.kafka.ui.model.ReplicationFactorChangeResponse;
 import com.provectus.kafka.ui.model.Topic;
 import com.provectus.kafka.ui.model.TopicColumnsToSort;
 import com.provectus.kafka.ui.model.TopicConfig;
@@ -86,4 +90,23 @@ public class TopicsController implements TopicsApi {
       ServerWebExchange exchange) {
     return clusterService.updateTopic(clusterId, topicName, topicUpdate).map(ResponseEntity::ok);
   }
+
+  @Override
+  public Mono<ResponseEntity<PartitionsIncreaseResponse>> increaseTopicPartitions(
+      String clusterName, String topicName,
+      Mono<PartitionsIncrease> partitionsIncrease,
+      ServerWebExchange exchange) {
+    return partitionsIncrease.flatMap(
+        partitions -> clusterService.increaseTopicPartitions(clusterName, topicName, partitions))
+        .map(ResponseEntity::ok);
+  }
+
+  @Override
+  public Mono<ResponseEntity<ReplicationFactorChangeResponse>> changeReplicationFactor(
+      String clusterName, String topicName, Mono<ReplicationFactorChange> replicationFactorChange,
+      ServerWebExchange exchange) {
+    return replicationFactorChange
+        .flatMap(rfc -> clusterService.changeReplicationFactor(clusterName, topicName, rfc))
+        .map(ResponseEntity::ok);
+  }
 }

+ 0 - 46
kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/ProtobufFileRecordDeserializer.java

@@ -1,46 +0,0 @@
-package com.provectus.kafka.ui.deserialization;
-
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.protobuf.DynamicMessage;
-import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
-import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaUtils;
-import java.io.ByteArrayInputStream;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.Map;
-import java.util.stream.Collectors;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.common.utils.Bytes;
-
-public class ProtobufFileRecordDeserializer implements RecordDeserializer {
-  private final ProtobufSchema protobufSchema;
-  private final ObjectMapper objectMapper;
-
-  public ProtobufFileRecordDeserializer(Path protobufSchemaPath, String messageName,
-                                        ObjectMapper objectMapper) throws IOException {
-    this.objectMapper = objectMapper;
-    final String schemaString = Files.lines(protobufSchemaPath).collect(Collectors.joining());
-    this.protobufSchema = new ProtobufSchema(schemaString).copy(messageName);
-  }
-
-  @Override
-  public Object deserialize(ConsumerRecord<Bytes, Bytes> msg) {
-    try {
-      final var message = DynamicMessage.parseFrom(
-          protobufSchema.toDescriptor(),
-          new ByteArrayInputStream(msg.value().get())
-      );
-      byte[] bytes = ProtobufSchemaUtils.toJson(message);
-      return parseJson(bytes);
-    } catch (Throwable e) {
-      throw new RuntimeException("Failed to parse record from topic " + msg.topic(), e);
-    }
-  }
-
-  private Object parseJson(byte[] bytes) throws IOException {
-    return objectMapper.readValue(bytes, new TypeReference<Map<String, Object>>() {
-    });
-  }
-}

+ 0 - 9
kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/RecordDeserializer.java

@@ -1,9 +0,0 @@
-package com.provectus.kafka.ui.deserialization;
-
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.common.utils.Bytes;
-
-public interface RecordDeserializer {
-
-  Object deserialize(ConsumerRecord<Bytes, Bytes> msg);
-}

+ 0 - 232
kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/SchemaRegistryRecordDeserializer.java

@@ -1,232 +0,0 @@
-package com.provectus.kafka.ui.deserialization;
-
-import com.fasterxml.jackson.core.type.TypeReference;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.google.protobuf.Message;
-import com.provectus.kafka.ui.model.KafkaCluster;
-import io.confluent.kafka.schemaregistry.ParsedSchema;
-import io.confluent.kafka.schemaregistry.SchemaProvider;
-import io.confluent.kafka.schemaregistry.avro.AvroSchemaProvider;
-import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
-import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
-import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
-import io.confluent.kafka.schemaregistry.client.rest.entities.Schema;
-import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
-import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaProvider;
-import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaUtils;
-import io.confluent.kafka.serializers.KafkaAvroDeserializer;
-import io.confluent.kafka.serializers.protobuf.KafkaProtobufDeserializer;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.Optional;
-import java.util.concurrent.ConcurrentHashMap;
-import lombok.SneakyThrows;
-import lombok.extern.log4j.Log4j2;
-import org.apache.avro.generic.GenericRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.common.errors.SerializationException;
-import org.apache.kafka.common.serialization.StringDeserializer;
-import org.apache.kafka.common.utils.Bytes;
-
-@Log4j2
-public class SchemaRegistryRecordDeserializer implements RecordDeserializer {
-
-  private static final int CLIENT_IDENTITY_MAP_CAPACITY = 100;
-
-  private final KafkaCluster cluster;
-  private final SchemaRegistryClient schemaRegistryClient;
-  private final KafkaAvroDeserializer avroDeserializer;
-  private final KafkaProtobufDeserializer<?> protobufDeserializer;
-  private final ObjectMapper objectMapper;
-  private final StringDeserializer stringDeserializer;
-
-  private final Map<String, MessageFormat> topicFormatMap = new ConcurrentHashMap<>();
-
-  public SchemaRegistryRecordDeserializer(KafkaCluster cluster, ObjectMapper objectMapper) {
-    this.cluster = cluster;
-    this.objectMapper = objectMapper;
-
-    this.schemaRegistryClient = Optional.ofNullable(cluster.getSchemaRegistry())
-        .map(schemaRegistryUrl -> {
-              List<SchemaProvider> schemaProviders =
-                  List.of(new AvroSchemaProvider(), new ProtobufSchemaProvider());
-              return new CachedSchemaRegistryClient(
-                  Collections.singletonList(schemaRegistryUrl),
-                  CLIENT_IDENTITY_MAP_CAPACITY,
-                  schemaProviders,
-                  Collections.emptyMap()
-              );
-            }
-        ).orElse(null);
-
-    this.avroDeserializer = Optional.ofNullable(this.schemaRegistryClient)
-        .map(KafkaAvroDeserializer::new)
-        .orElse(null);
-    this.protobufDeserializer = Optional.ofNullable(this.schemaRegistryClient)
-        .map(KafkaProtobufDeserializer::new)
-        .orElse(null);
-    this.stringDeserializer = new StringDeserializer();
-  }
-
-  public Object deserialize(ConsumerRecord<Bytes, Bytes> record) {
-    MessageFormat format = getMessageFormat(record);
-
-    try {
-      Object parsedValue;
-      switch (format) {
-        case AVRO:
-          parsedValue = parseAvroRecord(record);
-          break;
-        case PROTOBUF:
-          parsedValue = parseProtobufRecord(record);
-          break;
-        case JSON:
-          parsedValue = parseJsonRecord(record);
-          break;
-        case STRING:
-          parsedValue = parseStringRecord(record);
-          break;
-        default:
-          throw new IllegalArgumentException(
-              "Unknown message format " + format + " for topic " + record.topic());
-      }
-      return parsedValue;
-    } catch (IOException e) {
-      throw new RuntimeException("Failed to parse record from topic " + record.topic(), e);
-    }
-  }
-
-  private MessageFormat getMessageFormat(ConsumerRecord<Bytes, Bytes> record) {
-    return topicFormatMap.computeIfAbsent(record.topic(), k -> detectFormat(record));
-  }
-
-  private MessageFormat detectFormat(ConsumerRecord<Bytes, Bytes> msg) {
-    if (schemaRegistryClient != null) {
-      try {
-        final Optional<String> type = getSchemaFromMessage(msg).or(() -> getSchemaBySubject(msg));
-        if (type.isPresent()) {
-          if (type.get().equals(MessageFormat.PROTOBUF.name())) {
-            try {
-              protobufDeserializer.deserialize(msg.topic(), msg.value().get());
-              return MessageFormat.PROTOBUF;
-            } catch (Throwable e) {
-              log.info("Failed to get Protobuf schema for topic {}", msg.topic(), e);
-            }
-          } else if (type.get().equals(MessageFormat.AVRO.name())) {
-            try {
-              avroDeserializer.deserialize(msg.topic(), msg.value().get());
-              return MessageFormat.AVRO;
-            } catch (Throwable e) {
-              log.info("Failed to get Avro schema for topic {}", msg.topic(), e);
-            }
-          } else if (type.get().equals(MessageFormat.JSON.name())) {
-            try {
-              parseJsonRecord(msg);
-              return MessageFormat.JSON;
-            } catch (IOException e) {
-              log.info("Failed to parse json from topic {}", msg.topic());
-            }
-          }
-        }
-      } catch (Exception e) {
-        log.warn("Failed to get Schema for topic {}", msg.topic(), e);
-      }
-    }
-
-    try {
-      parseJsonRecord(msg);
-      return MessageFormat.JSON;
-    } catch (IOException e) {
-      log.info("Failed to parse json from topic {}", msg.topic());
-    }
-
-    return MessageFormat.STRING;
-  }
-
-  @SneakyThrows
-  private Optional<String> getSchemaFromMessage(ConsumerRecord<Bytes, Bytes> msg) {
-    Optional<String> result = Optional.empty();
-    final Bytes value = msg.value();
-    if (value != null) {
-      ByteBuffer buffer = ByteBuffer.wrap(value.get());
-      if (buffer.get() == 0) {
-        int id = buffer.getInt();
-        result = Optional.ofNullable(
-            schemaRegistryClient.getSchemaById(id)
-        ).map(ParsedSchema::schemaType);
-      }
-    }
-    return result;
-  }
-
-  @SneakyThrows
-  private Optional<String> getSchemaBySubject(ConsumerRecord<Bytes, Bytes> msg) {
-    String schemaName = String.format(cluster.getSchemaNameTemplate(), msg.topic());
-    final List<Integer> versions = schemaRegistryClient.getAllVersions(schemaName);
-    if (!versions.isEmpty()) {
-      final Integer version = versions.iterator().next();
-      final String subjectName = String.format(cluster.getSchemaNameTemplate(), msg.topic());
-      final Schema schema = schemaRegistryClient.getByVersion(subjectName, version, false);
-      return Optional.ofNullable(schema).map(Schema::getSchemaType);
-    } else {
-      return Optional.empty();
-    }
-  }
-
-  private Object parseAvroRecord(ConsumerRecord<Bytes, Bytes> msg) throws IOException {
-    String topic = msg.topic();
-    if (msg.value() != null && avroDeserializer != null) {
-      byte[] valueBytes = msg.value().get();
-      GenericRecord avroRecord = (GenericRecord) avroDeserializer.deserialize(topic, valueBytes);
-      byte[] bytes = AvroSchemaUtils.toJson(avroRecord);
-      return parseJson(bytes);
-    } else {
-      return Map.of();
-    }
-  }
-
-  private Object parseProtobufRecord(ConsumerRecord<Bytes, Bytes> msg) throws IOException {
-    String topic = msg.topic();
-    if (msg.value() != null && protobufDeserializer != null) {
-      byte[] valueBytes = msg.value().get();
-      final Message message = protobufDeserializer.deserialize(topic, valueBytes);
-      byte[] bytes = ProtobufSchemaUtils.toJson(message);
-      return parseJson(bytes);
-    } else {
-      return Map.of();
-    }
-  }
-
-  private Object parseJsonRecord(ConsumerRecord<Bytes, Bytes> msg) throws IOException {
-    var value = msg.value();
-    if (value == null) {
-      return Map.of();
-    }
-    byte[] valueBytes = value.get();
-    return parseJson(valueBytes);
-  }
-
-  private Object parseJson(byte[] bytes) throws IOException {
-    return objectMapper.readValue(bytes, new TypeReference<Map<String, Object>>() {
-    });
-  }
-
-  private Object parseStringRecord(ConsumerRecord<Bytes, Bytes> msg) {
-    String topic = msg.topic();
-    if (msg.value() == null) {
-      return Map.of();
-    }
-    byte[] valueBytes = msg.value().get();
-    return stringDeserializer.deserialize(topic, valueBytes);
-  }
-
-  public enum MessageFormat {
-    AVRO,
-    JSON,
-    STRING,
-    PROTOBUF
-  }
-}

+ 0 - 19
kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/SimpleRecordDeserializer.java

@@ -1,19 +0,0 @@
-package com.provectus.kafka.ui.deserialization;
-
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.common.serialization.StringDeserializer;
-import org.apache.kafka.common.utils.Bytes;
-
-public class SimpleRecordDeserializer implements RecordDeserializer {
-
-  private final StringDeserializer stringDeserializer = new StringDeserializer();
-
-  @Override
-  public Object deserialize(ConsumerRecord<Bytes, Bytes> msg) {
-    if (msg.value() != null) {
-      return stringDeserializer.deserialize(msg.topic(), msg.value().get());
-    } else {
-      return "empty";
-    }
-  }
-}

+ 103 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java

@@ -0,0 +1,103 @@
+package com.provectus.kafka.ui.emitter;
+
+import com.provectus.kafka.ui.util.OffsetsSeekBackward;
+import java.time.Duration;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.log4j.Log4j2;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.utils.Bytes;
+import reactor.core.publisher.FluxSink;
+
+@RequiredArgsConstructor
+@Log4j2
+public class BackwardRecordEmitter
+    implements java.util.function.Consumer<FluxSink<ConsumerRecord<Bytes, Bytes>>> {
+
+  private static final Duration POLL_TIMEOUT_MS = Duration.ofMillis(1000L);
+
+  private final Function<Map<String, Object>, KafkaConsumer<Bytes, Bytes>> consumerSupplier;
+  private final OffsetsSeekBackward offsetsSeek;
+
+  @Override
+  public void accept(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
+    try (KafkaConsumer<Bytes, Bytes> configConsumer = consumerSupplier.apply(Map.of())) {
+      final List<TopicPartition> requestedPartitions =
+          offsetsSeek.getRequestedPartitions(configConsumer);
+      final int msgsPerPartition = offsetsSeek.msgsPerPartition(requestedPartitions.size());
+      try (KafkaConsumer<Bytes, Bytes> consumer =
+               consumerSupplier.apply(
+                   Map.of(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, msgsPerPartition)
+               )
+      ) {
+        final Map<TopicPartition, Long> partitionsOffsets =
+            offsetsSeek.getPartitionsOffsets(consumer);
+        log.debug("partition offsets: {}", partitionsOffsets);
+        var waitingOffsets =
+            offsetsSeek.waitingOffsets(consumer, partitionsOffsets.keySet());
+        log.debug("waiting offsets {} {}",
+            waitingOffsets.getBeginOffsets(),
+            waitingOffsets.getEndOffsets()
+        );
+        while (!sink.isCancelled() && !waitingOffsets.beginReached()) {
+          for (Map.Entry<TopicPartition, Long> entry : partitionsOffsets.entrySet()) {
+            final Long lowest = waitingOffsets.getBeginOffsets().get(entry.getKey().partition());
+            if (lowest != null) {
+              consumer.assign(Collections.singleton(entry.getKey()));
+              final long offset = Math.max(lowest, entry.getValue() - msgsPerPartition);
+              log.debug("Polling {} from {}", entry.getKey(), offset);
+              consumer.seek(entry.getKey(), offset);
+              ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
+              final List<ConsumerRecord<Bytes, Bytes>> partitionRecords =
+                  records.records(entry.getKey()).stream()
+                      .filter(r -> r.offset() < partitionsOffsets.get(entry.getKey()))
+                      .collect(Collectors.toList());
+              Collections.reverse(partitionRecords);
+
+              log.debug("{} records polled", records.count());
+              log.debug("{} records sent", partitionRecords.size());
+
+              // This is a workaround for the case when the partition begin offset is less than
+              // the real minimal offset, which usually appears in compacted topics
+              if (records.count() > 0  && partitionRecords.isEmpty()) {
+                waitingOffsets.markPolled(entry.getKey().partition());
+              }
+
+              for (ConsumerRecord<Bytes, Bytes> msg : partitionRecords) {
+                if (!sink.isCancelled() && !waitingOffsets.beginReached()) {
+                  sink.next(msg);
+                  waitingOffsets.markPolled(msg);
+                } else {
+                  log.info("Begin reached");
+                  break;
+                }
+              }
+              partitionsOffsets.put(
+                  entry.getKey(),
+                  Math.max(offset, entry.getValue() - msgsPerPartition)
+              );
+            }
+          }
+          if (waitingOffsets.beginReached()) {
+            log.info("begin reached after partitions");
+          } else if (sink.isCancelled()) {
+            log.info("sink is cancelled after partitions");
+          }
+        }
+        sink.complete();
+        log.info("Polling finished");
+      }
+    } catch (Exception e) {
+      log.error("Error occurred while consuming records", e);
+      sink.error(e);
+    }
+  }
+}

+ 49 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/ForwardRecordEmitter.java

@@ -0,0 +1,49 @@
+package com.provectus.kafka.ui.emitter;
+
+import com.provectus.kafka.ui.util.OffsetsSeek;
+import java.time.Duration;
+import java.util.function.Supplier;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.log4j.Log4j2;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.utils.Bytes;
+import reactor.core.publisher.FluxSink;
+
+@RequiredArgsConstructor
+@Log4j2
+public class ForwardRecordEmitter
+    implements java.util.function.Consumer<FluxSink<ConsumerRecord<Bytes, Bytes>>> {
+
+  private static final Duration POLL_TIMEOUT_MS = Duration.ofMillis(1000L);
+
+  private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
+  private final OffsetsSeek offsetsSeek;
+
+  @Override
+  public void accept(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
+    try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
+      var waitingOffsets = offsetsSeek.assignAndSeek(consumer);
+      while (!sink.isCancelled() && !waitingOffsets.endReached()) {
+        ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
+        log.info("{} records polled", records.count());
+
+        for (ConsumerRecord<Bytes, Bytes> msg : records) {
+          if (!sink.isCancelled() && !waitingOffsets.endReached()) {
+            sink.next(msg);
+            waitingOffsets.markPolled(msg);
+          } else {
+            break;
+          }
+        }
+
+      }
+      sink.complete();
+      log.info("Polling finished");
+    } catch (Exception e) {
+      log.error("Error occurred while consuming records", e);
+      sink.error(e);
+    }
+  }
+}

+ 2 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/ErrorCode.java

@@ -9,6 +9,8 @@ public enum ErrorCode {
 
   UNEXPECTED(5000, HttpStatus.INTERNAL_SERVER_ERROR),
   BINDING_FAIL(4001, HttpStatus.BAD_REQUEST),
+  NOT_FOUND(404, HttpStatus.NOT_FOUND),
+  INVALID_ENTITY_STATE(4001, HttpStatus.BAD_REQUEST),
   VALIDATION_FAIL(4002, HttpStatus.BAD_REQUEST),
   READ_ONLY_MODE_ENABLE(4003, HttpStatus.METHOD_NOT_ALLOWED),
   REBALANCE_IN_PROGRESS(4004, HttpStatus.CONFLICT),

+ 12 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/IllegalEntityStateException.java

@@ -0,0 +1,12 @@
+package com.provectus.kafka.ui.exception;
+
+public class IllegalEntityStateException extends CustomBaseException {
+  public IllegalEntityStateException(String message) {
+    super(message);
+  }
+
+  @Override
+  public ErrorCode getErrorCode() {
+    return ErrorCode.INVALID_ENTITY_STATE;
+  }
+}

+ 13 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/exception/NotFoundException.java

@@ -0,0 +1,13 @@
+package com.provectus.kafka.ui.exception;
+
+public class NotFoundException extends CustomBaseException {
+
+  public NotFoundException(String message) {
+    super(message);
+  }
+
+  @Override
+  public ErrorCode getErrorCode() {
+    return ErrorCode.NOT_FOUND;
+  }
+}

+ 4 - 4
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ConsumerPosition.java

@@ -2,11 +2,11 @@ package com.provectus.kafka.ui.model;
 
 import java.util.Map;
 import lombok.Value;
+import org.apache.kafka.common.TopicPartition;
 
 @Value
 public class ConsumerPosition {
-
-  private SeekType seekType;
-  private Map<Integer, Long> seekTo;
-
+  SeekType seekType;
+  Map<TopicPartition, Long> seekTo;
+  SeekDirection seekDirection;
 }
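
For illustration, a position that reads partition 0 of a hypothetical "orders" topic forward from offset 100 (Lombok's @Value generates the all-args constructor used here):

Map<TopicPartition, Long> seekTo = Map.of(new TopicPartition("orders", 0), 100L);
ConsumerPosition position = new ConsumerPosition(SeekType.OFFSET, seekTo, SeekDirection.FORWARD);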

+ 1 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/ExtendedAdminClient.java

@@ -15,6 +15,7 @@ public class ExtendedAdminClient {
   private final Set<SupportedFeature> supportedFeatures;
 
   public static Mono<ExtendedAdminClient> extendedAdminClient(AdminClient adminClient) {
+
     return ClusterUtil.getSupportedFeatures(adminClient)
         .map(s -> new ExtendedAdminClient(adminClient, s));
   }

+ 34 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalConsumerGroup.java

@@ -0,0 +1,34 @@
+package com.provectus.kafka.ui.model;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+import lombok.Builder;
+import lombok.Data;
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.common.ConsumerGroupState;
+import org.apache.kafka.common.Node;
+import org.apache.kafka.common.TopicPartition;
+
+@Data
+@Builder(toBuilder = true)
+public class InternalConsumerGroup {
+  private final String groupId;
+  private final boolean simple;
+  private final Collection<InternalMember> members;
+  private final Map<TopicPartition, OffsetAndMetadata> offsets;
+  private final Map<TopicPartition, Long> endOffsets;
+  private final String partitionAssignor;
+  private final ConsumerGroupState state;
+  private final Node coordinator;
+
+  @Data
+  @Builder(toBuilder = true)
+  public static class InternalMember {
+    private final String consumerId;
+    private final String groupInstanceId;
+    private final String clientId;
+    private final String host;
+    private final Set<TopicPartition> assignment;
+  }
+}
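
A hedged builder sketch with illustrative values only (the group id, topic name and offsets below are made up):

InternalConsumerGroup group = InternalConsumerGroup.builder()
    .groupId("orders-processor")
    .simple(false)
    .members(List.of())
    .offsets(Map.of(new TopicPartition("orders", 0), new OffsetAndMetadata(42L)))
    .endOffsets(Map.of(new TopicPartition("orders", 0), 100L))
    .partitionAssignor("range")
    .state(ConsumerGroupState.STABLE)
    .build();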

+ 3 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java

@@ -11,16 +11,19 @@ import lombok.Data;
 @Builder(toBuilder = true)
 public class KafkaCluster {
   private final String name;
+  private final String version;
   private final Integer jmxPort;
   private final String bootstrapServers;
   private final String zookeeper;
   private final String schemaRegistry;
   private final List<KafkaConnectCluster> kafkaConnect;
   private final String schemaNameTemplate;
+  private final String keySchemaNameTemplate;
   private final ServerStatus status;
   private final ServerStatus zookeeperStatus;
   private final InternalClusterMetrics metrics;
   private final Map<String, InternalTopic> topics;
+  private final List<Integer> brokers;
   private final Throwable lastKafkaException;
   private final Throwable lastZookeeperException;
   private final Path protobufFile;

+ 11 - 6
kafka-ui-api/src/main/java/com/provectus/kafka/ui/deserialization/DeserializationService.java → kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/DeserializationService.java

@@ -1,21 +1,24 @@
-package com.provectus.kafka.ui.deserialization;
+package com.provectus.kafka.ui.serde;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.serde.schemaregistry.SchemaRegistryAwareRecordSerDe;
 import com.provectus.kafka.ui.service.ClustersStorage;
 import java.util.Map;
 import java.util.stream.Collectors;
 import javax.annotation.PostConstruct;
 import lombok.RequiredArgsConstructor;
+import lombok.extern.log4j.Log4j2;
 import org.springframework.stereotype.Component;
 
+@Log4j2
 @Component
 @RequiredArgsConstructor
 public class DeserializationService {
 
   private final ClustersStorage clustersStorage;
   private final ObjectMapper objectMapper;
-  private Map<String, RecordDeserializer> clusterDeserializers;
+  private Map<String, RecordSerDe> clusterDeserializers;
 
 
   @PostConstruct
@@ -27,20 +30,22 @@ public class DeserializationService {
         ));
   }
 
-  private RecordDeserializer createRecordDeserializerForCluster(KafkaCluster cluster) {
+  private RecordSerDe createRecordDeserializerForCluster(KafkaCluster cluster) {
     try {
       if (cluster.getProtobufFile() != null) {
-        return new ProtobufFileRecordDeserializer(cluster.getProtobufFile(),
+        log.info("Using ProtobufFileRecordSerDe for cluster '{}'", cluster.getName());
+        return new ProtobufFileRecordSerDe(cluster.getProtobufFile(),
            cluster.getProtobufMessageName(), objectMapper);
       } else {
-        return new SchemaRegistryRecordDeserializer(cluster, objectMapper);
+        log.info("Using SchemaRegistryAwareRecordSerDe for cluster '{}'", cluster.getName());
+        return new SchemaRegistryAwareRecordSerDe(cluster);
       }
     } catch (Throwable e) {
       throw new RuntimeException("Can't init deserializer", e);
     }
   }
 
-  public RecordDeserializer getRecordDeserializerForCluster(KafkaCluster cluster) {
+  public RecordSerDe getRecordDeserializerForCluster(KafkaCluster cluster) {
     return clusterDeserializers.get(cluster.getName());
   }
 }
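
A short sketch of how a caller is expected to use this service (deserializationService, cluster and consumerRecord are assumed to be in scope):

RecordSerDe serde = deserializationService.getRecordDeserializerForCluster(cluster);
RecordSerDe.DeserializedKeyValue kv = serde.deserialize(consumerRecord);
log.info("key={} value={}", kv.getKey(), kv.getValue());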

+ 110 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/ProtobufFileRecordSerDe.java

@@ -0,0 +1,110 @@
+package com.provectus.kafka.ui.serde;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.protobuf.DynamicMessage;
+import com.google.protobuf.util.JsonFormat;
+import com.provectus.kafka.ui.model.MessageSchema;
+import com.provectus.kafka.ui.model.TopicMessageSchema;
+import com.provectus.kafka.ui.util.jsonschema.JsonSchema;
+import com.provectus.kafka.ui.util.jsonschema.ProtobufSchemaConverter;
+import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
+import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaUtils;
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import javax.annotation.Nullable;
+import lombok.SneakyThrows;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.utils.Bytes;
+
+//TODO: currently we assume that keys for this serde are always string - need to discuss if it is ok
+public class ProtobufFileRecordSerDe implements RecordSerDe {
+  private final ProtobufSchema protobufSchema;
+  private final ObjectMapper objectMapper;
+  private final Path protobufSchemaPath;
+  private final ProtobufSchemaConverter schemaConverter = new ProtobufSchemaConverter();
+
+  public ProtobufFileRecordSerDe(Path protobufSchemaPath, String messageName,
+                                 ObjectMapper objectMapper) throws IOException {
+    this.objectMapper = objectMapper;
+    this.protobufSchemaPath = protobufSchemaPath;
+    try (final Stream<String> lines = Files.lines(protobufSchemaPath)) {
+      this.protobufSchema = new ProtobufSchema(
+          lines.collect(Collectors.joining())
+      ).copy(messageName);
+    }
+  }
+
+  @Override
+  public DeserializedKeyValue deserialize(ConsumerRecord<Bytes, Bytes> msg) {
+    try {
+      return new DeserializedKeyValue(
+          msg.key() != null ? new String(msg.key().get()) : null,
+          msg.value() != null ? parse(msg.value().get()) : null
+      );
+    } catch (Throwable e) {
+      throw new RuntimeException("Failed to parse record from topic " + msg.topic(), e);
+    }
+  }
+
+  @SneakyThrows
+  private String parse(byte[] value) {
+    DynamicMessage protoMsg = DynamicMessage.parseFrom(
+        protobufSchema.toDescriptor(),
+        new ByteArrayInputStream(value)
+    );
+    byte[] jsonFromProto = ProtobufSchemaUtils.toJson(protoMsg);
+    return new String(jsonFromProto);
+  }
+
+  @Override
+  public ProducerRecord<byte[], byte[]> serialize(String topic,
+                                                  @Nullable String key,
+                                                  @Nullable String data,
+                                                  @Nullable Integer partition) {
+    if (data == null) {
+      return new ProducerRecord<>(topic, partition, Objects.requireNonNull(key).getBytes(), null);
+    }
+    DynamicMessage.Builder builder = protobufSchema.newMessageBuilder();
+    try {
+      JsonFormat.parser().merge(data, builder);
+      final DynamicMessage message = builder.build();
+      return new ProducerRecord<>(
+          topic,
+          partition,
+          Optional.ofNullable(key).map(String::getBytes).orElse(null),
+          message.toByteArray()
+      );
+    } catch (Throwable e) {
+      throw new RuntimeException("Failed to merge record for topic " + topic, e);
+    }
+  }
+
+  @Override
+  public TopicMessageSchema getTopicSchema(String topic) {
+
+    final JsonSchema jsonSchema = schemaConverter.convert(
+        protobufSchemaPath.toUri(),
+        protobufSchema.toDescriptor()
+    );
+    final MessageSchema keySchema = new MessageSchema()
+        .name(protobufSchema.fullName())
+        .source(MessageSchema.SourceEnum.PROTO_FILE)
+        .schema(JsonSchema.stringSchema().toJson(objectMapper));
+
+    final MessageSchema valueSchema = new MessageSchema()
+        .name(protobufSchema.fullName())
+        .source(MessageSchema.SourceEnum.PROTO_FILE)
+        .schema(jsonSchema.toJson(objectMapper));
+
+    return new TopicMessageSchema()
+        .key(keySchema)
+        .value(valueSchema);
+  }
+}
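
A hypothetical setup sketch; message.proto and test.MyRecord stand in for the schema file and message name configured for the cluster:

ProtobufFileRecordSerDe serDe = new ProtobufFileRecordSerDe(
    Paths.get("message.proto"), "test.MyRecord", new ObjectMapper()); // constructor throws IOException if the file can't be read
ProducerRecord<byte[], byte[]> record =
    serDe.serialize("my-topic", "key-1", "{\"field\":\"value\"}", null); // the JSON payload is merged into the proto message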

+ 26 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/RecordSerDe.java

@@ -0,0 +1,26 @@
+package com.provectus.kafka.ui.serde;
+
+import com.provectus.kafka.ui.model.TopicMessageSchema;
+import javax.annotation.Nullable;
+import lombok.Value;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.utils.Bytes;
+
+public interface RecordSerDe {
+
+  @Value
+  class DeserializedKeyValue {
+    @Nullable String key;
+    @Nullable String value;
+  }
+
+  DeserializedKeyValue deserialize(ConsumerRecord<Bytes, Bytes> msg);
+
+  ProducerRecord<byte[], byte[]> serialize(String topic,
+                                           @Nullable String key,
+                                           @Nullable String data,
+                                           @Nullable Integer partition);
+
+  TopicMessageSchema getTopicSchema(String topic);
+}

+ 45 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/SimpleRecordSerDe.java

@@ -0,0 +1,45 @@
+package com.provectus.kafka.ui.serde;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.provectus.kafka.ui.model.MessageSchema;
+import com.provectus.kafka.ui.model.TopicMessageSchema;
+import com.provectus.kafka.ui.util.jsonschema.JsonSchema;
+import javax.annotation.Nullable;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.utils.Bytes;
+
+public class SimpleRecordSerDe implements RecordSerDe {
+
+  @Override
+  public DeserializedKeyValue deserialize(ConsumerRecord<Bytes, Bytes> msg) {
+    return new DeserializedKeyValue(
+        msg.key() != null ? new String(msg.key().get()) : null,
+        msg.value() != null ? new String(msg.value().get()) : null
+    );
+  }
+
+  @Override
+  public ProducerRecord<byte[], byte[]> serialize(String topic,
+                                                  @Nullable String key,
+                                                  @Nullable String data,
+                                                  @Nullable Integer partition) {
+    return new ProducerRecord<>(
+        topic,
+        partition,
+        key != null ? key.getBytes() : null,
+        data != null ? data.getBytes() : null
+    );
+  }
+
+  @Override
+  public TopicMessageSchema getTopicSchema(String topic) {
+    final MessageSchema schema = new MessageSchema()
+        .name("unknown")
+        .source(MessageSchema.SourceEnum.UNKNOWN)
+        .schema(JsonSchema.stringSchema().toJson(new ObjectMapper()));
+    return new TopicMessageSchema()
+        .key(schema)
+        .value(schema);
+  }
+}

+ 23 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageFormatter.java

@@ -0,0 +1,23 @@
+package com.provectus.kafka.ui.serde.schemaregistry;
+
+import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.kafka.serializers.KafkaAvroDeserializer;
+import lombok.SneakyThrows;
+import org.apache.avro.generic.GenericRecord;
+
+public class AvroMessageFormatter implements MessageFormatter {
+  private final KafkaAvroDeserializer avroDeserializer;
+
+  public AvroMessageFormatter(SchemaRegistryClient client) {
+    this.avroDeserializer = new KafkaAvroDeserializer(client);
+  }
+
+  @Override
+  @SneakyThrows
+  public String format(String topic, byte[] value) {
+    GenericRecord avroRecord = (GenericRecord) avroDeserializer.deserialize(topic, value);
+    byte[] jsonBytes = AvroSchemaUtils.toJson(avroRecord);
+    return new String(jsonBytes);
+  }
+}

+ 40 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/AvroMessageReader.java

@@ -0,0 +1,40 @@
+package com.provectus.kafka.ui.serde.schemaregistry;
+
+import io.confluent.kafka.schemaregistry.ParsedSchema;
+import io.confluent.kafka.schemaregistry.avro.AvroSchema;
+import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
+import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
+import io.confluent.kafka.serializers.KafkaAvroSerializer;
+import java.io.IOException;
+import java.util.Map;
+import org.apache.kafka.common.serialization.Serializer;
+
+public class AvroMessageReader extends MessageReader<Object> {
+
+  public AvroMessageReader(String topic, boolean isKey,
+                           SchemaRegistryClient client,
+                           SchemaMetadata schema)
+      throws IOException, RestClientException {
+    super(topic, isKey, client, schema);
+  }
+
+  @Override
+  protected Serializer<Object> createSerializer(SchemaRegistryClient client) {
+    var serializer = new KafkaAvroSerializer(client);
+    // need to call configure to set isKey property
+    serializer.configure(Map.of("schema.registry.url", "wontbeused"), isKey);
+    return serializer;
+  }
+
+  @Override
+  protected Object read(String value, ParsedSchema schema) {
+    try {
+      return AvroSchemaUtils.toObject(value, (AvroSchema) schema);
+    } catch (Throwable e) {
+      throw new RuntimeException("Failed to serialize record for topic " + topic, e);
+    }
+
+  }
+}

+ 8 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageFormat.java

@@ -0,0 +1,8 @@
+package com.provectus.kafka.ui.serde.schemaregistry;
+
+public enum MessageFormat {
+  AVRO,
+  JSON,
+  STRING,
+  PROTOBUF
+}

+ 5 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageFormatter.java

@@ -0,0 +1,5 @@
+package com.provectus.kafka.ui.serde.schemaregistry;
+
+public interface MessageFormatter {
+  String format(String topic, byte[] value);
+}

+ 32 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/MessageReader.java

@@ -0,0 +1,32 @@
+package com.provectus.kafka.ui.serde.schemaregistry;
+
+import io.confluent.kafka.schemaregistry.ParsedSchema;
+import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
+import java.io.IOException;
+import org.apache.kafka.common.serialization.Serializer;
+
+public abstract class MessageReader<T> {
+  protected final Serializer<T> serializer;
+  protected final String topic;
+  protected final boolean isKey;
+  private final ParsedSchema schema;
+
+  protected MessageReader(String topic, boolean isKey, SchemaRegistryClient client,
+                          SchemaMetadata schema) throws IOException, RestClientException {
+    this.topic = topic;
+    this.isKey = isKey;
+    this.serializer = createSerializer(client);
+    this.schema = client.getSchemaById(schema.getId());
+  }
+
+  protected abstract Serializer<T> createSerializer(SchemaRegistryClient client);
+
+  public byte[] read(String value) {
+    final T read = this.read(value, schema);
+    return this.serializer.serialize(topic, read);
+  }
+
+  protected abstract T read(String value, ParsedSchema schema);
+}

+ 23 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/ProtobufMessageFormatter.java

@@ -0,0 +1,23 @@
+package com.provectus.kafka.ui.serde.schemaregistry;
+
+import com.google.protobuf.Message;
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaUtils;
+import io.confluent.kafka.serializers.protobuf.KafkaProtobufDeserializer;
+import lombok.SneakyThrows;
+
+public class ProtobufMessageFormatter implements MessageFormatter {
+  private final KafkaProtobufDeserializer<?> protobufDeserializer;
+
+  public ProtobufMessageFormatter(SchemaRegistryClient client) {
+    this.protobufDeserializer = new KafkaProtobufDeserializer<>(client);
+  }
+
+  @Override
+  @SneakyThrows
+  public String format(String topic, byte[] value) {
+    final Message message = protobufDeserializer.deserialize(topic, value);
+    byte[] jsonBytes = ProtobufSchemaUtils.toJson(message);
+    return new String(jsonBytes);
+  }
+}

+ 44 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/ProtobufMessageReader.java

@@ -0,0 +1,44 @@
+package com.provectus.kafka.ui.serde.schemaregistry;
+
+import com.google.protobuf.DynamicMessage;
+import com.google.protobuf.Message;
+import com.google.protobuf.util.JsonFormat;
+import io.confluent.kafka.schemaregistry.ParsedSchema;
+import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
+import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
+import io.confluent.kafka.serializers.protobuf.KafkaProtobufSerializer;
+import java.io.IOException;
+import java.util.Map;
+import org.apache.kafka.common.serialization.Serializer;
+
+public class ProtobufMessageReader extends MessageReader<Message> {
+
+  public ProtobufMessageReader(String topic, boolean isKey,
+                               SchemaRegistryClient client, SchemaMetadata schema)
+      throws IOException, RestClientException {
+    super(topic, isKey, client, schema);
+  }
+
+  @Override
+  protected Serializer<Message> createSerializer(SchemaRegistryClient client) {
+    var serializer = new KafkaProtobufSerializer<>(client);
+    // need to call configure to set isKey property
+    serializer.configure(Map.of("schema.registry.url", "wontbeused"), isKey);
+    return serializer;
+  }
+
+  @Override
+  protected Message read(String value, ParsedSchema schema) {
+    ProtobufSchema protobufSchema = (ProtobufSchema) schema;
+    DynamicMessage.Builder builder = protobufSchema.newMessageBuilder();
+    try {
+      JsonFormat.parser().merge(value, builder);
+      return builder.build();
+    } catch (Throwable e) {
+      throw new RuntimeException("Failed to serialize record for topic " + topic, e);
+    }
+  }
+
+}

+ 288 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/SchemaRegistryAwareRecordSerDe.java

@@ -0,0 +1,288 @@
+package com.provectus.kafka.ui.serde.schemaregistry;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.MessageSchema;
+import com.provectus.kafka.ui.model.TopicMessageSchema;
+import com.provectus.kafka.ui.serde.RecordSerDe;
+import com.provectus.kafka.ui.util.jsonschema.AvroJsonSchemaConverter;
+import com.provectus.kafka.ui.util.jsonschema.JsonSchema;
+import com.provectus.kafka.ui.util.jsonschema.ProtobufSchemaConverter;
+import io.confluent.kafka.schemaregistry.ParsedSchema;
+import io.confluent.kafka.schemaregistry.SchemaProvider;
+import io.confluent.kafka.schemaregistry.avro.AvroSchema;
+import io.confluent.kafka.schemaregistry.avro.AvroSchemaProvider;
+import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
+import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
+import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
+import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
+import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaProvider;
+import java.net.URI;
+import java.nio.ByteBuffer;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import javax.annotation.Nullable;
+import lombok.SneakyThrows;
+import lombok.extern.log4j.Log4j2;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.utils.Bytes;
+
+@Log4j2
+public class SchemaRegistryAwareRecordSerDe implements RecordSerDe {
+
+  private static final int CLIENT_IDENTITY_MAP_CAPACITY = 100;
+
+  private final KafkaCluster cluster;
+  private final Map<String, MessageFormatter> valueFormatMap = new ConcurrentHashMap<>();
+  private final Map<String, MessageFormatter> keyFormatMap = new ConcurrentHashMap<>();
+
+  @Nullable
+  private final SchemaRegistryClient schemaRegistryClient;
+
+  @Nullable
+  private final AvroMessageFormatter avroFormatter;
+
+  @Nullable
+  private final ProtobufMessageFormatter protobufFormatter;
+
+  private final StringMessageFormatter stringFormatter = new StringMessageFormatter();
+  private final ProtobufSchemaConverter protoSchemaConverter = new ProtobufSchemaConverter();
+  private final AvroJsonSchemaConverter avroSchemaConverter = new AvroJsonSchemaConverter();
+  private final ObjectMapper objectMapper = new ObjectMapper();
+
+  private static SchemaRegistryClient createSchemaRegistryClient(KafkaCluster cluster) {
+    Objects.requireNonNull(cluster.getSchemaRegistry());
+    List<SchemaProvider> schemaProviders =
+        List.of(new AvroSchemaProvider(), new ProtobufSchemaProvider());
+    //TODO add auth
+    return new CachedSchemaRegistryClient(
+        Collections.singletonList(cluster.getSchemaRegistry()),
+        CLIENT_IDENTITY_MAP_CAPACITY,
+        schemaProviders,
+        Collections.emptyMap()
+    );
+  }
+
+  public SchemaRegistryAwareRecordSerDe(KafkaCluster cluster) {
+    this.cluster = cluster;
+    this.schemaRegistryClient = cluster.getSchemaRegistry() != null
+        ? createSchemaRegistryClient(cluster)
+        : null;
+    if (schemaRegistryClient != null) {
+      this.avroFormatter = new AvroMessageFormatter(schemaRegistryClient);
+      this.protobufFormatter = new ProtobufMessageFormatter(schemaRegistryClient);
+    } else {
+      this.avroFormatter = null;
+      this.protobufFormatter = null;
+    }
+  }
+
+  public DeserializedKeyValue deserialize(ConsumerRecord<Bytes, Bytes> msg) {
+    try {
+      return new DeserializedKeyValue(
+          msg.key() != null
+              ? getMessageFormatter(msg, true).format(msg.topic(), msg.key().get())
+              : null,
+          msg.value() != null
+              ? getMessageFormatter(msg, false).format(msg.topic(), msg.value().get())
+              : null
+      );
+    } catch (Throwable e) {
+      throw new RuntimeException("Failed to parse record from topic " + msg.topic(), e);
+    }
+  }
+
+  @Override
+  public ProducerRecord<byte[], byte[]> serialize(String topic,
+                                                  @Nullable String key,
+                                                  @Nullable String data,
+                                                  @Nullable Integer partition) {
+    final Optional<SchemaMetadata> maybeValueSchema = getSchemaBySubject(topic, false);
+    final Optional<SchemaMetadata> maybeKeySchema = getSchemaBySubject(topic, true);
+
+    final byte[] serializedValue = data != null
+        ? serialize(maybeValueSchema, topic, data, false)
+        : null;
+    final byte[] serializedKey = key != null
+        ? serialize(maybeKeySchema, topic, key, true)
+        : null;
+
+    return new ProducerRecord<>(topic, partition, serializedKey, serializedValue);
+  }
+
+  @SneakyThrows
+  private byte[] serialize(
+      Optional<SchemaMetadata> maybeSchema, String topic, String value, boolean isKey) {
+    if (maybeSchema.isPresent()) {
+      final SchemaMetadata schema = maybeSchema.get();
+
+      MessageReader<?> reader;
+      if (schema.getSchemaType().equals(MessageFormat.PROTOBUF.name())) {
+        reader = new ProtobufMessageReader(topic, isKey, schemaRegistryClient, schema);
+      } else if (schema.getSchemaType().equals(MessageFormat.AVRO.name())) {
+        reader = new AvroMessageReader(topic, isKey, schemaRegistryClient, schema);
+      } else {
+        throw new IllegalStateException("Unsupported schema type: " + schema.getSchemaType());
+      }
+
+      return reader.read(value);
+    } else {
+      // if no schema provided serialize input as raw string
+      return value.getBytes();
+    }
+  }
+
+  @Override
+  public TopicMessageSchema getTopicSchema(String topic) {
+    final Optional<SchemaMetadata> maybeValueSchema = getSchemaBySubject(topic, false);
+    final Optional<SchemaMetadata> maybeKeySchema = getSchemaBySubject(topic, true);
+
+    String sourceValueSchema = maybeValueSchema.map(this::convertSchema)
+        .orElseGet(() -> JsonSchema.stringSchema().toJson(objectMapper));
+
+    String sourceKeySchema = maybeKeySchema.map(this::convertSchema)
+        .orElseGet(() -> JsonSchema.stringSchema().toJson(objectMapper));
+
+    final MessageSchema keySchema = new MessageSchema()
+        .name(maybeKeySchema.map(
+            (s) -> schemaSubject(topic, true)
+        ).orElse("unknown"))
+        .source(MessageSchema.SourceEnum.SCHEMA_REGISTRY)
+        .schema(sourceKeySchema);
+
+    final MessageSchema valueSchema = new MessageSchema()
+        .name(maybeValueSchema.map(
+            (s) -> schemaSubject(topic, false)
+        ).orElse("unknown"))
+        .source(MessageSchema.SourceEnum.SCHEMA_REGISTRY)
+        .schema(sourceValueSchema);
+
+    return new TopicMessageSchema()
+        .key(keySchema)
+        .value(valueSchema);
+  }
+
+  @SneakyThrows
+  private String convertSchema(SchemaMetadata schema) {
+
+    String jsonSchema;
+    URI basePath = new URI(cluster.getSchemaRegistry()).resolve(Integer.toString(schema.getId()));
+    final ParsedSchema schemaById = Objects.requireNonNull(schemaRegistryClient)
+        .getSchemaById(schema.getId());
+
+    if (schema.getSchemaType().equals(MessageFormat.PROTOBUF.name())) {
+      final ProtobufSchema protobufSchema = (ProtobufSchema) schemaById;
+      jsonSchema = protoSchemaConverter
+          .convert(basePath, protobufSchema.toDescriptor())
+          .toJson(objectMapper);
+    } else if (schema.getSchemaType().equals(MessageFormat.AVRO.name())) {
+      final AvroSchema avroSchema = (AvroSchema) schemaById;
+      jsonSchema = avroSchemaConverter
+          .convert(basePath, avroSchema.rawSchema())
+          .toJson(objectMapper);
+    } else if (schema.getSchemaType().equals(MessageFormat.JSON.name())) {
+      jsonSchema = schema.getSchema();
+    } else {
+      jsonSchema = JsonSchema.stringSchema().toJson(objectMapper);
+    }
+
+    return jsonSchema;
+  }
+
+  private MessageFormatter getMessageFormatter(ConsumerRecord<Bytes, Bytes> msg, boolean isKey) {
+    if (isKey) {
+      return keyFormatMap.computeIfAbsent(msg.topic(), k -> detectFormat(msg, true));
+    } else {
+      return valueFormatMap.computeIfAbsent(msg.topic(), k -> detectFormat(msg, false));
+    }
+  }
+
+  private MessageFormatter detectFormat(ConsumerRecord<Bytes, Bytes> msg, boolean isKey) {
+    if (schemaRegistryClient != null) {
+      try {
+        final Optional<String> type = getSchemaFromMessage(msg, isKey)
+            .or(() -> getSchemaBySubject(msg.topic(), isKey).map(SchemaMetadata::getSchemaType));
+        if (type.isPresent()) {
+          if (type.get().equals(MessageFormat.PROTOBUF.name())) {
+            if (tryFormatter(protobufFormatter, msg, isKey).isPresent()) {
+              return protobufFormatter;
+            }
+          } else if (type.get().equals(MessageFormat.AVRO.name())) {
+            if (tryFormatter(avroFormatter, msg, isKey).isPresent()) {
+              return avroFormatter;
+            }
+          } else {
+            throw new IllegalStateException("Unsupported schema type: " + type.get());
+          }
+        }
+      } catch (Exception e) {
+        log.warn("Failed to get Schema for topic {}", msg.topic(), e);
+      }
+    }
+    return stringFormatter;
+  }
+
+  private Optional<MessageFormatter> tryFormatter(
+      MessageFormatter formatter, ConsumerRecord<Bytes, Bytes> msg, boolean isKey) {
+    try {
+      formatter.format(msg.topic(), isKey ? msg.key().get() : msg.value().get());
+      return Optional.of(formatter);
+    } catch (Throwable e) {
+      log.warn("Failed to parse by {} from topic {}", formatter.getClass(), msg.topic(), e);
+    }
+
+    return Optional.empty();
+  }
+
+  @SneakyThrows
+  private Optional<String> getSchemaFromMessage(ConsumerRecord<Bytes, Bytes> msg, boolean isKey) {
+    Optional<String> result = Optional.empty();
+    final Bytes value = isKey ? msg.key() : msg.value();
+    if (value != null) {
+      ByteBuffer buffer = ByteBuffer.wrap(value.get());
+      if (buffer.get() == 0) {
+        int id = buffer.getInt();
+        result =
+            Optional.ofNullable(schemaRegistryClient)
+                .flatMap(client -> wrapClientCall(() -> client.getSchemaById(id)))
+                .map(ParsedSchema::schemaType);
+      }
+    }
+    return result;
+  }
+
+  @SneakyThrows
+  private Optional<SchemaMetadata> getSchemaBySubject(String topic, boolean isKey) {
+    return Optional.ofNullable(schemaRegistryClient)
+        .flatMap(client ->
+            wrapClientCall(() ->
+                client.getLatestSchemaMetadata(schemaSubject(topic, isKey))));
+  }
+
+  @SneakyThrows
+  private <T> Optional<T> wrapClientCall(Callable<T> call) {
+    try {
+      return Optional.ofNullable(call.call());
+    } catch (RestClientException restClientException) {
+      if (restClientException.getStatus() == 404) {
+        return Optional.empty();
+      } else {
+        throw new RuntimeException("Error calling SchemaRegistryClient", restClientException);
+      }
+    }
+  }
+
+  private String schemaSubject(String topic, boolean isKey) {
+    return String.format(
+        isKey ? cluster.getKeySchemaNameTemplate()
+            : cluster.getSchemaNameTemplate(), topic
+    );
+  }
+}
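
For reference, a minimal sketch of the Confluent wire-format check that getSchemaFromMessage relies on (assumption: a zero magic byte followed by a 4-byte schema id):

static OptionalInt extractSchemaId(byte[] payload) {
  if (payload != null && payload.length >= 5 && payload[0] == 0) {
    return OptionalInt.of(ByteBuffer.wrap(payload, 1, 4).getInt()); // schema id as registered in Schema Registry
  }
  return OptionalInt.empty(); // not registry-framed; the serde falls back to string formatting
}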

+ 11 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/serde/schemaregistry/StringMessageFormatter.java

@@ -0,0 +1,11 @@
+package com.provectus.kafka.ui.serde.schemaregistry;
+
+import java.nio.charset.StandardCharsets;
+
+public class StringMessageFormatter implements MessageFormatter {
+
+  @Override
+  public String format(String topic, byte[] value) {
+    return new String(value, StandardCharsets.UTF_8);
+  }
+}

+ 127 - 39
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClusterService.java

@@ -1,7 +1,10 @@
 package com.provectus.kafka.ui.service;
 
 import com.provectus.kafka.ui.exception.ClusterNotFoundException;
+import com.provectus.kafka.ui.exception.IllegalEntityStateException;
+import com.provectus.kafka.ui.exception.NotFoundException;
 import com.provectus.kafka.ui.exception.TopicNotFoundException;
+import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.mapper.ClusterMapper;
 import com.provectus.kafka.ui.model.Broker;
 import com.provectus.kafka.ui.model.BrokerMetrics;
@@ -11,37 +14,46 @@ import com.provectus.kafka.ui.model.ClusterStats;
 import com.provectus.kafka.ui.model.ConsumerGroup;
 import com.provectus.kafka.ui.model.ConsumerGroupDetails;
 import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.CreateTopicMessage;
+import com.provectus.kafka.ui.model.ExtendedAdminClient;
 import com.provectus.kafka.ui.model.InternalTopic;
 import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.PartitionsIncrease;
+import com.provectus.kafka.ui.model.PartitionsIncreaseResponse;
+import com.provectus.kafka.ui.model.ReplicationFactorChange;
+import com.provectus.kafka.ui.model.ReplicationFactorChangeResponse;
 import com.provectus.kafka.ui.model.Topic;
 import com.provectus.kafka.ui.model.TopicColumnsToSort;
 import com.provectus.kafka.ui.model.TopicConfig;
-import com.provectus.kafka.ui.model.TopicConsumerGroups;
 import com.provectus.kafka.ui.model.TopicCreation;
 import com.provectus.kafka.ui.model.TopicDetails;
 import com.provectus.kafka.ui.model.TopicMessage;
+import com.provectus.kafka.ui.model.TopicMessageSchema;
 import com.provectus.kafka.ui.model.TopicUpdate;
 import com.provectus.kafka.ui.model.TopicsResponse;
+import com.provectus.kafka.ui.serde.DeserializationService;
 import com.provectus.kafka.ui.util.ClusterUtil;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
-import java.util.Map;
 import java.util.Optional;
 import java.util.function.Predicate;
 import java.util.stream.Collectors;
-import java.util.stream.Stream;
 import lombok.RequiredArgsConstructor;
 import lombok.SneakyThrows;
+import lombok.extern.log4j.Log4j2;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.clients.admin.DeleteConsumerGroupsResult;
+import org.apache.kafka.common.errors.GroupIdNotFoundException;
+import org.apache.kafka.common.errors.GroupNotEmptyException;
+import org.jetbrains.annotations.NotNull;
 import org.springframework.stereotype.Service;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
-import reactor.util.function.Tuples;
 
 @Service
 @RequiredArgsConstructor
+@Log4j2
 public class ClusterService {
   private static final Integer DEFAULT_PAGE_SIZE = 25;
 
@@ -49,6 +61,7 @@ public class ClusterService {
   private final ClusterMapper clusterMapper;
   private final KafkaService kafkaService;
   private final ConsumingService consumingService;
+  private final DeserializationService deserializationService;
 
   public List<Cluster> getClusters() {
     return clustersStorage.getKafkaClusters()
@@ -91,7 +104,7 @@ public class ClusterService {
     var topicsToSkip = (page.filter(positiveInt).orElse(1) - 1) * perPage;
     var cluster = clustersStorage.getClusterByName(name)
         .orElseThrow(ClusterNotFoundException::new);
-    List<Topic> topics = cluster.getTopics().values().stream()
+    List<InternalTopic> topics = cluster.getTopics().values().stream()
         .filter(topic -> !topic.isInternal()
             || showInternal
             .map(i -> topic.isInternal() == i)
@@ -101,7 +114,6 @@ public class ClusterService {
                 .map(s -> StringUtils.containsIgnoreCase(topic.getName(), s))
                 .orElse(true))
         .sorted(getComparatorForTopic(sortBy))
-        .map(clusterMapper::toTopic)
         .collect(Collectors.toList());
     var totalPages = (topics.size() / perPage)
         + (topics.size() % perPage == 0 ? 0 : 1);
@@ -111,6 +123,13 @@ public class ClusterService {
             topics.stream()
                 .skip(topicsToSkip)
                 .limit(perPage)
+                .map(t ->
+                    clusterMapper.toTopic(
+                        t.toBuilder().partitions(
+                          kafkaService.getTopicPartitions(cluster, t)
+                        ).build()
+                    )
+                )
                 .collect(Collectors.toList())
         );
   }
@@ -125,6 +144,8 @@ public class ClusterService {
         return Comparator.comparing(InternalTopic::getPartitionCount);
       case OUT_OF_SYNC_REPLICAS:
         return Comparator.comparing(t -> t.getReplicas() - t.getInSyncReplicas());
+      case REPLICATION_FACTOR:
+        return Comparator.comparing(InternalTopic::getReplicationFactor);
       case NAME:
       default:
         return defaultComparator;
@@ -164,46 +185,26 @@ public class ClusterService {
   public Mono<ConsumerGroupDetails> getConsumerGroupDetail(String clusterName,
                                                            String consumerGroupId) {
     var cluster = clustersStorage.getClusterByName(clusterName).orElseThrow(Throwable::new);
-
-    return kafkaService.getOrCreateAdminClient(cluster).map(ac ->
-        ac.getAdminClient().describeConsumerGroups(Collections.singletonList(consumerGroupId)).all()
-    ).flatMap(groups ->
-        kafkaService.groupMetadata(cluster, consumerGroupId)
-            .flatMap(offsets -> {
-              Map<TopicPartition, Long> endOffsets =
-                  kafkaService.topicPartitionsEndOffsets(cluster, offsets.keySet());
-              return ClusterUtil.toMono(groups).map(s ->
-                  Tuples.of(
-                      s.get(consumerGroupId),
-                      s.get(consumerGroupId).members().stream()
-                          .flatMap(c ->
-                              Stream.of(
-                                  ClusterUtil.convertToConsumerTopicPartitionDetails(
-                                      c, offsets, endOffsets, consumerGroupId
-                                  )
-                              )
-                          )
-                          .collect(Collectors.toList()).stream()
-                          .flatMap(t ->
-                              t.stream().flatMap(Stream::of)
-                          ).collect(Collectors.toList())
-                  )
-              );
-            }).map(c -> ClusterUtil.convertToConsumerGroupDetails(c.getT1(), c.getT2()))
+    return kafkaService.getConsumerGroups(
+        cluster,
+        Optional.empty(),
+        Collections.singletonList(consumerGroupId)
+    ).filter(groups -> !groups.isEmpty()).map(groups -> groups.get(0)).map(
+        ClusterUtil::convertToConsumerGroupDetails
     );
   }
 
   public Mono<List<ConsumerGroup>> getConsumerGroups(String clusterName) {
-    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
-        .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
-        .flatMap(kafkaService::getConsumerGroups);
+    return getConsumerGroups(clusterName, Optional.empty());
   }
 
-  public Mono<TopicConsumerGroups> getTopicConsumerGroupDetail(
-      String clusterName, String topicName) {
+  public Mono<List<ConsumerGroup>> getConsumerGroups(String clusterName, Optional<String> topic) {
     return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
         .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
-        .flatMap(c -> kafkaService.getTopicConsumerGroups(c, topicName));
+        .flatMap(c -> kafkaService.getConsumerGroups(c, topic, Collections.emptyList()))
+        .map(c ->
+            c.stream().map(ClusterUtil::convertToConsumerGroup).collect(Collectors.toList())
+        );
   }
 
   public Flux<Broker> getBrokers(String clusterName) {
@@ -253,6 +254,15 @@ public class ClusterService {
     return updatedCluster;
   }
 
+  public Mono<Cluster> updateCluster(String clusterName) {
+    return clustersStorage.getClusterByName(clusterName)
+        .map(cluster -> kafkaService.getUpdatedCluster(cluster)
+            .doOnNext(updatedCluster -> clustersStorage
+                .setKafkaCluster(updatedCluster.getName(), updatedCluster))
+            .map(clusterMapper::toCluster))
+        .orElse(Mono.error(new ClusterNotFoundException()));
+  }
+
   public Flux<TopicMessage> getMessages(String clusterName, String topicName,
                                         ConsumerPosition consumerPosition, String query,
                                         Integer limit) {
@@ -272,5 +282,83 @@ public class ClusterService {
         .flatMap(offsets -> kafkaService.deleteTopicMessages(cluster, offsets));
   }
 
+  public Mono<PartitionsIncreaseResponse> increaseTopicPartitions(
+      String clusterName,
+      String topicName,
+      PartitionsIncrease partitionsIncrease) {
+    return clustersStorage.getClusterByName(clusterName).map(cluster ->
+        kafkaService.increaseTopicPartitions(cluster, topicName, partitionsIncrease)
+            .doOnNext(t -> updateCluster(t, cluster.getName(), cluster))
+            .map(t -> new PartitionsIncreaseResponse()
+                .topicName(t.getName())
+                .totalPartitionsCount(t.getPartitionCount())))
+        .orElse(Mono.error(new ClusterNotFoundException(
+            String.format("No cluster for name '%s'", clusterName)
+        )));
+  }
 
+  public Mono<Void> deleteConsumerGroupById(String clusterName,
+                                            String groupId) {
+    return clustersStorage.getClusterByName(clusterName)
+        .map(cluster -> kafkaService.getOrCreateAdminClient(cluster)
+            .map(ExtendedAdminClient::getAdminClient)
+            .map(adminClient -> adminClient.deleteConsumerGroups(List.of(groupId)))
+            .map(DeleteConsumerGroupsResult::all)
+            .flatMap(ClusterUtil::toMono)
+            .onErrorResume(this::reThrowCustomException)
+        )
+        .orElse(Mono.empty());
+  }
+
+  public TopicMessageSchema getTopicSchema(String clusterName, String topicName) {
+    var cluster = clustersStorage.getClusterByName(clusterName)
+        .orElseThrow(ClusterNotFoundException::new);
+    if (!cluster.getTopics().containsKey(topicName)) {
+      throw new TopicNotFoundException();
+    }
+    return deserializationService
+        .getRecordDeserializerForCluster(cluster)
+        .getTopicSchema(topicName);
+  }
+
+  public Mono<Void> sendMessage(String clusterName, String topicName, CreateTopicMessage msg) {
+    var cluster = clustersStorage.getClusterByName(clusterName)
+        .orElseThrow(ClusterNotFoundException::new);
+    if (!cluster.getTopics().containsKey(topicName)) {
+      throw new TopicNotFoundException();
+    }
+    if (msg.getKey() == null && msg.getContent() == null) {
+      throw new ValidationException("Invalid message: both key and value can't be null");
+    }
+    if (msg.getPartition() != null
+        && msg.getPartition() > cluster.getTopics().get(topicName).getPartitionCount() - 1) {
+      throw new ValidationException("Invalid partition");
+    }
+    return kafkaService.sendMessage(cluster, topicName, msg).then();
+  }
+
+  @NotNull
+  private Mono<Void> reThrowCustomException(Throwable e) {
+    if (e instanceof GroupIdNotFoundException) {
+      return Mono.error(new NotFoundException("The group id does not exist"));
+    } else if (e instanceof GroupNotEmptyException) {
+      return Mono.error(new IllegalEntityStateException("The group is not empty"));
+    } else {
+      return Mono.error(e);
+    }
+  }
+
+  public Mono<ReplicationFactorChangeResponse> changeReplicationFactor(
+      String clusterName,
+      String topicName,
+      ReplicationFactorChange replicationFactorChange) {
+    return clustersStorage.getClusterByName(clusterName).map(cluster ->
+        kafkaService.changeReplicationFactor(cluster, topicName, replicationFactorChange)
+            .doOnNext(topic -> updateCluster(topic, cluster.getName(), cluster))
+            .map(t -> new ReplicationFactorChangeResponse()
+                .topicName(t.getName())
+                .totalReplicationFactor(t.getReplicationFactor())))
+        .orElse(Mono.error(new ClusterNotFoundException(
+            String.format("No cluster for name '%s'", clusterName))));
+  }
 }
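
A hedged sketch of the error translation applied in deleteConsumerGroupById via reThrowCustomException (the triggering exception here is simulated; the messages match the code above):

Mono<Void> deletion = Mono.<Void>error(new GroupIdNotFoundException("simulated"))
    .onErrorResume(e -> e instanceof GroupIdNotFoundException
        ? Mono.error(new NotFoundException("The group id does not exist")) // surfaces as 404 via ErrorCode.NOT_FOUND
        : Mono.error(e));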

+ 5 - 3
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersMetricsScheduler.java

@@ -17,13 +17,15 @@ public class ClustersMetricsScheduler {
 
   private final MetricsUpdateService metricsUpdateService;
 
-  @Scheduled(fixedRate = 30000)
+  @Scheduled(fixedRateString = "${kafka.update-metrics-rate-millis:30000}")
   public void updateMetrics() {
     Flux.fromIterable(clustersStorage.getKafkaClustersMap().entrySet())
-        .subscribeOn(Schedulers.parallel())
+        .parallel()
+        .runOn(Schedulers.parallel())
         .map(Map.Entry::getValue)
         .flatMap(metricsUpdateService::updateMetrics)
         .doOnNext(s -> clustersStorage.setKafkaCluster(s.getName(), s))
-        .subscribe();
+        .then()
+        .block();
   }
 }
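
A minimal sketch of the scheduling pattern above, assuming the intent is that each @Scheduled tick blocks until every cluster update in the round has finished:

Flux.fromIterable(clusters)                 // clusters: an assumed Iterable<KafkaCluster>
    .parallel()
    .runOn(Schedulers.parallel())
    .flatMap(metricsUpdateService::updateMetrics)
    .then()
    .block();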

+ 24 - 167
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ConsumingService.java

@@ -1,29 +1,27 @@
 package com.provectus.kafka.ui.service;
 
-import com.fasterxml.jackson.databind.JsonNode;
 import com.fasterxml.jackson.databind.ObjectMapper;
-import com.provectus.kafka.ui.deserialization.DeserializationService;
-import com.provectus.kafka.ui.deserialization.RecordDeserializer;
+import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
+import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
 import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.SeekType;
+import com.provectus.kafka.ui.model.SeekDirection;
 import com.provectus.kafka.ui.model.TopicMessage;
+import com.provectus.kafka.ui.serde.DeserializationService;
+import com.provectus.kafka.ui.serde.RecordSerDe;
 import com.provectus.kafka.ui.util.ClusterUtil;
-import java.time.Duration;
+import com.provectus.kafka.ui.util.OffsetsSeekBackward;
+import com.provectus.kafka.ui.util.OffsetsSeekForward;
 import java.util.Collection;
-import java.util.HashMap;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
-import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.log4j.Log4j2;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.kafka.clients.consumer.Consumer;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.ConsumerRecords;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.utils.Bytes;
@@ -51,10 +49,20 @@ public class ConsumingService {
     int recordsLimit = Optional.ofNullable(limit)
         .map(s -> Math.min(s, MAX_RECORD_LIMIT))
         .orElse(DEFAULT_RECORD_LIMIT);
-    RecordEmitter emitter = new RecordEmitter(
-        () -> kafkaService.createConsumer(cluster),
-        new OffsetsSeek(topic, consumerPosition));
-    RecordDeserializer recordDeserializer =
+
+    java.util.function.Consumer<? super FluxSink<ConsumerRecord<Bytes, Bytes>>> emitter;
+    if (consumerPosition.getSeekDirection().equals(SeekDirection.FORWARD)) {
+      emitter = new ForwardRecordEmitter(
+          () -> kafkaService.createConsumer(cluster),
+          new OffsetsSeekForward(topic, consumerPosition)
+      );
+    } else {
+      emitter = new BackwardRecordEmitter(
+          (Map<String, Object> props) -> kafkaService.createConsumer(cluster, props),
+          new OffsetsSeekBackward(topic, consumerPosition, recordsLimit)
+      );
+    }
+    RecordSerDe recordDeserializer =
         deserializationService.getRecordDeserializerForCluster(cluster);
     return Flux.create(emitter)
         .subscribeOn(Schedulers.boundedElastic())
@@ -79,7 +87,7 @@
    * returns end offsets for partitions where start offset != end offsets.
    * This is useful when we need to verify that partition is not empty.
    */
-  private static Map<TopicPartition, Long> significantOffsets(Consumer<?, ?> consumer,
+  public static Map<TopicPartition, Long> significantOffsets(Consumer<?, ?> consumer,
                                                               String topicName,
                                                               Collection<Integer>
                                                                   partitionsToInclude) {
@@ -98,159 +106,8 @@
     if (StringUtils.isEmpty(query)) {
       return true;
     }
-
-    Object content = message.getContent();
-    JsonNode tree = objectMapper.valueToTree(content);
-    return treeContainsValue(tree, query);
-  }
-
-  private boolean treeContainsValue(JsonNode tree, String query) {
-    LinkedList<JsonNode> nodesForSearch = new LinkedList<>();
-    nodesForSearch.add(tree);
-
-    while (!nodesForSearch.isEmpty()) {
-      JsonNode node = nodesForSearch.removeFirst();
-
-      if (node.isContainerNode()) {
-        node.elements().forEachRemaining(nodesForSearch::add);
-        continue;
-      }
-
-      String nodeValue = node.asText();
-      if (nodeValue.contains(query)) {
-        return true;
-      }
-    }
-
-    return false;
-  }
-
-  @RequiredArgsConstructor
-  static class RecordEmitter
-      implements java.util.function.Consumer<FluxSink<ConsumerRecord<Bytes, Bytes>>> {
-
-    private static final Duration POLL_TIMEOUT_MS = Duration.ofMillis(1000L);
-
-    private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
-    private final OffsetsSeek offsetsSeek;
-
-    @Override
-    public void accept(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
-      try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
-        var waitingOffsets = offsetsSeek.assignAndSeek(consumer);
-        while (!sink.isCancelled() && !waitingOffsets.endReached()) {
-          ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
-          log.info("{} records polled", records.count());
-          for (ConsumerRecord<Bytes, Bytes> record : records) {
-            if (!sink.isCancelled() && !waitingOffsets.endReached()) {
-              sink.next(record);
-              waitingOffsets.markPolled(record);
-            } else {
-              break;
-            }
-          }
-        }
-        sink.complete();
-        log.info("Polling finished");
-      } catch (Exception e) {
-        log.error("Error occurred while consuming records", e);
-        throw new RuntimeException(e);
-      }
-    }
+    return (StringUtils.isNotEmpty(message.getKey()) && message.getKey().contains(query))
+         || (StringUtils.isNotEmpty(message.getContent()) && message.getContent().contains(query));
   }

-  @RequiredArgsConstructor
-  static class OffsetsSeek {
-
-    private final String topic;
-    private final ConsumerPosition consumerPosition;
-
-    public WaitingOffsets assignAndSeek(Consumer<Bytes, Bytes> consumer) {
-      SeekType seekType = consumerPosition.getSeekType();
-      log.info("Positioning consumer for topic {} with {}", topic, consumerPosition);
-      switch (seekType) {
-        case OFFSET:
-          assignAndSeekForOffset(consumer);
-          break;
-        case TIMESTAMP:
-          assignAndSeekForTimestamp(consumer);
-          break;
-        case BEGINNING:
-          assignAndSeekFromBeginning(consumer);
-          break;
-        default:
-          throw new IllegalArgumentException("Unknown seekType: " + seekType);
-      }
-      log.info("Assignment: {}", consumer.assignment());
-      return new WaitingOffsets(topic, consumer);
-    }
-
-    private List<TopicPartition> getRequestedPartitions(Consumer<Bytes, Bytes> consumer) {
-      Map<Integer, Long> partitionPositions = consumerPosition.getSeekTo();
-      return consumer.partitionsFor(topic).stream()
-          .filter(
-              p -> partitionPositions.isEmpty() || partitionPositions.containsKey(p.partition()))
-          .map(p -> new TopicPartition(p.topic(), p.partition()))
-          .collect(Collectors.toList());
-    }
-
-    private void assignAndSeekForOffset(Consumer<Bytes, Bytes> consumer) {
-      List<TopicPartition> partitions = getRequestedPartitions(consumer);
-      consumer.assign(partitions);
-      consumerPosition.getSeekTo().forEach((partition, offset) -> {
-        TopicPartition topicPartition = new TopicPartition(topic, partition);
-        consumer.seek(topicPartition, offset);
-      });
-    }
-
-    private void assignAndSeekForTimestamp(Consumer<Bytes, Bytes> consumer) {
-      Map<TopicPartition, Long> timestampsToSearch =
-          consumerPosition.getSeekTo().entrySet().stream()
-              .collect(Collectors.toMap(
-                  partitionPosition -> new TopicPartition(topic, partitionPosition.getKey()),
-                  Map.Entry::getValue
-              ));
-      Map<TopicPartition, Long> offsetsForTimestamps = consumer.offsetsForTimes(timestampsToSearch)
-          .entrySet().stream()
-          .filter(e -> e.getValue() != null)
-          .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
-
-      if (offsetsForTimestamps.isEmpty()) {
-        throw new IllegalArgumentException("No offsets were found for requested timestamps");
-      }
-
-      consumer.assign(offsetsForTimestamps.keySet());
-      offsetsForTimestamps.forEach(consumer::seek);
-    }
-
-    private void assignAndSeekFromBeginning(Consumer<Bytes, Bytes> consumer) {
-      List<TopicPartition> partitions = getRequestedPartitions(consumer);
-      consumer.assign(partitions);
-      consumer.seekToBeginning(partitions);
-    }
-
-    static class WaitingOffsets {
-      final Map<Integer, Long> offsets = new HashMap<>(); // partition number -> offset
-
-      WaitingOffsets(String topic, Consumer<?, ?> consumer) {
-        var partitions = consumer.assignment().stream()
-            .map(TopicPartition::partition)
-            .collect(Collectors.toList());
-        significantOffsets(consumer, topic, partitions)
-            .forEach((tp, offset) -> offsets.put(tp.partition(), offset - 1));
-      }
-
-      void markPolled(ConsumerRecord<?, ?> rec) {
-        Long waiting = offsets.get(rec.partition());
-        if (waiting != null && waiting <= rec.offset()) {
-          offsets.remove(rec.partition());
-        }
-      }
-
-      boolean endReached() {
-        return offsets.isEmpty();
-      }
-    }
-
-  }
 }
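The emitter selection above relies on the fact that Flux.create accepts any java.util.function.Consumer<FluxSink<T>>, so ForwardRecordEmitter and BackwardRecordEmitter can be swapped behind the same subscription pipeline. A minimal reactor-core sketch of that pattern, with an invented DemoEmitter standing in for the real record emitters:

import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;
import reactor.core.scheduler.Schedulers;

public class EmitterSketch {

  // Toy stand-in for Forward/BackwardRecordEmitter: pushes a fixed set of
  // "records" into the sink and completes, honouring cancellation.
  static class DemoEmitter implements java.util.function.Consumer<FluxSink<String>> {
    @Override
    public void accept(FluxSink<String> sink) {
      for (int i = 0; i < 5 && !sink.isCancelled(); i++) {
        sink.next("record-" + i);
      }
      sink.complete();
    }
  }

  public static void main(String[] args) {
    Flux.create(new DemoEmitter())
        .subscribeOn(Schedulers.boundedElastic()) // polling runs off the event loop
        .take(3)                                  // client-driven limit, like recordsLimit
        .doOnNext(System.out::println)
        .blockLast();
  }
}

The real emitters poll Kafka inside accept() and push each ConsumerRecord into the sink, but the wiring into Flux.create and subscribeOn(Schedulers.boundedElastic()) is the same.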

+ 290 - 47
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaService.java

@@ -1,37 +1,48 @@
 package com.provectus.kafka.ui.service;

-import com.provectus.kafka.ui.model.ConsumerGroup;
+import com.provectus.kafka.ui.exception.ValidationException;
+import com.provectus.kafka.ui.model.CreateTopicMessage;
 import com.provectus.kafka.ui.model.ExtendedAdminClient;
 import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
 import com.provectus.kafka.ui.model.InternalBrokerMetrics;
 import com.provectus.kafka.ui.model.InternalClusterMetrics;
+import com.provectus.kafka.ui.model.InternalConsumerGroup;
 import com.provectus.kafka.ui.model.InternalPartition;
+import com.provectus.kafka.ui.model.InternalReplica;
 import com.provectus.kafka.ui.model.InternalSegmentSizeDto;
 import com.provectus.kafka.ui.model.InternalTopic;
 import com.provectus.kafka.ui.model.InternalTopicConfig;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.Metric;
+import com.provectus.kafka.ui.model.PartitionsIncrease;
+import com.provectus.kafka.ui.model.ReplicationFactorChange;
 import com.provectus.kafka.ui.model.ServerStatus;
-import com.provectus.kafka.ui.model.TopicConsumerGroups;
 import com.provectus.kafka.ui.model.TopicCreation;
 import com.provectus.kafka.ui.model.TopicUpdate;
+import com.provectus.kafka.ui.serde.DeserializationService;
+import com.provectus.kafka.ui.serde.RecordSerDe;
 import com.provectus.kafka.ui.util.ClusterUtil;
 import com.provectus.kafka.ui.util.JmxClusterUtil;
 import com.provectus.kafka.ui.util.JmxMetricsName;
 import com.provectus.kafka.ui.util.JmxMetricsValueName;
 import java.math.BigDecimal;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Comparator;
 import java.util.HashMap;
 import java.util.List;
 import java.util.LongSummaryStatistics;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Properties;
+import java.util.UUID;
+import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import lombok.RequiredArgsConstructor;
+import lombok.Setter;
 import lombok.SneakyThrows;
 import lombok.extern.log4j.Log4j2;
 import org.apache.kafka.clients.admin.AdminClient;
@@ -39,17 +50,23 @@ import org.apache.kafka.clients.admin.AdminClientConfig;
 import org.apache.kafka.clients.admin.AlterConfigOp;
 import org.apache.kafka.clients.admin.Config;
 import org.apache.kafka.clients.admin.ConfigEntry;
-import org.apache.kafka.clients.admin.ConsumerGroupDescription;
 import org.apache.kafka.clients.admin.ConsumerGroupListing;
 import org.apache.kafka.clients.admin.ListTopicsOptions;
+import org.apache.kafka.clients.admin.NewPartitionReassignment;
+import org.apache.kafka.clients.admin.NewPartitions;
 import org.apache.kafka.clients.admin.NewTopic;
 import org.apache.kafka.clients.admin.RecordsToDelete;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.clients.producer.RecordMetadata;
 import org.apache.kafka.common.Node;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.config.ConfigResource;
+import org.apache.kafka.common.serialization.ByteArraySerializer;
 import org.apache.kafka.common.serialization.BytesDeserializer;
 import org.apache.kafka.common.utils.Bytes;
 import org.springframework.beans.factory.annotation.Value;
@@ -71,6 +88,8 @@ public class KafkaService {
   private final Map<String, ExtendedAdminClient> adminClientCache = new ConcurrentHashMap<>();
   private final JmxClusterUtil jmxClusterUtil;
   private final ClustersStorage clustersStorage;
+  private final DeserializationService deserializationService;
+  @Setter // used in tests
   @Value("${kafka.admin-client-timeout}")
   private int clientTimeout;

@@ -90,13 +109,16 @@ public class KafkaService {
   public Mono<KafkaCluster> getUpdatedCluster(KafkaCluster cluster) {
     return getOrCreateAdminClient(cluster)
         .flatMap(
-            ac -> getClusterMetrics(ac.getAdminClient())
-                .flatMap(i -> fillJmxMetrics(i, cluster.getName(), ac.getAdminClient()))
-                .flatMap(clusterMetrics ->
-                    getTopicsData(ac.getAdminClient()).flatMap(it ->
-                        updateSegmentMetrics(ac.getAdminClient(), clusterMetrics, it)
-                    ).map(segmentSizeDto -> buildFromData(cluster, segmentSizeDto))
-                )
+            ac -> ClusterUtil.getClusterVersion(ac.getAdminClient()).flatMap(
+                version ->
+                    getClusterMetrics(ac.getAdminClient())
+                        .flatMap(i -> fillJmxMetrics(i, cluster.getName(), ac.getAdminClient()))
+                        .flatMap(clusterMetrics ->
+                            getTopicsData(ac.getAdminClient()).flatMap(it ->
+                                updateSegmentMetrics(ac.getAdminClient(), clusterMetrics, it)
+                            ).map(segmentSizeDto -> buildFromData(cluster, version, segmentSizeDto))
+                        )
+            )
         ).onErrorResume(
             e -> Mono.just(cluster.toBuilder()
                 .status(ServerStatus.OFFLINE)
@@ -106,10 +128,12 @@ public class KafkaService {
   }

   private KafkaCluster buildFromData(KafkaCluster currentCluster,
+                                     String version,
                                      InternalSegmentSizeDto segmentSizeDto) {

     var topics = segmentSizeDto.getInternalTopicWithSegmentSize();
     var brokersMetrics = segmentSizeDto.getClusterMetricsWithSegmentSize();
+    var brokersIds = new ArrayList<>(brokersMetrics.getInternalBrokerMetrics().keySet());

     InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder =
         brokersMetrics.toBuilder();
@@ -138,12 +162,14 @@ public class KafkaService {
         .build();

     return currentCluster.toBuilder()
+        .version(version)
         .status(ServerStatus.ONLINE)
         .zookeeperStatus(zookeeperStatus)
         .lastZookeeperException(zookeeperException)
         .lastKafkaException(null)
         .metrics(clusterMetrics)
         .topics(topics)
+        .brokers(brokersIds)
         .build();
   }

@@ -299,45 +325,59 @@ public class KafkaService {
         );
   }

-  public Mono<Collection<ConsumerGroupDescription>> getConsumerGroupsInternal(
+  public Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(
       KafkaCluster cluster) {
     return getOrCreateAdminClient(cluster).flatMap(ac ->
         ClusterUtil.toMono(ac.getAdminClient().listConsumerGroups().all())
             .flatMap(s ->
-                ClusterUtil.toMono(
-                    ac.getAdminClient().describeConsumerGroups(
-                        s.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList())
-                    ).all()
-                ).map(Map::values)
+                getConsumerGroupsInternal(
+                    cluster,
+                    s.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList()))
+                )
+            );
+  }
+
+  public Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(
+      KafkaCluster cluster, List<String> groupIds) {
+
+    return getOrCreateAdminClient(cluster).flatMap(ac ->
+        ClusterUtil.toMono(
+            ac.getAdminClient().describeConsumerGroups(groupIds).all()
+        ).map(Map::values)
+    ).flatMap(descriptions ->
+        Flux.fromIterable(descriptions)
+            .parallel()
+            .flatMap(d ->
+                groupMetadata(cluster, d.groupId())
+                    .map(offsets -> ClusterUtil.convertToInternalConsumerGroup(d, offsets))
             )
+            .sequential()
+            .collectList()
     );
   }

-  public Mono<List<ConsumerGroup>> getConsumerGroups(KafkaCluster cluster) {
-    return getConsumerGroupsInternal(cluster)
-        .map(c -> c.stream().map(ClusterUtil::convertToConsumerGroup).collect(Collectors.toList()));
-  }
+  public Mono<List<InternalConsumerGroup>> getConsumerGroups(
+      KafkaCluster cluster, Optional<String> topic, List<String> groupIds) {
+    final Mono<List<InternalConsumerGroup>> consumerGroups;

-  public Mono<TopicConsumerGroups> getTopicConsumerGroups(KafkaCluster cluster, String topic) {
-    final Map<TopicPartition, Long> endOffsets = topicEndOffsets(cluster, topic);
+    if (groupIds.isEmpty()) {
+      consumerGroups = getConsumerGroupsInternal(cluster);
+    } else {
+      consumerGroups = getConsumerGroupsInternal(cluster, groupIds);
+    }

-    return getConsumerGroupsInternal(cluster)
-        .flatMapIterable(c ->
+    return consumerGroups.map(c ->
             c.stream()
                 .map(d -> ClusterUtil.filterConsumerGroupTopic(d, topic))
                 .filter(Optional::isPresent)
                 .map(Optional::get)
-                .map(d ->
-                    groupMetadata(cluster, d.groupId())
-                      .flatMapIterable(meta ->
-                          d.members().stream().flatMap(m ->
-                              ClusterUtil.convertToConsumerTopicPartitionDetails(
-                                  m, meta, endOffsets, d.groupId()
-                              ).stream()
-                          ).collect(Collectors.toList())
-                      )
-                ).collect(Collectors.toList())
-        ).flatMap(f -> f).collectList().map(l -> new TopicConsumerGroups().consumers(l));
+                .map(g ->
+                    g.toBuilder().endOffsets(
+                        topicPartitionsEndOffsets(cluster, g.getOffsets().keySet())
+                    ).build()
+                )
+                .collect(Collectors.toList())
+        );
   }

   public Mono<Map<TopicPartition, OffsetAndMetadata>> groupMetadata(KafkaCluster cluster,
@@ -349,16 +389,6 @@ public class KafkaService {
     ).flatMap(ClusterUtil::toMono);
   }

-  public Map<TopicPartition, Long> topicEndOffsets(
-      KafkaCluster cluster, String topic) {
-    try (KafkaConsumer<Bytes, Bytes> consumer = createConsumer(cluster)) {
-      final List<TopicPartition> topicPartitions = consumer.partitionsFor(topic).stream()
-          .map(i -> new TopicPartition(i.topic(), i.partition()))
-          .collect(Collectors.toList());
-      return consumer.endOffsets(topicPartitions);
-    }
-  }
-
   public Map<TopicPartition, Long> topicPartitionsEndOffsets(
       KafkaCluster cluster, Collection<TopicPartition> topicPartitions) {
     try (KafkaConsumer<Bytes, Bytes> consumer = createConsumer(cluster)) {
@@ -367,13 +397,19 @@ public class KafkaService {
   }

   public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster) {
+    return createConsumer(cluster, Map.of());
+  }
+
+  public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster,
+                                                    Map<String, Object> properties) {
     Properties props = new Properties();
     props.putAll(cluster.getProperties());
-    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui");
+    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui-" + UUID.randomUUID());
     props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
     props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
     props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
     props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+    props.putAll(properties);

     return new KafkaConsumer<>(props);
   }
@@ -487,7 +523,7 @@ public class KafkaService {
                   final Map<Integer, LongSummaryStatistics> brokerStats =
                       topicPartitions.stream().collect(
                           Collectors.groupingBy(
-                              t -> t.getT1(),
+                              Tuple2::getT1,
                               Collectors.summarizingLong(Tuple3::getT3)
                           )
                       );
@@ -631,5 +667,212 @@ public class KafkaService {
         .map(ac -> ac.deleteRecords(records)).then();
   }

+  public Mono<RecordMetadata> sendMessage(KafkaCluster cluster, String topic,
+                                          CreateTopicMessage msg) {
+    RecordSerDe serde =
+        deserializationService.getRecordDeserializerForCluster(cluster);
+
+    Properties properties = new Properties();
+    properties.putAll(cluster.getProperties());
+    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
+    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
+    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
+    try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(properties)) {
+      final ProducerRecord<byte[], byte[]> producerRecord = serde.serialize(
+          topic,
+          msg.getKey(),
+          msg.getContent(),
+          msg.getPartition()
+      );
+
+      CompletableFuture<RecordMetadata> cf = new CompletableFuture<>();
+      producer.send(producerRecord, (metadata, exception) -> {
+        if (exception != null) {
+          cf.completeExceptionally(exception);
+        } else {
+          cf.complete(metadata);
+        }
+      });
+      return Mono.fromFuture(cf);
+    }
+  }
+
+  private Mono<InternalTopic> increaseTopicPartitions(AdminClient adminClient,
+                                                      String topicName,
+                                                      Map<String, NewPartitions> newPartitionsMap
+  ) {
+    return ClusterUtil.toMono(adminClient.createPartitions(newPartitionsMap).all(), topicName)
+        .flatMap(topic -> getTopicsData(adminClient, Collections.singleton(topic)).next());
+  }
+
+  public Mono<InternalTopic> increaseTopicPartitions(
+      KafkaCluster cluster,
+      String topicName,
+      PartitionsIncrease partitionsIncrease) {
+    return getOrCreateAdminClient(cluster)
+        .flatMap(ac -> {
+          Integer actualCount = cluster.getTopics().get(topicName).getPartitionCount();
+          Integer requestedCount = partitionsIncrease.getTotalPartitionsCount();
+
+          if (requestedCount < actualCount) {
+            return Mono.error(
+                new ValidationException(String.format(
+                    "Topic currently has %s partitions, which is higher than the requested %s.",
+                    actualCount, requestedCount)));
+          }
+          if (requestedCount.equals(actualCount)) {
+            return Mono.error(
+                new ValidationException(
+                    String.format("Topic already has %s partitions.", actualCount)));
+          }
+
+          Map<String, NewPartitions> newPartitionsMap = Collections.singletonMap(
+              topicName,
+              NewPartitions.increaseTo(partitionsIncrease.getTotalPartitionsCount())
+          );
+          return increaseTopicPartitions(ac.getAdminClient(), topicName, newPartitionsMap);
+        });
+  }
+
+  private Mono<InternalTopic> changeReplicationFactor(
+      AdminClient adminClient,
+      String topicName,
+      Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments
+  ) {
+    return ClusterUtil.toMono(adminClient
+        .alterPartitionReassignments(reassignments).all(), topicName)
+        .flatMap(topic -> getTopicsData(adminClient, Collections.singleton(topic)).next());
+  }
+
+  /**
+   * Changes the topic replication factor; works on broker versions 5.4.x and higher.
+   */
+  public Mono<InternalTopic> changeReplicationFactor(
+      KafkaCluster cluster,
+      String topicName,
+      ReplicationFactorChange replicationFactorChange) {
+    return getOrCreateAdminClient(cluster)
+        .flatMap(ac -> {
+          Integer actual = cluster.getTopics().get(topicName).getReplicationFactor();
+          Integer requested = replicationFactorChange.getTotalReplicationFactor();
+          Integer brokersCount = cluster.getMetrics().getBrokerCount();
+
+          if (requested.equals(actual)) {
+            return Mono.error(
+                new ValidationException(
+                    String.format("Topic already has replicationFactor %s.", actual)));
+          }
+          if (requested > brokersCount) {
+            return Mono.error(
+                new ValidationException(
+                    String.format("Requested replication factor %s more than brokers count %s.",
+                        requested, brokersCount)));
+          }
+          return changeReplicationFactor(ac.getAdminClient(), topicName,
+              getPartitionsReassignments(cluster, topicName,
+                  replicationFactorChange));
+        });
+  }
+
+  private Map<TopicPartition, Optional<NewPartitionReassignment>> getPartitionsReassignments(
+      KafkaCluster cluster,
+      String topicName,
+      ReplicationFactorChange replicationFactorChange) {
+    // Current assignment map (Partition number -> List of brokers)
+    Map<Integer, List<Integer>> currentAssignment = getCurrentAssignment(cluster, topicName);
+    // Brokers map (Broker id -> count)
+    Map<Integer, Integer> brokersUsage = getBrokersMap(cluster, currentAssignment);
+    int currentReplicationFactor = cluster.getTopics().get(topicName).getReplicationFactor();
+
+    // If we need to increase the replication factor
+    if (replicationFactorChange.getTotalReplicationFactor() > currentReplicationFactor) {
+      // For each partition
+      for (var assignmentList : currentAssignment.values()) {
+        // Get brokers list sorted by usage
+        var brokers = brokersUsage.entrySet().stream()
+            .sorted(Map.Entry.comparingByValue())
+            .map(Map.Entry::getKey)
+            .collect(Collectors.toList());
+
+        // Iterate brokers and try to add them in assignment
+        // while (partition replicas count != requested replication factor)
+        for (Integer broker : brokers) {
+          if (!assignmentList.contains(broker)) {
+            assignmentList.add(broker);
+            brokersUsage.merge(broker, 1, Integer::sum);
+          }
+          if (assignmentList.size() == replicationFactorChange.getTotalReplicationFactor()) {
+            break;
+          }
+        }
+        if (assignmentList.size() != replicationFactorChange.getTotalReplicationFactor()) {
+          throw new ValidationException("Something went wrong while adding replicas");
+        }
+      }
+
+      // If we need to decrease the replication factor
+    } else if (replicationFactorChange.getTotalReplicationFactor() < currentReplicationFactor) {
+      for (Map.Entry<Integer, List<Integer>> assignmentEntry : currentAssignment.entrySet()) {
+        var partition = assignmentEntry.getKey();
+        var brokers = assignmentEntry.getValue();
+
+        // Get brokers list sorted by usage in reverse order
+        var brokersUsageList = brokersUsage.entrySet().stream()
+            .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
+            .map(Map.Entry::getKey)
+            .collect(Collectors.toList());
+
+        // Iterate brokers and try to remove them from assignment
+        // while (partition replicas count != requested replication factor)
+        for (Integer broker : brokersUsageList) {
+          // Only remove the broker if it is not the partition leader
+          if (!cluster.getTopics().get(topicName).getPartitions().get(partition).getLeader()
+              .equals(broker)) {
+            brokers.remove(broker);
+            brokersUsage.merge(broker, -1, Integer::sum);
+          }
+          if (brokers.size() == replicationFactorChange.getTotalReplicationFactor()) {
+            break;
+          }
+        }
+        if (brokers.size() != replicationFactorChange.getTotalReplicationFactor()) {
+          throw new ValidationException("Something went wrong while removing replicas");
+        }
+      }
+    } else {
+      throw new ValidationException("Replication factor already equals requested");
+    }
+
+    // Return result map
+    return currentAssignment.entrySet().stream().collect(Collectors.toMap(
+        e -> new TopicPartition(topicName, e.getKey()),
+        e -> Optional.of(new NewPartitionReassignment(e.getValue()))
+    ));
+  }
+
+  private Map<Integer, List<Integer>> getCurrentAssignment(KafkaCluster cluster, String topicName) {
+    return cluster.getTopics().get(topicName).getPartitions().values().stream()
+        .collect(Collectors.toMap(
+            InternalPartition::getPartition,
+            p -> p.getReplicas().stream()
+                .map(InternalReplica::getBroker)
+                .collect(Collectors.toList())
+        ));
+  }
+
+  private Map<Integer, Integer> getBrokersMap(KafkaCluster cluster,
+                                              Map<Integer, List<Integer>> currentAssignment) {
+    Map<Integer, Integer> result = cluster.getBrokers().stream()
+        .collect(Collectors.toMap(
+            c -> c,
+            c -> 0
+        ));
+    currentAssignment.values().forEach(brokers -> brokers
+        .forEach(broker -> result.put(broker, result.get(broker) + 1)));
+
+    return result;
+  }
+
+

 }
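The new sendMessage method bridges the callback-style KafkaProducer#send into a Mono through a CompletableFuture. A stand-alone sketch of that bridge (bootstrap address, topic, and class name are illustrative, not part of this change):

import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import reactor.core.publisher.Mono;

public class ProducerBridgeSketch {

  // Completes the future from the send callback, mirroring the hand-off
  // used by KafkaService#sendMessage in this diff.
  static Mono<RecordMetadata> send(String bootstrapServers, String topic, byte[] key, byte[] value) {
    Properties props = new Properties();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);

    try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
      CompletableFuture<RecordMetadata> cf = new CompletableFuture<>();
      producer.send(new ProducerRecord<>(topic, key, value), (metadata, exception) -> {
        if (exception != null) {
          cf.completeExceptionally(exception);   // broker or serialization error
        } else {
          cf.complete(metadata);                 // partition + offset of the written record
        }
      });
      return Mono.fromFuture(cf);
    }
  }

  public static void main(String[] args) {
    send("localhost:9092", "test-topic", null, "hello".getBytes())
        .doOnNext(md -> System.out.printf("written to %s-%d@%d%n", md.topic(), md.partition(), md.offset()))
        .block();
  }
}

Closing the producer in try-with-resources is safe here because close() waits for in-flight sends to complete, so the future is resolved before the Mono is handed back.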

+ 169 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/OffsetsResetService.java

@@ -0,0 +1,169 @@
+package com.provectus.kafka.ui.service;
+
+import static com.google.common.base.Preconditions.checkArgument;
+import static java.util.stream.Collectors.toMap;
+import static java.util.stream.Collectors.toSet;
+import static org.apache.kafka.common.ConsumerGroupState.DEAD;
+import static org.apache.kafka.common.ConsumerGroupState.EMPTY;
+
+import com.google.common.collect.Sets;
+import com.provectus.kafka.ui.exception.NotFoundException;
+import com.provectus.kafka.ui.exception.ValidationException;
+import com.provectus.kafka.ui.model.InternalConsumerGroup;
+import com.provectus.kafka.ui.model.KafkaCluster;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.log4j.Log4j2;
+import org.apache.kafka.clients.admin.ConsumerGroupDescription;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
+import org.apache.kafka.common.TopicPartition;
+import org.springframework.stereotype.Component;
+
+/**
+ * Implementation follows https://cwiki.apache.org/confluence/display/KAFKA/KIP-122%3A+Add+Reset+Consumer+Group+Offsets+tooling
+ * to work like the "kafka-consumer-groups --reset-offsets" console command
+ * (see kafka.admin.ConsumerGroupCommand)
+ */
+@Log4j2
+@Component
+@RequiredArgsConstructor
+public class OffsetsResetService {
+
+  private final KafkaService kafkaService;
+
+  public void resetToEarliest(KafkaCluster cluster, String group, String topic,
+                              Collection<Integer> partitions) {
+    checkGroupCondition(cluster, group);
+    try (var consumer = getConsumer(cluster, group)) {
+      var targetPartitions = getTargetPartitions(consumer, topic, partitions);
+      var offsets = consumer.beginningOffsets(targetPartitions);
+      commitOffsets(consumer, offsets);
+    }
+  }
+
+  public void resetToLatest(KafkaCluster cluster, String group, String topic,
+                            Collection<Integer> partitions) {
+    checkGroupCondition(cluster, group);
+    try (var consumer = getConsumer(cluster, group)) {
+      var targetPartitions = getTargetPartitions(consumer, topic, partitions);
+      var offsets = consumer.endOffsets(targetPartitions);
+      commitOffsets(consumer, offsets);
+    }
+  }
+
+  public void resetToTimestamp(KafkaCluster cluster, String group, String topic,
+                               Collection<Integer> partitions, long targetTimestamp) {
+    checkGroupCondition(cluster, group);
+    try (var consumer = getConsumer(cluster, group)) {
+      var targetPartitions = getTargetPartitions(consumer, topic, partitions);
+      var offsets = offsetsByTimestamp(consumer, targetPartitions, targetTimestamp);
+      commitOffsets(consumer, offsets);
+    }
+  }
+
+  public void resetToOffsets(KafkaCluster cluster, String group, String topic,
+                             Map<Integer, Long> targetOffsets) {
+    checkGroupCondition(cluster, group);
+    try (var consumer = getConsumer(cluster, group)) {
+      var offsets = targetOffsets.entrySet().stream()
+          .collect(toMap(e -> new TopicPartition(topic, e.getKey()), Map.Entry::getValue));
+      offsets = editOffsetsIfNeeded(consumer, offsets);
+      commitOffsets(consumer, offsets);
+    }
+  }
+
+  private void checkGroupCondition(KafkaCluster cluster, String groupId) {
+    InternalConsumerGroup description =
+        kafkaService.getConsumerGroupsInternal(cluster)
+            .blockOptional()
+            .stream()
+            .flatMap(Collection::stream)
+            .filter(cgd -> cgd.getGroupId().equals(groupId))
+            .findAny()
+            .orElseThrow(() -> new NotFoundException("Consumer group not found"));
+
+    if (!Set.of(DEAD, EMPTY).contains(description.getState())) {
+      throw new ValidationException(
+          String.format(
+              "Group's offsets can be reset only if group is inactive, but group is in %s state",
+              description.getState()));
+    }
+  }
+
+  private Map<TopicPartition, Long> offsetsByTimestamp(Consumer<?, ?> consumer,
+                                                       Set<TopicPartition> partitions,
+                                                       long timestamp) {
+    Map<TopicPartition, OffsetAndTimestamp> timestampedOffsets = consumer
+        .offsetsForTimes(partitions.stream().collect(toMap(p -> p, p -> timestamp)));
+
+    var foundOffsets = timestampedOffsets.entrySet().stream()
+        .filter(e -> e.getValue() != null)
+        .collect(toMap(Map.Entry::getKey, e -> e.getValue().offset()));
+
+    // for partitions where we didn't find an offset by timestamp, we use the end offsets
+    foundOffsets.putAll(consumer.endOffsets(Sets.difference(partitions, foundOffsets.keySet())));
+    return foundOffsets;
+  }
+
+  private Set<TopicPartition> getTargetPartitions(Consumer<?, ?> consumer, String topic,
+                                                  Collection<Integer> partitions) {
+    var allPartitions = allTopicPartitions(consumer, topic);
+    if (partitions == null || partitions.isEmpty()) {
+      return allPartitions;
+    } else {
+      return partitions.stream()
+          .map(idx -> new TopicPartition(topic, idx))
+          .peek(tp -> checkArgument(allPartitions.contains(tp), "Invalid partition %s", tp))
+          .collect(toSet());
+    }
+  }
+
+  private Set<TopicPartition> allTopicPartitions(Consumer<?, ?> consumer, String topic) {
+    return consumer.partitionsFor(topic).stream()
+        .map(info -> new TopicPartition(topic, info.partition()))
+        .collect(toSet());
+  }
+
+  /**
+   * Checks that the submitted offsets are between the earliest and latest offsets. If the range
+   * check fails, the offset is reset to either the earliest or the latest offset (following the
+   * logic of kafka.admin.ConsumerGroupCommand.scala).
+   */
+  private Map<TopicPartition, Long> editOffsetsIfNeeded(Consumer<?, ?> consumer,
+                                                        Map<TopicPartition, Long> offsetsToCheck) {
+    var earliestOffsets = consumer.beginningOffsets(offsetsToCheck.keySet());
+    var latestOffsets = consumer.endOffsets(offsetsToCheck.keySet());
+    var result = new HashMap<TopicPartition, Long>();
+    offsetsToCheck.forEach((tp, offset) -> {
+      if (earliestOffsets.get(tp) > offset) {
+        log.warn("Offset for partition {} is lower than earliest offset, resetting to earliest",
+            tp);
+        result.put(tp, earliestOffsets.get(tp));
+      } else if (latestOffsets.get(tp) < offset) {
+        log.warn("Offset for partition {} is greater than latest offset, resetting to latest", tp);
+        result.put(tp, latestOffsets.get(tp));
+      } else {
+        result.put(tp, offset);
+      }
+    });
+    return result;
+  }
+
+  private void commitOffsets(Consumer<?, ?> consumer, Map<TopicPartition, Long> offsets) {
+    consumer.commitSync(
+        offsets.entrySet().stream()
+            .collect(toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue())))
+    );
+  }
+
+  private Consumer<?, ?> getConsumer(KafkaCluster cluster, String groupId) {
+    return kafkaService.createConsumer(cluster, Map.of(ConsumerConfig.GROUP_ID_CONFIG, groupId));
+  }
+
+}
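The service resets a group by opening a consumer with the target group.id and committing the chosen offsets on the group's behalf, which is why checkGroupCondition insists the group is DEAD or EMPTY first. A minimal sketch of the reset-to-earliest path (broker address, group, and topic names are placeholders):

import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class ResetToEarliestSketch {

  public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");                // group must be inactive
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);

    try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
      Set<TopicPartition> partitions = consumer.partitionsFor("my-topic").stream()
          .map(info -> new TopicPartition(info.topic(), info.partition()))
          .collect(Collectors.toSet());

      // earliest available offset per partition
      Map<TopicPartition, Long> earliest = consumer.beginningOffsets(partitions);

      // commitSync with explicit offsets writes them for the configured group.id
      consumer.commitSync(earliest.entrySet().stream()
          .collect(Collectors.toMap(Map.Entry::getKey,
              e -> new OffsetAndMetadata(e.getValue()))));
    }
  }
}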

+ 168 - 97
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ClusterUtil.java

@@ -3,20 +3,24 @@ package com.provectus.kafka.ui.util;
 import static com.provectus.kafka.ui.util.KafkaConstants.TOPIC_DEFAULT_CONFIGS;
 import static org.apache.kafka.common.config.TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG;

-import com.provectus.kafka.ui.deserialization.RecordDeserializer;
+import com.provectus.kafka.ui.model.Broker;
 import com.provectus.kafka.ui.model.ConsumerGroup;
 import com.provectus.kafka.ui.model.ConsumerGroupDetails;
-import com.provectus.kafka.ui.model.ConsumerTopicPartitionDetail;
+import com.provectus.kafka.ui.model.ConsumerGroupState;
+import com.provectus.kafka.ui.model.ConsumerGroupTopicPartition;
 import com.provectus.kafka.ui.model.ExtendedAdminClient;
+import com.provectus.kafka.ui.model.InternalConsumerGroup;
 import com.provectus.kafka.ui.model.InternalPartition;
 import com.provectus.kafka.ui.model.InternalReplica;
 import com.provectus.kafka.ui.model.InternalTopic;
 import com.provectus.kafka.ui.model.InternalTopicConfig;
 import com.provectus.kafka.ui.model.ServerStatus;
 import com.provectus.kafka.ui.model.TopicMessage;
+import com.provectus.kafka.ui.serde.RecordSerDe;
 import java.time.Instant;
 import java.time.OffsetDateTime;
 import java.time.ZoneId;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -31,8 +35,6 @@ import org.apache.kafka.clients.admin.AdminClient;
 import org.apache.kafka.clients.admin.Config;
 import org.apache.kafka.clients.admin.ConfigEntry;
 import org.apache.kafka.clients.admin.ConsumerGroupDescription;
-import org.apache.kafka.clients.admin.MemberAssignment;
-import org.apache.kafka.clients.admin.MemberDescription;
 import org.apache.kafka.clients.admin.TopicDescription;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.OffsetAndMetadata;
@@ -43,6 +45,7 @@ import org.apache.kafka.common.config.ConfigResource;
 import org.apache.kafka.common.record.TimestampType;
 import org.apache.kafka.common.utils.Bytes;
 import reactor.core.publisher.Mono;
+import reactor.util.function.Tuple2;

 @Slf4j
 public class ClusterUtil {
@@ -71,57 +74,120 @@ public class ClusterUtil {
     }));
   }

-  public static ConsumerGroup convertToConsumerGroup(ConsumerGroupDescription c) {
-    ConsumerGroup consumerGroup = new ConsumerGroup();
-    consumerGroup.setConsumerGroupId(c.groupId());
-    consumerGroup.setNumConsumers(c.members().size());
-    int numTopics = c.members().stream()
-        .flatMap(m -> m.assignment().topicPartitions().stream().flatMap(t -> Stream.of(t.topic())))
-        .collect(Collectors.toSet()).size();
-    consumerGroup.setNumTopics(numTopics);
-    consumerGroup.setSimple(c.isSimpleConsumerGroup());
-    Optional.ofNullable(c.state())
-        .ifPresent(s -> consumerGroup.setState(s.name()));
-    Optional.ofNullable(c.coordinator())
-        .ifPresent(coord -> consumerGroup.setCoordintor(coord.host()));
-    consumerGroup.setPartitionAssignor(c.partitionAssignor());
+  public static InternalConsumerGroup convertToInternalConsumerGroup(
+      ConsumerGroupDescription description, Map<TopicPartition, OffsetAndMetadata> offsets) {
+
+    var builder = InternalConsumerGroup.builder();
+    builder.groupId(description.groupId());
+    builder.simple(description.isSimpleConsumerGroup());
+    builder.state(description.state());
+    builder.partitionAssignor(description.partitionAssignor());
+    builder.members(
+        description.members().stream()
+            .map(m ->
+                InternalConsumerGroup.InternalMember.builder()
+                  .assignment(m.assignment().topicPartitions())
+                  .clientId(m.clientId())
+                  .groupInstanceId(m.groupInstanceId().orElse(""))
+                  .consumerId(m.consumerId())
+                  .host(m.host())
+                  .build()
+            ).collect(Collectors.toList())
+    );
+    builder.offsets(offsets);
+    Optional.ofNullable(description.coordinator()).ifPresent(builder::coordinator);
+    return builder.build();
+  }
+
+  public static ConsumerGroup convertToConsumerGroup(InternalConsumerGroup c) {
+    return convertToConsumerGroup(c, new ConsumerGroup());
+  }
+
+  public static <T extends ConsumerGroup> T convertToConsumerGroup(
+      InternalConsumerGroup c, T consumerGroup) {
+    consumerGroup.setGroupId(c.getGroupId());
+    consumerGroup.setMembers(c.getMembers().size());
+
+    int numTopics = Stream.concat(
+        c.getOffsets().keySet().stream().map(TopicPartition::topic),
+        c.getMembers().stream()
+            .flatMap(m -> m.getAssignment().stream().map(TopicPartition::topic))
+    ).collect(Collectors.toSet()).size();
+
+    long messagesBehind = c.getOffsets().entrySet().stream()
+        .mapToLong(e ->
+            Optional.ofNullable(c.getEndOffsets())
+              .map(o -> o.get(e.getKey()))
+              .map(o -> o - e.getValue().offset())
+              .orElse(0L)
+        ).sum();
+
+    consumerGroup.setMessagesBehind(messagesBehind);
+    consumerGroup.setTopics(numTopics);
+    consumerGroup.setSimple(c.isSimple());
+
+    Optional.ofNullable(c.getState())
+        .ifPresent(s -> consumerGroup.setState(mapConsumerGroupState(s)));
+    Optional.ofNullable(c.getCoordinator())
+        .ifPresent(cd -> consumerGroup.setCoordinator(mapCoordinator(cd)));
+
+    consumerGroup.setPartitionAssignor(c.getPartitionAssignor());
     return consumerGroup;
   }

-  public static ConsumerGroupDetails convertToConsumerGroupDetails(
-      ConsumerGroupDescription desc, List<ConsumerTopicPartitionDetail> consumers
-  ) {
-    return new ConsumerGroupDetails()
-        .consumers(consumers)
-        .consumerGroupId(desc.groupId())
-        .simple(desc.isSimpleConsumerGroup())
-        .coordintor(Optional.ofNullable(desc.coordinator()).map(Node::host).orElse(""))
-        .state(Optional.ofNullable(desc.state()).map(Enum::name).orElse(""))
-        .partitionAssignor(desc.partitionAssignor());
+  public static ConsumerGroupDetails convertToConsumerGroupDetails(InternalConsumerGroup g) {
+    final ConsumerGroupDetails details = convertToConsumerGroup(g, new ConsumerGroupDetails());
+    Map<TopicPartition, ConsumerGroupTopicPartition> partitionMap = new HashMap<>();
+
+    for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : g.getOffsets().entrySet()) {
+      ConsumerGroupTopicPartition partition = new ConsumerGroupTopicPartition();
+      partition.setTopic(entry.getKey().topic());
+      partition.setPartition(entry.getKey().partition());
+      partition.setCurrentOffset(entry.getValue().offset());
+
+      final Optional<Long> endOffset = Optional.ofNullable(g.getEndOffsets())
+          .map(o -> o.get(entry.getKey()));
+
+      final Long behind = endOffset.map(o -> o - entry.getValue().offset())
+          .orElse(0L);
+
+      partition.setEndOffset(endOffset.orElse(0L));
+      partition.setMessagesBehind(behind);
+
+      partitionMap.put(entry.getKey(), partition);
+    }
+
+    for (InternalConsumerGroup.InternalMember member : g.getMembers()) {
+      for (TopicPartition topicPartition : member.getAssignment()) {
+        final ConsumerGroupTopicPartition partition = partitionMap.computeIfAbsent(topicPartition,
+            (tp) -> new ConsumerGroupTopicPartition()
+                .topic(tp.topic())
+                .partition(tp.partition())
+        );
+        partition.setHost(member.getHost());
+        partition.setConsumerId(member.getConsumerId());
+        partitionMap.put(topicPartition, partition);
+      }
+    }
+    details.setPartitions(new ArrayList<>(partitionMap.values()));
+    return details;
+  }
+
+  private static Broker mapCoordinator(Node node) {
+    return new Broker().host(node.host()).id(node.id());
   }
   }

-      MemberDescription consumer,
-      Map<TopicPartition, OffsetAndMetadata> groupOffsets,
-      Map<TopicPartition, Long> endOffsets,
-      String groupId
-  ) {
-    return consumer.assignment().topicPartitions().stream()
-        .map(tp -> {
-          long currentOffset = Optional.ofNullable(groupOffsets.get(tp))
-              .map(OffsetAndMetadata::offset).orElse(0L);
-          long endOffset = Optional.ofNullable(endOffsets.get(tp)).orElse(0L);
-          ConsumerTopicPartitionDetail cd = new ConsumerTopicPartitionDetail();
-          cd.setGroupId(groupId);
-          cd.setConsumerId(consumer.consumerId());
-          cd.setHost(consumer.host());
-          cd.setTopic(tp.topic());
-          cd.setPartition(tp.partition());
-          cd.setCurrentOffset(currentOffset);
-          cd.setEndOffset(endOffset);
-          cd.setMessagesBehind(endOffset - currentOffset);
-          return cd;
-        }).collect(Collectors.toList());
+  private static ConsumerGroupState mapConsumerGroupState(
+      org.apache.kafka.common.ConsumerGroupState state) {
+    switch (state) {
+      case DEAD: return ConsumerGroupState.DEAD;
+      case EMPTY: return ConsumerGroupState.EMPTY;
+      case STABLE: return ConsumerGroupState.STABLE;
+      case PREPARING_REBALANCE: return ConsumerGroupState.PREPARING_REBALANCE;
+      case COMPLETING_REBALANCE: return ConsumerGroupState.COMPLETING_REBALANCE;
+      default: return ConsumerGroupState.UNKNOWN;
+    }
   }
   }


@@ -197,7 +263,7 @@
   }

   public static TopicMessage mapToTopicMessage(ConsumerRecord<Bytes, Bytes> consumerRecord,
+                                               RecordSerDe recordDeserializer) {
     Map<String, String> headers = new HashMap<>();
     Map<String, String> headers = new HashMap<>();
     consumerRecord.headers().iterator()
         .forEachRemaining(header -> headers.put(header.key(), new String(header.value())));
@@ -212,12 +278,11 @@
     topicMessage.setOffset(consumerRecord.offset());
     topicMessage.setTimestamp(timestamp);
     topicMessage.setTimestampType(timestampType);
-      topicMessage.setKey(consumerRecord.key().toString());
-    }
+
     topicMessage.setHeaders(headers);
     topicMessage.setHeaders(headers);
-    topicMessage.setContent(parsedValue);
+    var parsed = recordDeserializer.deserialize(consumerRecord);
+    topicMessage.setKey(parsed.getKey());
+    topicMessage.setContent(parsed.getValue());
 
 
     return topicMessage;
     return topicMessage;
   }
   }
@@ -237,23 +302,12 @@ public class ClusterUtil {
 
 
   public static Mono<Set<ExtendedAdminClient.SupportedFeature>> getSupportedFeatures(
   public static Mono<Set<ExtendedAdminClient.SupportedFeature>> getSupportedFeatures(
       AdminClient adminClient) {
       AdminClient adminClient) {
-    return ClusterUtil.toMono(adminClient.describeCluster().controller())
-        .map(Node::id)
-        .map(id -> Collections
-            .singletonList(new ConfigResource(ConfigResource.Type.BROKER, id.toString())))
-        .map(brokerCR -> adminClient.describeConfigs(brokerCR).all())
-        .flatMap(ClusterUtil::toMono)
+    return getClusterVersion(adminClient)
         .map(ClusterUtil::getSupportedUpdateFeature)
         .map(Collections::singleton);
   }

-  private static ExtendedAdminClient.SupportedFeature getSupportedUpdateFeature(
-      Map<ConfigResource, Config> configs) {
-    String version = configs.values().stream()
-        .map(Config::entries)
-        .flatMap(Collection::stream)
-        .filter(entry -> entry.name().contains(CLUSTER_VERSION_PARAM_KEY))
-        .findFirst().orElseThrow().value();
+  private static ExtendedAdminClient.SupportedFeature getSupportedUpdateFeature(String version) {
     try {
       final String[] parts = version.split("\\.");
       if (parts.length > 2) {
@@ -268,48 +322,65 @@
     }
   }

+  public static Mono<String> getClusterVersion(AdminClient adminClient) {
+    return ClusterUtil.toMono(adminClient.describeCluster().controller())
+        .map(Node::id)
+        .map(id -> Collections
+            .singletonList(new ConfigResource(ConfigResource.Type.BROKER, id.toString())))
+        .map(brokerCR -> adminClient.describeConfigs(brokerCR).all())
+        .flatMap(ClusterUtil::toMono)
+        .map(ClusterUtil::getClusterVersion);
+  }
+
+  public static String getClusterVersion(Map<ConfigResource, Config> configs) {
+    return configs.values().stream()
+        .map(Config::entries)
+        .flatMap(Collection::stream)
+        .filter(entry -> entry.name().contains(CLUSTER_VERSION_PARAM_KEY))
+        .findFirst().orElseThrow().value();
+  }
+
+
   public static <T, R> Map<T, R> toSingleMap(Stream<Map<T, R>> streamOfMaps) {
   public static <T, R> Map<T, R> toSingleMap(Stream<Map<T, R>> streamOfMaps) {
     return streamOfMaps
         .reduce((map1, map2) -> Stream.concat(map1.entrySet().stream(), map2.entrySet().stream())
             .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))).orElseThrow();
   }

-      ConsumerGroupDescription description, String topic) {
-    final List<MemberDescription> members = description.members().stream()
-        .map(m -> filterConsumerMemberTopic(m, topic))
-        .filter(m -> !m.assignment().topicPartitions().isEmpty())
-        .collect(Collectors.toList());
+  public static Optional<InternalConsumerGroup> filterConsumerGroupTopic(
+      InternalConsumerGroup consumerGroup, Optional<String> topic) {
+
+    final Map<TopicPartition, OffsetAndMetadata> offsets =
+        consumerGroup.getOffsets().entrySet().stream()
+            .filter(e -> topic.isEmpty() || e.getKey().topic().equals(topic.get()))
+            .collect(Collectors.toMap(
+                Map.Entry::getKey,
+                Map.Entry::getValue
+            ));

-    if (!members.isEmpty()) {
+    final Collection<InternalConsumerGroup.InternalMember> members =
+        consumerGroup.getMembers().stream()
+            .map(m -> filterConsumerMemberTopic(m, topic))
+            .filter(m -> !m.getAssignment().isEmpty())
+            .collect(Collectors.toList());
+
+    if (!members.isEmpty() || !offsets.isEmpty()) {
      return Optional.of(
-          new ConsumerGroupDescription(
-              description.groupId(),
-              description.isSimpleConsumerGroup(),
-              members,
-              description.partitionAssignor(),
-              description.state(),
-              description.coordinator()
-          )
+          consumerGroup.toBuilder()
+            .offsets(offsets)
+            .members(members)
+            .build()
      );
    } else {
      return Optional.empty();
    }
  }
 
 
-  public static MemberDescription filterConsumerMemberTopic(
-      MemberDescription description, String topic) {
-    final Set<TopicPartition> topicPartitions = description.assignment().topicPartitions()
-        .stream().filter(tp -> tp.topic().equals(topic))
+  public static InternalConsumerGroup.InternalMember filterConsumerMemberTopic(
+      InternalConsumerGroup.InternalMember member, Optional<String> topic) {
+    final Set<TopicPartition> topicPartitions = member.getAssignment()
+        .stream().filter(tp -> topic.isEmpty() || tp.topic().equals(topic.get()))
        .collect(Collectors.toSet());
-    MemberAssignment assignment = new MemberAssignment(topicPartitions);
-    return new MemberDescription(
-        description.consumerId(),
-        description.groupInstanceId(),
-        description.clientId(),
-        description.host(),
-        assignment
-    );
+    return member.toBuilder().assignment(topicPartitions).build();
  }
-
}

+ 146 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeek.java

@@ -0,0 +1,146 @@
+package com.provectus.kafka.ui.util;
+
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.SeekType;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import lombok.extern.log4j.Log4j2;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.utils.Bytes;
+import reactor.util.function.Tuple2;
+import reactor.util.function.Tuples;
+
+@Log4j2
+public abstract class OffsetsSeek {
+  protected final String topic;
+  protected final ConsumerPosition consumerPosition;
+
+  protected OffsetsSeek(String topic, ConsumerPosition consumerPosition) {
+    this.topic = topic;
+    this.consumerPosition = consumerPosition;
+  }
+
+  public ConsumerPosition getConsumerPosition() {
+    return consumerPosition;
+  }
+
+  public Map<TopicPartition, Long> getPartitionsOffsets(Consumer<Bytes, Bytes> consumer) {
+    SeekType seekType = consumerPosition.getSeekType();
+    List<TopicPartition> partitions = getRequestedPartitions(consumer);
+    log.info("Positioning consumer for topic {} with {}", topic, consumerPosition);
+    Map<TopicPartition, Long> offsets;
+    switch (seekType) {
+      case OFFSET:
+        offsets = offsetsFromPositions(consumer, partitions);
+        break;
+      case TIMESTAMP:
+        offsets = offsetsForTimestamp(consumer);
+        break;
+      case BEGINNING:
+        offsets = offsetsFromBeginning(consumer, partitions);
+        break;
+      default:
+        throw new IllegalArgumentException("Unknown seekType: " + seekType);
+    }
+    return offsets;
+  }
+
+  public WaitingOffsets waitingOffsets(Consumer<Bytes, Bytes> consumer,
+                                       Collection<TopicPartition> partitions) {
+    return new WaitingOffsets(topic, consumer, partitions);
+  }
+
+  public WaitingOffsets assignAndSeek(Consumer<Bytes, Bytes> consumer) {
+    final Map<TopicPartition, Long> partitionsOffsets = getPartitionsOffsets(consumer);
+    consumer.assign(partitionsOffsets.keySet());
+    partitionsOffsets.forEach(consumer::seek);
+    log.info("Assignment: {}", consumer.assignment());
+    return waitingOffsets(consumer, partitionsOffsets.keySet());
+  }
+
+
+  public List<TopicPartition> getRequestedPartitions(Consumer<Bytes, Bytes> consumer) {
+    Map<TopicPartition, Long> partitionPositions = consumerPosition.getSeekTo();
+    return consumer.partitionsFor(topic).stream()
+        .filter(
+            p -> partitionPositions.isEmpty()
+                || partitionPositions.containsKey(new TopicPartition(p.topic(), p.partition()))
+        ).map(p -> new TopicPartition(p.topic(), p.partition()))
+        .collect(Collectors.toList());
+  }
+
+
+  protected abstract Map<TopicPartition, Long> offsetsFromBeginning(
+      Consumer<Bytes, Bytes> consumer, List<TopicPartition> partitions);
+
+  protected abstract Map<TopicPartition, Long> offsetsForTimestamp(
+      Consumer<Bytes, Bytes> consumer);
+
+  protected abstract Map<TopicPartition, Long> offsetsFromPositions(
+      Consumer<Bytes, Bytes> consumer, List<TopicPartition> partitions);
+
+  public static class WaitingOffsets {
+    private final Map<Integer, Long> endOffsets; // partition number -> offset
+    private final Map<Integer, Long> beginOffsets; // partition number -> offset
+    private final String topic;
+
+    public WaitingOffsets(String topic, Consumer<?, ?> consumer,
+                          Collection<TopicPartition> partitions) {
+      this.topic = topic;
+      var allBeginningOffsets = consumer.beginningOffsets(partitions);
+      var allEndOffsets = consumer.endOffsets(partitions);
+
+      this.endOffsets = allEndOffsets.entrySet().stream()
+          .filter(entry -> !allBeginningOffsets.get(entry.getKey()).equals(entry.getValue()))
+          .map(e -> Tuples.of(e.getKey().partition(), e.getValue() - 1))
+          .collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));
+
+      this.beginOffsets = this.endOffsets.keySet().stream()
+         .map(p -> Tuples.of(p, allBeginningOffsets.get(new TopicPartition(topic, p))))
+         .collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));
+    }
+
+    public List<TopicPartition> topicPartitions() {
+      return this.endOffsets.keySet().stream()
+          .map(p -> new TopicPartition(topic, p))
+          .collect(Collectors.toList());
+    }
+
+    public void markPolled(int partition) {
+      endOffsets.remove(partition);
+      beginOffsets.remove(partition);
+    }
+
+    public void markPolled(ConsumerRecord<?, ?> rec) {
+      Long endWaiting = endOffsets.get(rec.partition());
+      if (endWaiting != null && endWaiting <= rec.offset()) {
+        endOffsets.remove(rec.partition());
+      }
+      Long beginWaiting = beginOffsets.get(rec.partition());
+      if (beginWaiting != null && beginWaiting >= rec.offset()) {
+        beginOffsets.remove(rec.partition());
+      }
+
+    }
+
+    public boolean endReached() {
+      return endOffsets.isEmpty();
+    }
+
+    public boolean beginReached() {
+      return beginOffsets.isEmpty();
+    }
+
+    public Map<Integer, Long> getEndOffsets() {
+      return endOffsets;
+    }
+
+    public Map<Integer, Long> getBeginOffsets() {
+      return beginOffsets;
+    }
+  }
+}
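Not part of the diff above, but to make the polling contract of the new WaitingOffsets tracker concrete, here is a minimal usage sketch. The broker address, topic name and group id are placeholder assumptions; the sketch simply polls from the beginning of a topic until every partition's last known offset has been seen.

import com.provectus.kafka.ui.util.OffsetsSeek;
import java.time.Duration;
import java.util.List;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;

public class WaitingOffsetsExample {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "waiting-offsets-example");  // placeholder group
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
    String topic = "example-topic"; // placeholder topic
    try (KafkaConsumer<Bytes, Bytes> consumer = new KafkaConsumer<>(props)) {
      List<TopicPartition> partitions = consumer.partitionsFor(topic).stream()
          .map(p -> new TopicPartition(p.topic(), p.partition()))
          .collect(Collectors.toList());
      consumer.assign(partitions);
      consumer.seekToBeginning(partitions);
      // Snapshot begin/end offsets for the assigned partitions.
      var waiting = new OffsetsSeek.WaitingOffsets(topic, consumer, partitions);
      while (!waiting.endReached()) {
        for (ConsumerRecord<Bytes, Bytes> rec : consumer.poll(Duration.ofMillis(200))) {
          waiting.markPolled(rec); // partition is dropped once its last snapshotted offset is seen
        }
      }
    }
  }
}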

+ 120 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeekBackward.java

@@ -0,0 +1,120 @@
+package com.provectus.kafka.ui.util;
+
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import lombok.extern.log4j.Log4j2;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.utils.Bytes;
+import reactor.util.function.Tuple2;
+import reactor.util.function.Tuples;
+
+@Log4j2
+public class OffsetsSeekBackward extends OffsetsSeek {
+
+  private final int maxMessages;
+
+  public OffsetsSeekBackward(String topic,
+                             ConsumerPosition consumerPosition, int maxMessages) {
+    super(topic, consumerPosition);
+    this.maxMessages = maxMessages;
+  }
+
+  public int msgsPerPartition(int partitionsSize) {
+    return msgsPerPartition(maxMessages, partitionsSize);
+  }
+
+  public int msgsPerPartition(long awaitingMessages, int partitionsSize) {
+    return (int) Math.ceil((double) awaitingMessages / partitionsSize);
+  }
+
+
+  protected Map<TopicPartition, Long> offsetsFromPositions(Consumer<Bytes, Bytes> consumer,
+                                        List<TopicPartition> partitions) {
+
+    return findOffsetsInt(consumer, consumerPosition.getSeekTo(), partitions);
+  }
+
+  protected Map<TopicPartition, Long> offsetsFromBeginning(Consumer<Bytes, Bytes> consumer,
+                                            List<TopicPartition> partitions) {
+    return findOffsets(consumer, Map.of(), partitions);
+  }
+
+  protected Map<TopicPartition, Long> offsetsForTimestamp(Consumer<Bytes, Bytes> consumer) {
+    Map<TopicPartition, Long> timestampsToSearch =
+        consumerPosition.getSeekTo().entrySet().stream()
+            .collect(Collectors.toMap(
+                Map.Entry::getKey,
+                e -> e.getValue()
+            ));
+    Map<TopicPartition, Long> offsetsForTimestamps = consumer.offsetsForTimes(timestampsToSearch)
+        .entrySet().stream()
+        .filter(e -> e.getValue() != null)
+        .map(v -> Tuples.of(v.getKey(), v.getValue().offset()))
+        .collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));
+
+    if (offsetsForTimestamps.isEmpty()) {
+      throw new IllegalArgumentException("No offsets were found for requested timestamps");
+    }
+
+    log.info("Timestamps: {} to offsets: {}", timestampsToSearch, offsetsForTimestamps);
+
+    return findOffsets(consumer, offsetsForTimestamps, offsetsForTimestamps.keySet());
+  }
+
+  protected Map<TopicPartition, Long> findOffsetsInt(
+      Consumer<Bytes, Bytes> consumer, Map<TopicPartition, Long> seekTo,
+      List<TopicPartition> partitions) {
+    return findOffsets(consumer, seekTo, partitions);
+  }
+
+  protected Map<TopicPartition, Long> findOffsets(
+      Consumer<Bytes, Bytes> consumer, Map<TopicPartition, Long> seekTo,
+      Collection<TopicPartition> partitions) {
+
+    final Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
+    final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
+
+    final Map<TopicPartition, Long> seekMap = new HashMap<>();
+    final Set<TopicPartition> emptyPartitions = new HashSet<>();
+
+    for (Map.Entry<TopicPartition, Long> entry : seekTo.entrySet()) {
+      final Long endOffset = endOffsets.get(entry.getKey());
+      final Long beginningOffset = beginningOffsets.get(entry.getKey());
+      if (beginningOffset != null
+          && endOffset != null
+          && beginningOffset < endOffset
+          && entry.getValue() > beginningOffset
+      ) {
+        final Long value;
+        if (entry.getValue() > endOffset) {
+          value = endOffset;
+        } else {
+          value = entry.getValue();
+        }
+
+        seekMap.put(entry.getKey(), value);
+      } else {
+        emptyPartitions.add(entry.getKey());
+      }
+    }
+
+    Set<TopicPartition> waiting = new HashSet<>(partitions);
+    waiting.removeAll(emptyPartitions);
+    waiting.removeAll(seekMap.keySet());
+
+    for (TopicPartition topicPartition : waiting) {
+      seekMap.put(topicPartition, endOffsets.get(topicPartition));
+    }
+
+    return seekMap;
+  }
+
+
+}
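As a quick sanity check on the per-partition budget used by the backward seek, a small sketch (not part of this change): a page of 100 messages spread over 6 partitions asks for ceil(100 / 6) = 17 records per partition. Passing null for the ConsumerPosition is only a shortcut for illustration, since msgsPerPartition does not read it.

import com.provectus.kafka.ui.util.OffsetsSeekBackward;

public class BackwardBudgetExample {
  public static void main(String[] args) {
    // null position is a placeholder: msgsPerPartition only uses maxMessages and the partition count
    OffsetsSeekBackward seek = new OffsetsSeekBackward("example-topic", null, 100);
    System.out.println(seek.msgsPerPartition(6));     // ceil(100 / 6) = 17
    System.out.println(seek.msgsPerPartition(64, 4)); // ceil(64 / 4)  = 16
  }
}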

+ 61 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/OffsetsSeekForward.java

@@ -0,0 +1,61 @@
+package com.provectus.kafka.ui.util;
+
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+import lombok.extern.log4j.Log4j2;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.utils.Bytes;
+
+@Log4j2
+public class OffsetsSeekForward extends OffsetsSeek {
+
+  public OffsetsSeekForward(String topic, ConsumerPosition consumerPosition) {
+    super(topic, consumerPosition);
+  }
+
+  protected Map<TopicPartition, Long> offsetsFromPositions(Consumer<Bytes, Bytes> consumer,
+                                        List<TopicPartition> partitions) {
+    final Map<TopicPartition, Long> offsets =
+        offsetsFromBeginning(consumer, partitions);
+
+    final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(offsets.keySet());
+    final Set<TopicPartition> set = new HashSet<>(consumerPosition.getSeekTo().keySet());
+    final Map<TopicPartition, Long> collect = consumerPosition.getSeekTo().entrySet().stream()
+        .filter(e -> e.getValue() < endOffsets.get(e.getKey()))
+        .filter(e -> endOffsets.get(e.getKey()) > offsets.get(e.getKey()))
+        .collect(Collectors.toMap(
+            Map.Entry::getKey,
+            Map.Entry::getValue
+        ));
+    offsets.putAll(collect);
+    set.removeAll(collect.keySet());
+    set.forEach(offsets::remove);
+
+    return offsets;
+  }
+
+  protected Map<TopicPartition, Long> offsetsForTimestamp(Consumer<Bytes, Bytes> consumer) {
+    Map<TopicPartition, Long> offsetsForTimestamps =
+        consumer.offsetsForTimes(consumerPosition.getSeekTo())
+            .entrySet().stream()
+            .filter(e -> e.getValue() != null)
+            .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
+
+    if (offsetsForTimestamps.isEmpty()) {
+      throw new IllegalArgumentException("No offsets were found for requested timestamps");
+    }
+
+    return offsetsForTimestamps;
+  }
+
+  protected Map<TopicPartition, Long> offsetsFromBeginning(Consumer<Bytes, Bytes> consumer,
+                                            List<TopicPartition> partitions) {
+    return consumer.beginningOffsets(partitions);
+  }
+
+}

+ 21 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ArrayFieldSchema.java

@@ -0,0 +1,21 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+
+public class ArrayFieldSchema implements FieldSchema {
+  private final FieldSchema itemsSchema;
+
+  public ArrayFieldSchema(FieldSchema itemsSchema) {
+    this.itemsSchema = itemsSchema;
+  }
+
+  @Override
+  public JsonNode toJsonNode(ObjectMapper mapper) {
+    final ObjectNode objectNode = mapper.createObjectNode();
+    objectNode.setAll(new SimpleJsonType(JsonType.Type.ARRAY).toJsonNode(mapper));
+    objectNode.set("items", itemsSchema.toJsonNode(mapper));
+    return objectNode;
+  }
+}

+ 137 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverter.java

@@ -0,0 +1,137 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import java.net.URI;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.UUID;
+import java.util.stream.Collectors;
+import org.apache.avro.Schema;
+import reactor.util.function.Tuple2;
+import reactor.util.function.Tuples;
+
+public class AvroJsonSchemaConverter implements JsonSchemaConverter<Schema> {
+
+  @Override
+  public JsonSchema convert(URI basePath, Schema schema) {
+    final JsonSchema.JsonSchemaBuilder builder = JsonSchema.builder();
+
+    builder.id(basePath.resolve(schema.getName()));
+    JsonType type = convertType(schema);
+    builder.type(type);
+
+    Map<String, FieldSchema> definitions = new HashMap<>();
+    final FieldSchema root = convertSchema("root", schema, definitions, false);
+    builder.definitions(definitions);
+
+    if (type.getType().equals(JsonType.Type.OBJECT)) {
+      final ObjectFieldSchema objectRoot = (ObjectFieldSchema) root;
+      builder.properties(objectRoot.getProperties());
+      builder.required(objectRoot.getRequired());
+    }
+
+    return builder.build();
+  }
+
+
+  private FieldSchema convertField(Schema.Field field, Map<String, FieldSchema> definitions) {
+    return convertSchema(field.name(), field.schema(), definitions, true);
+  }
+
+  private FieldSchema convertSchema(String name, Schema schema,
+                                    Map<String, FieldSchema> definitions, boolean ref) {
+    if (!schema.isUnion() || (schema.getTypes().size() == 2 && schema.isNullable())) {
+      if (schema.isUnion()) {
+        final Optional<Schema> firstType =
+            schema.getTypes().stream().filter(t -> !t.getType().equals(Schema.Type.NULL))
+                .findFirst();
+        schema = firstType.orElseThrow();
+      }
+      JsonType type = convertType(schema);
+      switch (type.getType()) {
+        case BOOLEAN:
+        case NULL:
+        case STRING:
+        case ENUM:
+        case NUMBER:
+        case INTEGER:
+          return new SimpleFieldSchema(type);
+        case OBJECT:
+          if (schema.getType().equals(Schema.Type.MAP)) {
+            return new MapFieldSchema(convertSchema(name, schema.getValueType(), definitions, ref));
+          } else {
+            return createObjectSchema(name, schema, definitions, ref);
+          }
+        case ARRAY:
+          return createArraySchema(name, schema, definitions);
+        default: throw new RuntimeException("Unknown type");
+      }
+    } else {
+      return new OneOfFieldSchema(
+          schema.getTypes().stream()
+              .map(typeSchema ->
+                  convertSchema(
+                      name + UUID.randomUUID().toString(),
+                      typeSchema,
+                      definitions,
+                      true
+                  )
+              ).collect(Collectors.toList())
+      );
+    }
+  }
+
+  private FieldSchema createObjectSchema(String name, Schema schema,
+                                         Map<String, FieldSchema> definitions, boolean ref) {
+    final Map<String, FieldSchema> fields = schema.getFields().stream()
+        .map(f -> Tuples.of(f.name(), convertField(f, definitions)))
+        .collect(Collectors.toMap(
+            Tuple2::getT1,
+            Tuple2::getT2
+        ));
+
+    final List<String> required = schema.getFields().stream()
+        .filter(f -> !f.schema().isNullable())
+        .map(Schema.Field::name).collect(Collectors.toList());
+
+    if (ref) {
+      String definitionName = String.format("Record%s", schema.getName());
+      definitions.put(definitionName, new ObjectFieldSchema(fields, required));
+      return new RefFieldSchema(String.format("#/definitions/%s", definitionName));
+    } else {
+      return new ObjectFieldSchema(fields, required);
+    }
+  }
+
+  private ArrayFieldSchema createArraySchema(String name, Schema schema,
+                                             Map<String, FieldSchema> definitions) {
+    return new ArrayFieldSchema(
+        convertSchema(name, schema.getElementType(), definitions, true)
+    );
+  }
+
+  private JsonType convertType(Schema schema) {
+    switch (schema.getType()) {
+      case INT:
+      case LONG:
+        return new SimpleJsonType(JsonType.Type.INTEGER);
+      case MAP:
+      case RECORD:
+        return new SimpleJsonType(JsonType.Type.OBJECT);
+      case ENUM:
+        return new EnumJsonType(schema.getEnumSymbols());
+      case BYTES:
+      case STRING:
+        return new SimpleJsonType(JsonType.Type.STRING);
+      case NULL: return new SimpleJsonType(JsonType.Type.NULL);
+      case ARRAY: return new SimpleJsonType(JsonType.Type.ARRAY);
+      case FIXED:
+      case FLOAT:
+      case DOUBLE:
+        return new SimpleJsonType(JsonType.Type.NUMBER);
+      case BOOLEAN: return new SimpleJsonType(JsonType.Type.BOOLEAN);
+      default: return new SimpleJsonType(JsonType.Type.STRING);
+    }
+  }
+}
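A minimal sketch of how this converter can be exercised (not part of the diff); the Avro record and base URI below are illustrative assumptions. A nullable union collapses to its non-null branch, so only the non-nullable field ends up in "required".

import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.util.jsonschema.AvroJsonSchemaConverter;
import com.provectus.kafka.ui.util.jsonschema.JsonSchema;
import java.net.URI;
import org.apache.avro.Schema;

public class AvroToJsonSchemaExample {
  public static void main(String[] args) {
    // Tiny Avro record used purely for illustration.
    String avsc = "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
        + "{\"name\":\"id\",\"type\":\"long\"},"
        + "{\"name\":\"email\",\"type\":[\"null\",\"string\"]}]}";
    Schema avroSchema = new Schema.Parser().parse(avsc);
    JsonSchema jsonSchema = new AvroJsonSchemaConverter()
        .convert(URI.create("http://example.com/schemas/"), avroSchema);
    // Prints a draft 2020-12 schema where "id" is required and "email" is optional.
    System.out.println(jsonSchema.toJson(new ObjectMapper()));
  }
}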

+ 24 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/EnumJsonType.java

@@ -0,0 +1,24 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.util.List;
+import java.util.Map;
+
+
+public class EnumJsonType extends JsonType {
+  private final List<String> values;
+
+  public EnumJsonType(List<String> values) {
+    super(Type.ENUM);
+    this.values = values;
+  }
+
+  @Override
+  public Map<String, JsonNode> toJsonNode(ObjectMapper mapper) {
+    return Map.of(
+        this.type.getName(),
+        mapper.valueToTree(values)
+    );
+  }
+}

+ 8 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/FieldSchema.java

@@ -0,0 +1,8 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+public interface FieldSchema {
+  JsonNode toJsonNode(ObjectMapper mapper);
+}

+ 65 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/JsonSchema.java

@@ -0,0 +1,65 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.fasterxml.jackson.databind.node.TextNode;
+import java.net.URI;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import lombok.Builder;
+import lombok.Data;
+import lombok.SneakyThrows;
+import reactor.util.function.Tuple2;
+import reactor.util.function.Tuples;
+
+@Data
+@Builder
+public class JsonSchema {
+  private final URI id;
+  private final URI schema = URI.create("https://json-schema.org/draft/2020-12/schema");
+  private final String title;
+  private final JsonType type;
+  private final Map<String, FieldSchema> properties;
+  private final Map<String, FieldSchema> definitions;
+  private final List<String> required;
+
+  public String toJson(ObjectMapper mapper) {
+    final ObjectNode objectNode = mapper.createObjectNode();
+    objectNode.set("$id", new TextNode(id.toString()));
+    objectNode.set("$schema", new TextNode(schema.toString()));
+    objectNode.setAll(type.toJsonNode(mapper));
+    if (properties != null && !properties.isEmpty()) {
+      objectNode.set("properties", mapper.valueToTree(
+          properties.entrySet().stream()
+              .map(e -> Tuples.of(e.getKey(), e.getValue().toJsonNode(mapper)))
+              .collect(Collectors.toMap(
+                  Tuple2::getT1,
+                  Tuple2::getT2
+              ))
+      ));
+      if (!required.isEmpty()) {
+        objectNode.set("required", mapper.valueToTree(required));
+      }
+    }
+    if (definitions != null && !definitions.isEmpty()) {
+      objectNode.set("definitions", mapper.valueToTree(
+          definitions.entrySet().stream()
+              .map(e -> Tuples.of(e.getKey(), e.getValue().toJsonNode(mapper)))
+              .collect(Collectors.toMap(
+                  Tuple2::getT1,
+                  Tuple2::getT2
+              ))
+      ));
+    }
+    return objectNode.toString();
+  }
+
+  @SneakyThrows
+  public static JsonSchema stringSchema() {
+    return JsonSchema.builder()
+        .id(new URI("http://unknown.unknown"))
+        .type(new SimpleJsonType(JsonType.Type.STRING))
+        .build();
+  }
+}
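For reference, a short sketch (not part of the diff) of what the stringSchema() helper serializes to; it appears to act as a plain-string fallback when no structured schema is known, though that reading is an assumption.

import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.util.jsonschema.JsonSchema;

public class StringSchemaExample {
  public static void main(String[] args) {
    String json = JsonSchema.stringSchema().toJson(new ObjectMapper());
    // -> {"$id":"http://unknown.unknown","$schema":"https://json-schema.org/draft/2020-12/schema","type":"string"}
    System.out.println(json);
  }
}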

+ 7 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/JsonSchemaConverter.java

@@ -0,0 +1,7 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import java.net.URI;
+
+public interface JsonSchemaConverter<T> {
+  JsonSchema convert(URI basePath, T schema);
+}

+ 41 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/JsonType.java

@@ -0,0 +1,41 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.util.Map;
+
+public abstract class JsonType {
+
+  protected final Type type;
+
+  public JsonType(Type type) {
+    this.type = type;
+  }
+
+  public Type getType() {
+    return type;
+  }
+
+  public abstract Map<String, JsonNode> toJsonNode(ObjectMapper mapper);
+
+  public enum Type {
+    NULL,
+    BOOLEAN,
+    OBJECT,
+    ARRAY,
+    NUMBER,
+    INTEGER,
+    ENUM,
+    STRING;
+
+    private final String name;
+
+    Type() {
+      this.name = this.name().toLowerCase();
+    }
+
+    public String getName() {
+      return name;
+    }
+  }
+}

+ 22 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/MapFieldSchema.java

@@ -0,0 +1,22 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.fasterxml.jackson.databind.node.TextNode;
+
+public class MapFieldSchema implements FieldSchema {
+  private final FieldSchema itemSchema;
+
+  public MapFieldSchema(FieldSchema itemSchema) {
+    this.itemSchema = itemSchema;
+  }
+
+  @Override
+  public JsonNode toJsonNode(ObjectMapper mapper) {
+    final ObjectNode objectNode = mapper.createObjectNode();
+    objectNode.set("type", new TextNode(JsonType.Type.OBJECT.getName()));
+    objectNode.set("additionalProperties", itemSchema.toJsonNode(mapper));
+    return objectNode;
+  }
+}

+ 46 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ObjectFieldSchema.java

@@ -0,0 +1,46 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import reactor.util.function.Tuple2;
+import reactor.util.function.Tuples;
+
+public class ObjectFieldSchema implements FieldSchema {
+  private final Map<String, FieldSchema> properties;
+  private final List<String> required;
+
+  public ObjectFieldSchema(Map<String, FieldSchema> properties,
+                           List<String> required) {
+    this.properties = properties;
+    this.required = required;
+  }
+
+  public Map<String, FieldSchema> getProperties() {
+    return properties;
+  }
+
+  public List<String> getRequired() {
+    return required;
+  }
+
+  @Override
+  public JsonNode toJsonNode(ObjectMapper mapper) {
+    final Map<String, JsonNode> nodes = properties.entrySet().stream()
+        .map(e -> Tuples.of(e.getKey(), e.getValue().toJsonNode(mapper)))
+        .collect(Collectors.toMap(
+            Tuple2::getT1,
+            Tuple2::getT2
+        ));
+    final ObjectNode objectNode = mapper.createObjectNode();
+    objectNode.setAll(new SimpleJsonType(JsonType.Type.OBJECT).toJsonNode(mapper));
+    objectNode.set("properties", mapper.valueToTree(nodes));
+    if (!required.isEmpty()) {
+      objectNode.set("required", mapper.valueToTree(required));
+    }
+    return objectNode;
+  }
+}

+ 27 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/OneOfFieldSchema.java

@@ -0,0 +1,27 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.util.List;
+import java.util.stream.Collectors;
+
+public class OneOfFieldSchema implements FieldSchema {
+  private final List<FieldSchema> schemaList;
+
+  public OneOfFieldSchema(
+      List<FieldSchema> schemaList) {
+    this.schemaList = schemaList;
+  }
+
+  @Override
+  public JsonNode toJsonNode(ObjectMapper mapper) {
+    return mapper.createObjectNode()
+        .set("oneOf",
+            mapper.createArrayNode().addAll(
+                schemaList.stream()
+                    .map(s -> s.toJsonNode(mapper))
+                    .collect(Collectors.toList())
+            )
+        );
+  }
+}

+ 134 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/ProtobufSchemaConverter.java

@@ -0,0 +1,134 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.google.protobuf.Descriptors;
+import java.net.URI;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import reactor.util.function.Tuple2;
+import reactor.util.function.Tuples;
+
+public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.Descriptor> {
+  @Override
+  public JsonSchema convert(URI basePath, Descriptors.Descriptor schema) {
+    final JsonSchema.JsonSchemaBuilder builder = JsonSchema.builder();
+
+    builder.id(basePath.resolve(schema.getFullName()));
+    builder.type(new SimpleJsonType(JsonType.Type.OBJECT));
+
+    Map<String, FieldSchema> definitions = new HashMap<>();
+    final ObjectFieldSchema root =
+        (ObjectFieldSchema) convertObjectSchema(schema, definitions, false);
+    builder.definitions(definitions);
+
+    builder.properties(root.getProperties());
+    builder.required(root.getRequired());
+
+    return builder.build();
+  }
+
+  private FieldSchema convertObjectSchema(Descriptors.Descriptor schema,
+                                          Map<String, FieldSchema> definitions, boolean ref) {
+    final Map<String, FieldSchema> fields = schema.getFields().stream()
+        .map(f -> Tuples.of(f.getName(), convertField(f, definitions)))
+        .collect(Collectors.toMap(
+            Tuple2::getT1,
+            Tuple2::getT2
+        ));
+
+    final Map<String, OneOfFieldSchema> oneOfFields = schema.getOneofs().stream().map(o ->
+        Tuples.of(
+            o.getName(),
+            new OneOfFieldSchema(
+              o.getFields().stream().map(
+                  Descriptors.FieldDescriptor::getName
+              ).map(fields::get).collect(Collectors.toList())
+            )
+        )
+    ).collect(Collectors.toMap(
+        Tuple2::getT1,
+        Tuple2::getT2
+    ));
+
+    final List<String> allOneOfFields = schema.getOneofs().stream().flatMap(o ->
+                o.getFields().stream().map(Descriptors.FieldDescriptor::getName)
+        ).collect(Collectors.toList());
+
+    final Map<String, FieldSchema> excludedOneOf = fields.entrySet().stream()
+        .filter(f -> !allOneOfFields.contains(f.getKey()))
+        .collect(Collectors.toMap(
+            Map.Entry::getKey,
+            Map.Entry::getValue
+        ));
+
+    Map<String, FieldSchema> finalFields = new HashMap<>(excludedOneOf);
+    finalFields.putAll(oneOfFields);
+
+    final List<String> required = schema.getFields().stream()
+        .filter(f -> !f.isOptional())
+        .map(Descriptors.FieldDescriptor::getName).collect(Collectors.toList());
+
+    if (ref) {
+      String definitionName = String.format("record.%s", schema.getFullName());
+      definitions.put(definitionName, new ObjectFieldSchema(finalFields, required));
+      return new RefFieldSchema(String.format("#/definitions/%s", definitionName));
+    } else {
+      return new ObjectFieldSchema(fields, required);
+    }
+  }
+
+  private FieldSchema convertField(Descriptors.FieldDescriptor field,
+                              Map<String, FieldSchema> definitions) {
+    final JsonType jsonType = convertType(field);
+
+    FieldSchema fieldSchema;
+    if (jsonType.getType().equals(JsonType.Type.OBJECT)) {
+      fieldSchema = convertObjectSchema(field.getMessageType(), definitions, true);
+    } else {
+      fieldSchema = new SimpleFieldSchema(jsonType);
+    }
+
+    if (field.isRepeated()) {
+      return new ArrayFieldSchema(fieldSchema);
+    } else {
+      return fieldSchema;
+    }
+  }
+
+
+  private JsonType convertType(Descriptors.FieldDescriptor field) {
+    switch (field.getType()) {
+      case INT32:
+      case INT64:
+      case SINT32:
+      case SINT64:
+      case UINT32:
+      case UINT64:
+      case FIXED32:
+      case FIXED64:
+      case SFIXED32:
+      case SFIXED64:
+        return new SimpleJsonType(JsonType.Type.INTEGER);
+      case MESSAGE:
+      case GROUP:
+        return new SimpleJsonType(JsonType.Type.OBJECT);
+      case ENUM:
+        return new EnumJsonType(
+            field.getEnumType().getValues().stream()
+                .map(Descriptors.EnumValueDescriptor::getName)
+                .collect(Collectors.toList())
+        );
+      case BYTES:
+      case STRING:
+        return new SimpleJsonType(JsonType.Type.STRING);
+      case FLOAT:
+      case DOUBLE:
+        return new SimpleJsonType(JsonType.Type.NUMBER);
+      case BOOL:
+        return new SimpleJsonType(JsonType.Type.BOOLEAN);
+      default:
+        return new SimpleJsonType(JsonType.Type.STRING);
+    }
+  }
+}
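A usage sketch for the protobuf converter (not part of the diff). AddressBookProtos.Person is a hypothetical protoc-generated class standing in for any real generated message; its import is omitted because it does not exist in this repository.

import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.util.jsonschema.JsonSchema;
import com.provectus.kafka.ui.util.jsonschema.ProtobufSchemaConverter;
import java.net.URI;

public class ProtobufToJsonSchemaExample {
  public static void main(String[] args) {
    // getDescriptor() is available on any protoc-generated message class.
    JsonSchema jsonSchema = new ProtobufSchemaConverter().convert(
        URI.create("http://example.com/schemas/"),          // illustrative base URI
        AddressBookProtos.Person.getDescriptor());           // hypothetical generated class
    System.out.println(jsonSchema.toJson(new ObjectMapper()));
  }
}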

+ 18 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/RefFieldSchema.java

@@ -0,0 +1,18 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.TextNode;
+
+public class RefFieldSchema implements FieldSchema {
+  private final String ref;
+
+  public RefFieldSchema(String ref) {
+    this.ref = ref;
+  }
+
+  @Override
+  public JsonNode toJsonNode(ObjectMapper mapper) {
+    return mapper.createObjectNode().set("$ref", new TextNode(ref));
+  }
+}

+ 17 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/SimpleFieldSchema.java

@@ -0,0 +1,17 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+
+public class SimpleFieldSchema implements FieldSchema {
+  private final JsonType type;
+
+  public SimpleFieldSchema(JsonType type) {
+    this.type = type;
+  }
+
+  @Override
+  public JsonNode toJsonNode(ObjectMapper mapper) {
+    return mapper.createObjectNode().setAll(type.toJsonNode(mapper));
+  }
+}

+ 21 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/jsonschema/SimpleJsonType.java

@@ -0,0 +1,21 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.TextNode;
+import java.util.Map;
+
+public class SimpleJsonType extends JsonType {
+
+  public SimpleJsonType(Type type) {
+    super(type);
+  }
+
+  @Override
+  public Map<String, JsonNode> toJsonNode(ObjectMapper mapper) {
+    return Map.of(
+        "type",
+        new TextNode(type.getName())
+    );
+  }
+}

+ 12 - 13
kafka-ui-api/src/main/resources/application-local.yml

@@ -1,24 +1,23 @@
kafka:
  clusters:
-    -
-      name: local
-      bootstrapServers: localhost:9092
+    - name: local
+      bootstrapServers: localhost:9093
      zookeeper: localhost:2181
      schemaRegistry: http://localhost:8081
      kafkaConnect:
        - name: first
          address: http://localhost:8083
      jmxPort: 9997
-    -
-      name: secondLocal
-      bootstrapServers: localhost:9093
-      zookeeper: localhost:2182
-      schemaRegistry: http://localhost:18085
-      kafkaConnect:
-        - name: first
-          address: http://localhost:8083
-      jmxPort: 9998
-      read-only: true
+  #    -
+  #      name: secondLocal
+  #      bootstrapServers: localhost:9093
+  #      zookeeper: localhost:2182
+  #      schemaRegistry: http://localhost:18085
+  #      kafkaConnect:
+  #        - name: first
+  #          address: http://localhost:8083
+  #      jmxPort: 9998
+  #      read-only: true
  admin-client-timeout: 5000
zookeeper:
  connection-timeout: 1000

+ 9 - 10
kafka-ui-api/src/main/resources/application-sdp.yml

@@ -1,15 +1,14 @@
kafka:
  clusters:
-    -
-      name: local
-      bootstrapServers: kafka0:29092
-      zookeeper: zookeeper0:2181
-      schemaRegistry: http://schemaregistry0:8085
-    -
-      name: secondLocal
-      zookeeper: zookeeper1:2181
-      bootstrapServers: kafka1:29092
-      schemaRegistry: http://schemaregistry1:8085
+    - name: local
+      bootstrapServers: b-1.kad-msk.uxahxx.c6.kafka.eu-west-1.amazonaws.com:9092
+#      zookeeper: localhost:2181
+      schemaRegistry: http://kad-ecs-application-lb-857515197.eu-west-1.elb.amazonaws.com:9000/api/schema-registry
+  #    -
+  #      name: secondLocal
+  #      zookeeper: zookeeper1:2181
+  #      bootstrapServers: kafka1:29092
+  #      schemaRegistry: http://schemaregistry1:8085
  admin-client-timeout: 5000
zookeeper:
  connection-timeout: 1000

+ 4 - 4
kafka-ui-api/src/test/java/com/provectus/kafka/ui/AbstractBaseTest.java

@@ -24,8 +24,8 @@ import org.testcontainers.utility.DockerImageName;
@SpringBootTest
@ActiveProfiles("test")
public abstract class AbstractBaseTest {
-  public static String LOCAL = "local";
-  public static String SECOND_LOCAL = "secondLocal";
+  public static final String LOCAL = "local";
+  public static final String SECOND_LOCAL = "secondLocal";
 
 
  private static final String CONFLUENT_PLATFORM_VERSION = "5.5.0";
 
 
@@ -56,14 +56,14 @@ public abstract class AbstractBaseTest {
    public void initialize(@NotNull ConfigurableApplicationContext context) {
      System.setProperty("kafka.clusters.0.name", LOCAL);
      System.setProperty("kafka.clusters.0.bootstrapServers", kafka.getBootstrapServers());
-      System.setProperty("kafka.clusters.0.schemaRegistry", schemaRegistry.getTarget());
+      System.setProperty("kafka.clusters.0.schemaRegistry", schemaRegistry.getUrl());
       System.setProperty("kafka.clusters.0.kafkaConnect.0.name", "kafka-connect");
       System.setProperty("kafka.clusters.0.kafkaConnect.0.name", "kafka-connect");
       System.setProperty("kafka.clusters.0.kafkaConnect.0.address", kafkaConnect.getTarget());
       System.setProperty("kafka.clusters.0.kafkaConnect.0.address", kafkaConnect.getTarget());
 
 
       System.setProperty("kafka.clusters.1.name", SECOND_LOCAL);
       System.setProperty("kafka.clusters.1.name", SECOND_LOCAL);
       System.setProperty("kafka.clusters.1.readOnly", "true");
       System.setProperty("kafka.clusters.1.readOnly", "true");
       System.setProperty("kafka.clusters.1.bootstrapServers", kafka.getBootstrapServers());
       System.setProperty("kafka.clusters.1.bootstrapServers", kafka.getBootstrapServers());
-      System.setProperty("kafka.clusters.1.schemaRegistry", schemaRegistry.getTarget());
+      System.setProperty("kafka.clusters.1.schemaRegistry", schemaRegistry.getUrl());
       System.setProperty("kafka.clusters.1.kafkaConnect.0.name", "kafka-connect");
       System.setProperty("kafka.clusters.1.kafkaConnect.0.name", "kafka-connect");
       System.setProperty("kafka.clusters.1.kafkaConnect.0.address", kafkaConnect.getTarget());
       System.setProperty("kafka.clusters.1.kafkaConnect.0.address", kafkaConnect.getTarget());
     }
     }

+ 99 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerGroupTests.java

@@ -0,0 +1,99 @@
+package com.provectus.kafka.ui;
+
+import java.time.Duration;
+import java.util.List;
+import java.util.Properties;
+import java.util.UUID;
+import lombok.extern.log4j.Log4j2;
+import lombok.val;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.common.serialization.BytesDeserializer;
+import org.apache.kafka.common.utils.Bytes;
+import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.autoconfigure.web.reactive.AutoConfigureWebTestClient;
+import org.springframework.test.context.ContextConfiguration;
+import org.springframework.test.web.reactive.server.WebTestClient;
+
+@ContextConfiguration(initializers = {AbstractBaseTest.Initializer.class})
+@Log4j2
+@AutoConfigureWebTestClient(timeout = "10000")
+public class KafkaConsumerGroupTests extends AbstractBaseTest {
+  @Autowired
+  WebTestClient webTestClient;
+
+  @Test
+  void shouldNotFoundWhenNoSuchConsumerGroupId() {
+    String groupId = "groupA";
+    String expError = "The group id does not exist";
+    webTestClient
+        .delete()
+        .uri("/api/clusters/{clusterName}/consumer-groups/{groupId}", LOCAL, groupId)
+        .exchange()
+        .expectStatus()
+        .isNotFound();
+  }
+
+  @Test
+  void shouldOkWhenConsumerGroupIsNotActive() {
+    String topicName = createTopicWithRandomName();
+
+    //Create a consumer and subscribe to the topic
+    String groupId = UUID.randomUUID().toString();
+    val consumer = createTestConsumerWithGroupId(groupId);
+    consumer.subscribe(List.of(topicName));
+    consumer.poll(Duration.ofMillis(100));
+
+    //Unsubscribe from all topics to be able to delete this consumer
+    consumer.unsubscribe();
+
+    //Delete the consumer when it's INACTIVE and check
+    webTestClient
+        .delete()
+        .uri("/api/clusters/{clusterName}/consumer-groups/{groupId}", LOCAL, groupId)
+        .exchange()
+        .expectStatus()
+        .isOk();
+  }
+
+  @Test
+  void shouldBeBadRequestWhenConsumerGroupIsActive() {
+    String topicName = createTopicWithRandomName();
+
+    //Create a consumer and subscribe to the topic
+    String groupId = UUID.randomUUID().toString();
+    val consumer = createTestConsumerWithGroupId(groupId);
+    consumer.subscribe(List.of(topicName));
+    consumer.poll(Duration.ofMillis(100));
+
+    //Try to delete the consumer when it's ACTIVE
+    String expError = "The group is not empty";
+    webTestClient
+        .delete()
+        .uri("/api/clusters/{clusterName}/consumer-groups/{groupId}", LOCAL, groupId)
+        .exchange()
+        .expectStatus()
+        .isBadRequest();
+  }
+
+  private String createTopicWithRandomName() {
+    String topicName = UUID.randomUUID().toString();
+    short replicationFactor = 1;
+    int partitions = 1;
+    createTopic(new NewTopic(topicName, partitions, replicationFactor));
+    return topicName;
+  }
+
+  private KafkaConsumer<Bytes, Bytes> createTestConsumerWithGroupId(String groupId) {
+    Properties props = new Properties();
+    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
+    props.put(ConsumerConfig.CLIENT_ID_CONFIG, groupId);
+    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());
+    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
+    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
+    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
+    return new KafkaConsumer<>(props);
+  }
+}

+ 51 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/KafkaConsumerTests.java

@@ -1,12 +1,16 @@
 package com.provectus.kafka.ui;
 
 
+import com.provectus.kafka.ui.model.PartitionsIncrease;
+import com.provectus.kafka.ui.model.PartitionsIncreaseResponse;
 import com.provectus.kafka.ui.model.TopicCreation;
+import com.provectus.kafka.ui.model.TopicDetails;
 import com.provectus.kafka.ui.model.TopicMessage;
 import com.provectus.kafka.ui.producer.KafkaTestProducer;
 import java.util.Map;
 import java.util.UUID;
 import java.util.stream.Stream;
 import lombok.extern.log4j.Log4j2;
+import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.Test;
 import org.springframework.beans.factory.annotation.Autowired;
 import org.springframework.boot.test.autoconfigure.web.reactive.AutoConfigureWebTestClient;
@@ -65,6 +69,53 @@ public class KafkaConsumerTests extends AbstractBaseTest {
        .hasSize(0);
  }
 
 
+  @Test
+  public void shouldIncreasePartitionsUpTo10() {
+    var topicName = UUID.randomUUID().toString();
+    webTestClient.post()
+        .uri("/api/clusters/{clusterName}/topics", LOCAL)
+        .bodyValue(new TopicCreation()
+            .name(topicName)
+            .partitions(1)
+            .replicationFactor(1)
+            .configs(Map.of())
+        )
+        .exchange()
+        .expectStatus()
+        .isOk();
+
+    PartitionsIncreaseResponse response = webTestClient.patch()
+        .uri("/api/clusters/{clusterName}/topics/{topicName}/partitions",
+            LOCAL,
+            topicName)
+        .bodyValue(new PartitionsIncrease()
+            .totalPartitionsCount(10)
+        )
+        .exchange()
+        .expectStatus()
+        .isOk()
+        .expectBody(PartitionsIncreaseResponse.class)
+        .returnResult()
+        .getResponseBody();
+
+    assert response != null;
+    Assertions.assertEquals(10, response.getTotalPartitionsCount());
+
+    TopicDetails topicDetails = webTestClient.get()
+        .uri("/api/clusters/{clusterName}/topics/{topicName}",
+            LOCAL,
+            topicName)
+        .exchange()
+        .expectStatus()
+        .isOk()
+        .expectBody(TopicDetails.class)
+        .returnResult()
+        .getResponseBody();
+
+    assert topicDetails != null;
+    Assertions.assertEquals(10, topicDetails.getPartitionCount());
+  }
+
  @Test
  public void shouldReturn404ForNonExistingTopic() {
    var topicName = UUID.randomUUID().toString();

+ 8 - 1
kafka-ui-api/src/test/java/com/provectus/kafka/ui/container/SchemaRegistryContainer.java

@@ -1,5 +1,7 @@
 package com.provectus.kafka.ui.container;
 
 
+import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
+import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
 import org.testcontainers.containers.GenericContainer;
 import org.testcontainers.containers.KafkaContainer;
 import org.testcontainers.containers.Network;
@@ -25,7 +27,12 @@ public class SchemaRegistryContainer extends GenericContainer<SchemaRegistryCont
    return self();
  }
 
 
-  public String getTarget() {
+  public String getUrl() {
     return "http://" + getContainerIpAddress() + ":" + getMappedPort(SCHEMA_PORT);
     return "http://" + getContainerIpAddress() + ":" + getMappedPort(SCHEMA_PORT);
   }
   }
+
+  public SchemaRegistryClient schemaRegistryClient() {
+    return new CachedSchemaRegistryClient(getUrl(), 1000);
+  }
+
 }

+ 8 - 8
kafka-ui-api/src/test/java/com/provectus/kafka/ui/deserialization/SchemaRegistryRecordDeserializerTest.java → kafka-ui-api/src/test/java/com/provectus/kafka/ui/serde/SchemaRegistryRecordDeserializerTest.java

@@ -1,22 +1,22 @@
-package com.provectus.kafka.ui.deserialization;
+package com.provectus.kafka.ui.serde;
 
 
+import static com.provectus.kafka.ui.serde.RecordSerDe.DeserializedKeyValue;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 
 
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import java.util.Map;
+import com.provectus.kafka.ui.serde.schemaregistry.SchemaRegistryAwareRecordSerDe;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.common.utils.Bytes;
 import org.junit.jupiter.api.Test;
 
 
 class SchemaRegistryRecordDeserializerTest {
 
 
-  private final SchemaRegistryRecordDeserializer deserializer =
-      new SchemaRegistryRecordDeserializer(
+  private final SchemaRegistryAwareRecordSerDe deserializer =
+      new SchemaRegistryAwareRecordSerDe(
          KafkaCluster.builder()
              .schemaNameTemplate("%s-value")
-              .build(),
-          new ObjectMapper()
+              .build()
      );
 
 
  @Test
@@ -25,13 +25,13 @@ class SchemaRegistryRecordDeserializerTest {
    var deserializedRecord = deserializer.deserialize(
        new ConsumerRecord<>("topic", 1, 0, Bytes.wrap("key".getBytes()),
            Bytes.wrap(value.getBytes())));
-    assertEquals(value, deserializedRecord);
+    assertEquals(new DeserializedKeyValue("key", value), deserializedRecord);
  }
 
 
  @Test
  public void shouldDeserializeNullValueRecordToEmptyMap() {
    var deserializedRecord = deserializer
        .deserialize(new ConsumerRecord<>("topic", 1, 0, Bytes.wrap("key".getBytes()), null));
-    assertEquals(Map.of(), deserializedRecord);
+    assertEquals(new DeserializedKeyValue("key", null), deserializedRecord);
  }
 }

+ 64 - 10
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ClusterServiceTest.java

@@ -1,6 +1,7 @@
 package com.provectus.kafka.ui.service;
 
 
 import static org.assertj.core.api.Assertions.assertThat;
+import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.when;
 
 
 import com.provectus.kafka.ui.mapper.ClusterMapper;
@@ -31,22 +32,32 @@ class ClusterServiceTest {
  private ClusterService clusterService;
  @Mock
  private ClustersStorage clustersStorage;
+  @Mock
+  private KafkaService kafkaService;
 
 
  @Test
  public void shouldListFirst25Topics() {
    var topicName = UUID.randomUUID().toString();
 
 
+    final KafkaCluster cluster = KafkaCluster.builder()
+        .topics(
+            IntStream.rangeClosed(1, 100).boxed()
+                .map(Objects::toString)
+                .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
+                    .partitions(Map.of())
+                    .name(e)
+                    .build()))
+        )
+        .build();
+
    when(clustersStorage.getClusterByName(topicName))
-        .thenReturn(Optional.of(KafkaCluster.builder()
-            .topics(
-                IntStream.rangeClosed(1, 100).boxed()
-                    .map(Objects::toString)
-                    .collect(Collectors.toMap(Function.identity(), e -> InternalTopic.builder()
-                        .partitions(Map.of())
-                        .name(e)
-                        .build()))
-            )
-            .build()));
+        .thenReturn(Optional.of(cluster));
+
+    when(
+        kafkaService.getTopicPartitions(any(), any())
+    ).thenReturn(
+        Map.of()
+    );
 
 
     var topics = clusterService.getTopics(topicName,
    var topics = clusterService.getTopics(topicName,
        Optional.empty(), Optional.empty(), Optional.empty(),
             )
            )
            .build()));
 
+    when(
+        kafkaService.getTopicPartitions(any(), any())
+    ).thenReturn(
+        Map.of()
+    );
+
+
    var topics = clusterService.getTopics(topicName, Optional.of(4), Optional.of(33),
        Optional.empty(), Optional.empty(), Optional.empty());
    assertThat(topics.getPageCount()).isEqualTo(4);
@@ -95,6 +113,13 @@ class ClusterServiceTest {
            )
            .build()));
 
 
+    when(
+        kafkaService.getTopicPartitions(any(), any())
+    ).thenReturn(
+        Map.of()
+    );
+
+
    var topics = clusterService.getTopics(topicName, Optional.of(0), Optional.of(-1),
        Optional.empty(), Optional.empty(), Optional.empty());
    assertThat(topics.getPageCount()).isEqualTo(4);
@@ -119,6 +144,13 @@ class ClusterServiceTest {
            )
            .build()));
 
 
+    when(
+        kafkaService.getTopicPartitions(any(), any())
+    ).thenReturn(
+        Map.of()
+    );
+
+
    var topics = clusterService.getTopics(topicName,
        Optional.empty(), Optional.empty(), Optional.of(true),
        Optional.empty(), Optional.empty());
@@ -145,6 +177,13 @@ class ClusterServiceTest {
            )
            .build()));
 
 
+    when(
+        kafkaService.getTopicPartitions(any(), any())
+    ).thenReturn(
+        Map.of()
+    );
+
+
    var topics = clusterService.getTopics(topicName,
        Optional.empty(), Optional.empty(), Optional.of(true),
        Optional.empty(), Optional.empty());
@@ -170,6 +209,13 @@ class ClusterServiceTest {
            )
            .build()));
 
 
+    when(
+        kafkaService.getTopicPartitions(any(), any())
+    ).thenReturn(
+        Map.of()
+    );
+
+
    var topics = clusterService.getTopics(topicName,
        Optional.empty(), Optional.empty(), Optional.empty(),
        Optional.of("1"), Optional.empty());
@@ -195,6 +241,13 @@ class ClusterServiceTest {
            )
            .build()));
 
 
+    when(
+        kafkaService.getTopicPartitions(any(), any())
+    ).thenReturn(
+        Map.of()
+    );
+
+
    var topics = clusterService.getTopics(topicName,
        Optional.empty(), Optional.empty(), Optional.empty(),
        Optional.empty(), Optional.of(TopicColumnsToSort.TOTAL_PARTITIONS));
@@ -202,4 +255,5 @@ class ClusterServiceTest {
    assertThat(topics.getTopics()).hasSize(25);
    assertThat(topics.getTopics()).map(Topic::getPartitionCount).isSorted();
  }
+
 }
 }

+ 219 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsResetServiceTest.java

@@ -0,0 +1,219 @@
+package com.provectus.kafka.ui.service;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import com.provectus.kafka.ui.AbstractBaseTest;
+import com.provectus.kafka.ui.exception.NotFoundException;
+import com.provectus.kafka.ui.exception.ValidationException;
+import com.provectus.kafka.ui.model.KafkaCluster;
+import java.time.Duration;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.OffsetAndMetadata;
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.serialization.BytesSerializer;
+import org.apache.kafka.common.utils.Bytes;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+public class OffsetsResetServiceTest extends AbstractBaseTest {
+
+  private static final int PARTITIONS = 5;
+
+  private static final KafkaCluster CLUSTER =
+      KafkaCluster.builder()
+          .name(LOCAL)
+          .bootstrapServers(kafka.getBootstrapServers())
+          .properties(new Properties())
+          .build();
+
+  private final String groupId = "OffsetsResetServiceTestGroup-" + UUID.randomUUID();
+  private final String topic = "OffsetsResetServiceTestTopic-" + UUID.randomUUID();
+
+  private KafkaService kafkaService;
+  private OffsetsResetService offsetsResetService;
+
+  @BeforeEach
+  void init() {
+    kafkaService = new KafkaService(null, null, null, null);
+    kafkaService.setClientTimeout(5_000);
+    offsetsResetService = new OffsetsResetService(kafkaService);
+
+    createTopic(new NewTopic(topic, PARTITIONS, (short) 1));
+    createConsumerGroup();
+  }
+
+  @AfterEach
+  void cleanUp() {
+    deleteTopic(topic);
+  }
+
+  private void createConsumerGroup() {
+    try (var consumer = groupConsumer()) {
+      consumer.subscribe(Pattern.compile("no-such-topic-pattern"));
+      consumer.poll(Duration.ofMillis(200));
+      consumer.commitSync();
+    }
+  }
+
+  @Test
+  void failsIfGroupDoesNotExist() {
+    assertThatThrownBy(
+        () -> offsetsResetService.resetToEarliest(CLUSTER, "non-existing-group", topic, null))
+        .isInstanceOf(NotFoundException.class);
+    assertThatThrownBy(
+        () -> offsetsResetService.resetToLatest(CLUSTER, "non-existing-group", topic, null))
+        .isInstanceOf(NotFoundException.class);
+    assertThatThrownBy(() -> offsetsResetService
+        .resetToTimestamp(CLUSTER, "non-existing-group", topic, null, System.currentTimeMillis()))
+        .isInstanceOf(NotFoundException.class);
+    assertThatThrownBy(
+        () -> offsetsResetService.resetToOffsets(CLUSTER, "non-existing-group", topic, Map.of()))
+        .isInstanceOf(NotFoundException.class);
+  }
+
+  @Test
+  void failsIfGroupIsActive() {
+    // starting consumer to activate group
+    try (var consumer = groupConsumer()) {
+      consumer.subscribe(Pattern.compile("no-such-topic-pattern"));
+      consumer.poll(Duration.ofMillis(100));
+
+      assertThatThrownBy(() -> offsetsResetService.resetToEarliest(CLUSTER, groupId, topic, null))
+          .isInstanceOf(ValidationException.class);
+      assertThatThrownBy(() -> offsetsResetService.resetToLatest(CLUSTER, groupId, topic, null))
+          .isInstanceOf(ValidationException.class);
+      assertThatThrownBy(() -> offsetsResetService
+          .resetToTimestamp(CLUSTER, groupId, topic, null, System.currentTimeMillis()))
+          .isInstanceOf(ValidationException.class);
+      assertThatThrownBy(
+          () -> offsetsResetService.resetToOffsets(CLUSTER, groupId, topic, Map.of()))
+          .isInstanceOf(ValidationException.class);
+    }
+  }
+
+  @Test
+  void resetToOffsets() {
+    sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10));
+
+    var expectedOffsets = Map.of(0, 5L, 1, 5L, 2, 5L);
+    offsetsResetService.resetToOffsets(CLUSTER, groupId, topic, expectedOffsets);
+    assertOffsets(expectedOffsets);
+  }
+
+  @Test
+  void resetToOffsetsCommitsEarliestOrLatestOffsetsIfOffsetsBoundsNotValid() {
+    sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10));
+
+    var offsetsWithInValidBounds = Map.of(0, -2L, 1, 5L, 2, 500L);
+    var expectedOffsets = Map.of(0, 0L, 1, 5L, 2, 10L);
+    offsetsResetService.resetToOffsets(CLUSTER, groupId, topic, offsetsWithInValidBounds);
+    assertOffsets(expectedOffsets);
+  }
+
+  @Test
+  void resetToEarliest() {
+    sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10));
+
+    commit(Map.of(0, 5L, 1, 5L, 2, 5L));
+    offsetsResetService.resetToEarliest(CLUSTER, groupId, topic, List.of(0, 1));
+    assertOffsets(Map.of(0, 0L, 1, 0L, 2, 5L));
+
+    commit(Map.of(0, 5L, 1, 5L, 2, 5L));
+    offsetsResetService.resetToEarliest(CLUSTER, groupId, topic, null);
+    assertOffsets(Map.of(0, 0L, 1, 0L, 2, 0L, 3, 0L, 4, 0L));
+  }
+
+  @Test
+  void resetToLatest() {
+    sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10, 3, 10, 4, 10));
+
+    commit(Map.of(0, 5L, 1, 5L, 2, 5L));
+    offsetsResetService.resetToLatest(CLUSTER, groupId, topic, List.of(0, 1));
+    assertOffsets(Map.of(0, 10L, 1, 10L, 2, 5L));
+
+    commit(Map.of(0, 5L, 1, 5L, 2, 5L));
+    offsetsResetService.resetToLatest(CLUSTER, groupId, topic, null);
+    assertOffsets(Map.of(0, 10L, 1, 10L, 2, 10L, 3, 10L, 4, 10L));
+  }
+
+  @Test
+  void resetToTimestamp() {
+    send(
+        Stream.of(
+            new ProducerRecord<Bytes, Bytes>(topic, 0, 1000L, null, null),
+            new ProducerRecord<Bytes, Bytes>(topic, 0, 1500L, null, null),
+            new ProducerRecord<Bytes, Bytes>(topic, 0, 2000L, null, null),
+            new ProducerRecord<Bytes, Bytes>(topic, 1, 1000L, null, null),
+            new ProducerRecord<Bytes, Bytes>(topic, 1, 2000L, null, null),
+            new ProducerRecord<Bytes, Bytes>(topic, 2, 1000L, null, null),
+            new ProducerRecord<Bytes, Bytes>(topic, 2, 1100L, null, null),
+            new ProducerRecord<Bytes, Bytes>(topic, 2, 1200L, null, null)));
+
+    offsetsResetService.resetToTimestamp(CLUSTER, groupId, topic, List.of(0, 1, 2, 3), 1600L);
+    assertOffsets(Map.of(0, 2L, 1, 1L, 2, 3L, 3, 0L));
+  }
+
+
+  private void commit(Map<Integer, Long> offsetsToCommit) {
+    try (var consumer = groupConsumer()) {
+      consumer.commitSync(
+          offsetsToCommit.entrySet().stream()
+              .collect(Collectors.toMap(
+                  e -> new TopicPartition(topic, e.getKey()),
+                  e -> new OffsetAndMetadata(e.getValue())))
+      );
+    }
+  }
+
+  private void sendMsgsToPartition(Map<Integer, Integer> msgsCountForPartitions) {
+    Bytes bytes = new Bytes("noMatter".getBytes());
+    send(
+        msgsCountForPartitions.entrySet().stream()
+            .flatMap(e ->
+                IntStream.range(0, e.getValue())
+                    .mapToObj(i -> new ProducerRecord<>(topic, e.getKey(), bytes, bytes))));
+  }
+
+  private void send(Stream<ProducerRecord<Bytes, Bytes>> toSend) {
+    var properties = new Properties();
+    properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers());
+    var serializer = new BytesSerializer();
+    try (var producer = new KafkaProducer<>(properties, serializer, serializer)) {
+      toSend.forEach(producer::send);
+      producer.flush();
+    }
+  }
+
+  private void assertOffsets(Map<Integer, Long> expectedOffsets) {
+    try (var consumer = groupConsumer()) {
+      var tps = expectedOffsets.keySet().stream()
+          .map(idx -> new TopicPartition(topic, idx))
+          .collect(Collectors.toSet());
+
+      var actualOffsets = consumer.committed(tps).entrySet().stream()
+          .collect(Collectors.toMap(e -> e.getKey().partition(), e -> e.getValue().offset()));
+
+      assertThat(actualOffsets).isEqualTo(expectedOffsets);
+    }
+  }
+
+  private Consumer<?, ?> groupConsumer() {
+    return kafkaService.createConsumer(CLUSTER, Map.of(ConsumerConfig.GROUP_ID_CONFIG, groupId));
+  }
+
+}

+ 0 - 119
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/OffsetsSeekTest.java

@@ -1,119 +0,0 @@
-package com.provectus.kafka.ui.service;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-import com.provectus.kafka.ui.model.ConsumerPosition;
-import com.provectus.kafka.ui.model.SeekType;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import org.apache.kafka.clients.consumer.ConsumerRecord;
-import org.apache.kafka.clients.consumer.MockConsumer;
-import org.apache.kafka.clients.consumer.OffsetResetStrategy;
-import org.apache.kafka.common.PartitionInfo;
-import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.utils.Bytes;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Nested;
-import org.junit.jupiter.api.Test;
-
-class OffsetsSeekTest {
-
-  String topic = "test";
-  TopicPartition tp0 = new TopicPartition(topic, 0); //offsets: start 0, end 0
-  TopicPartition tp1 = new TopicPartition(topic, 1); //offsets: start 10, end 10
-  TopicPartition tp2 = new TopicPartition(topic, 2); //offsets: start 0, end 20
-  TopicPartition tp3 = new TopicPartition(topic, 3); //offsets: start 25, end 30
-
-  MockConsumer<Bytes, Bytes> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
-
-  @BeforeEach
-  void initConsumer() {
-    consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
-    consumer.updatePartitions(
-        topic,
-        Stream.of(tp0, tp1, tp2, tp3)
-            .map(tp -> new PartitionInfo(topic, tp.partition(), null, null, null, null))
-            .collect(Collectors.toList()));
-    consumer.updateBeginningOffsets(Map.of(
-        tp0, 0L,
-        tp1, 10L,
-        tp2, 0L,
-        tp3, 25L
-    ));
-    consumer.addEndOffsets(Map.of(
-        tp0, 0L,
-        tp1, 10L,
-        tp2, 20L,
-        tp3, 30L
-    ));
-  }
-
-  @Test
-  void seekToBeginningAllPartitions() {
-    var seek = new ConsumingService.OffsetsSeek(
-        topic,
-        new ConsumerPosition(SeekType.BEGINNING, Map.of(0, 0L, 1, 0L)));
-    seek.assignAndSeek(consumer);
-    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1);
-    assertThat(consumer.position(tp0)).isZero();
-    assertThat(consumer.position(tp1)).isEqualTo(10L);
-  }
-
-  @Test
-  void seekToBeginningWithPartitionsList() {
-    var seek = new ConsumingService.OffsetsSeek(
-        topic,
-        new ConsumerPosition(SeekType.BEGINNING, Map.of()));
-    seek.assignAndSeek(consumer);
-    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2, tp3);
-    assertThat(consumer.position(tp0)).isZero();
-    assertThat(consumer.position(tp1)).isEqualTo(10L);
-    assertThat(consumer.position(tp2)).isZero();
-    assertThat(consumer.position(tp3)).isEqualTo(25L);
-  }
-
-  @Test
-  void seekToOffset() {
-    var seek = new ConsumingService.OffsetsSeek(
-        topic,
-        new ConsumerPosition(SeekType.OFFSET, Map.of(0, 0L, 1, 1L, 2, 2L)));
-    seek.assignAndSeek(consumer);
-    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2);
-    assertThat(consumer.position(tp0)).isZero();
-    assertThat(consumer.position(tp1)).isEqualTo(1L);
-    assertThat(consumer.position(tp2)).isEqualTo(2L);
-  }
-
-  @Nested
-  class WaitingOffsetsTest {
-
-    ConsumingService.OffsetsSeek.WaitingOffsets offsets;
-
-    @BeforeEach
-    void assignAndCreateOffsets() {
-      consumer.assign(List.of(tp0, tp1, tp2, tp3));
-      offsets = new ConsumingService.OffsetsSeek.WaitingOffsets(topic, consumer);
-    }
-
-    @Test
-    void collectsSignificantOffsetsMinus1ForAssignedPartitions() {
-      // offsets for partition 0 & 1 should be skipped because they
-      // effectively contains no data (start offset = end offset)
-      assertThat(offsets.offsets).containsExactlyInAnyOrderEntriesOf(
-          Map.of(2, 19L, 3, 29L)
-      );
-    }
-
-    @Test
-    void returnTrueWhenOffsetsReachedReached() {
-      assertThat(offsets.endReached()).isFalse();
-      offsets.markPolled(new ConsumerRecord<>(topic, 2, 19, null, null));
-      assertThat(offsets.endReached()).isFalse();
-      offsets.markPolled(new ConsumerRecord<>(topic, 3, 29, null, null));
-      assertThat(offsets.endReached()).isTrue();
-    }
-  }
-
-}

+ 203 - 31
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/RecordEmitterTest.java

@@ -1,26 +1,33 @@
 package com.provectus.kafka.ui.service;

-import static com.provectus.kafka.ui.service.ConsumingService.OffsetsSeek;
-import static com.provectus.kafka.ui.service.ConsumingService.RecordEmitter;
 import static org.assertj.core.api.Assertions.assertThat;

 import com.provectus.kafka.ui.AbstractBaseTest;
+import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
+import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
 import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.SeekDirection;
 import com.provectus.kafka.ui.model.SeekType;
 import com.provectus.kafka.ui.producer.KafkaTestProducer;
+import com.provectus.kafka.ui.util.OffsetsSeekBackward;
+import com.provectus.kafka.ui.util.OffsetsSeekForward;
+import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Properties;
 import java.util.UUID;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.stream.Collectors;
 import lombok.Value;
+import lombok.extern.log4j.Log4j2;
 import org.apache.kafka.clients.admin.NewTopic;
 import org.apache.kafka.clients.consumer.ConsumerConfig;
 import org.apache.kafka.clients.consumer.ConsumerRecord;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
 import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.serialization.BytesDeserializer;
 import org.apache.kafka.common.serialization.StringDeserializer;
 import org.apache.kafka.common.utils.Bytes;
@@ -29,6 +36,7 @@ import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
 import reactor.core.publisher.Flux;

+@Log4j2
 class RecordEmitterTest extends AbstractBaseTest {

   static final int PARTITIONS = 5;
@@ -49,7 +57,12 @@ class RecordEmitterTest extends AbstractBaseTest {
           var value = "msg_" + partition + "_" + i;
           var value = "msg_" + partition + "_" + i;
           var metadata =
           var metadata =
               producer.send(new ProducerRecord<>(TOPIC, partition, ts, null, value)).get();
               producer.send(new ProducerRecord<>(TOPIC, partition, ts, null, value)).get();
-          SENT_RECORDS.add(new Record(value, metadata.partition(), metadata.offset(), ts));
+          SENT_RECORDS.add(new Record(
+              value,
+              new TopicPartition(metadata.topic(), metadata.partition()),
+              metadata.offset(),
+              ts)
+          );
         }
         }
       }
       }
     }
     }
@@ -63,25 +76,65 @@ class RecordEmitterTest extends AbstractBaseTest {
 
 
   @Test
   void pollNothingOnEmptyTopic() {
-    var emitter = new RecordEmitter(
+    var forwardEmitter = new ForwardRecordEmitter(
         this::createConsumer,
-        new OffsetsSeek(EMPTY_TOPIC, new ConsumerPosition(SeekType.BEGINNING, Map.of())));
+        new OffsetsSeekForward(EMPTY_TOPIC,
+            new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.FORWARD)
+        )
+    );

-    Long polledValues = Flux.create(emitter)
+    var backwardEmitter = new BackwardRecordEmitter(
+        this::createConsumer,
+        new OffsetsSeekBackward(
+            EMPTY_TOPIC,
+            new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.BACKWARD),
+            100
+        )
+    );
+
+    Long polledValues = Flux.create(forwardEmitter)
         .limitRequest(100)
         .count()
         .block();

     assertThat(polledValues).isZero();
+
+    polledValues = Flux.create(backwardEmitter)
+        .limitRequest(100)
+        .count()
+        .block();
+
+    assertThat(polledValues).isZero();
+
   }

   @Test
   void pollFullTopicFromBeginning() {
-    var emitter = new RecordEmitter(
+    var forwardEmitter = new ForwardRecordEmitter(
         this::createConsumer,
-        new OffsetsSeek(TOPIC, new ConsumerPosition(SeekType.BEGINNING, Map.of())));
+        new OffsetsSeekForward(TOPIC,
+            new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.FORWARD)
+        )
+    );
+
+    var backwardEmitter = new BackwardRecordEmitter(
+        this::createConsumer,
+        new OffsetsSeekBackward(TOPIC,
+            new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.FORWARD),
+            PARTITIONS * MSGS_PER_PARTITION
+        )
+    );
+
+    var polledValues = Flux.create(forwardEmitter)
+        .map(this::deserialize)
+        .limitRequest(Long.MAX_VALUE)
+        .collect(Collectors.toList())
+        .block();
+
+    assertThat(polledValues).containsExactlyInAnyOrderElementsOf(
+        SENT_RECORDS.stream().map(Record::getValue).collect(Collectors.toList()));

-    var polledValues = Flux.create(emitter)
+    polledValues = Flux.create(backwardEmitter)
         .map(this::deserialize)
         .limitRequest(Long.MAX_VALUE)
         .collect(Collectors.toList())
@@ -89,70 +142,189 @@ class RecordEmitterTest {

     assertThat(polledValues).containsExactlyInAnyOrderElementsOf(
         SENT_RECORDS.stream().map(Record::getValue).collect(Collectors.toList()));
+
   }

   @Test
   void pollWithOffsets() {
-    Map<Integer, Long> targetOffsets = new HashMap<>();
+    Map<TopicPartition, Long> targetOffsets = new HashMap<>();
     for (int i = 0; i < PARTITIONS; i++) {
       long offset = ThreadLocalRandom.current().nextLong(MSGS_PER_PARTITION);
-      targetOffsets.put(i, offset);
+      targetOffsets.put(new TopicPartition(TOPIC, i), offset);
     }

-    var emitter = new RecordEmitter(
+    var forwardEmitter = new ForwardRecordEmitter(
         this::createConsumer,
-        new OffsetsSeek(TOPIC, new ConsumerPosition(SeekType.OFFSET, targetOffsets)));
+        new OffsetsSeekForward(TOPIC,
+            new ConsumerPosition(SeekType.OFFSET, targetOffsets, SeekDirection.FORWARD)
+        )
+    );

-    var polledValues = Flux.create(emitter)
+    var backwardEmitter = new BackwardRecordEmitter(
+        this::createConsumer,
+        new OffsetsSeekBackward(TOPIC,
+            new ConsumerPosition(SeekType.OFFSET, targetOffsets, SeekDirection.BACKWARD),
+            PARTITIONS * MSGS_PER_PARTITION
+        )
+    );
+
+    var polledValues = Flux.create(forwardEmitter)
         .map(this::deserialize)
         .limitRequest(Long.MAX_VALUE)
         .collect(Collectors.toList())
         .block();

     var expectedValues = SENT_RECORDS.stream()
-        .filter(r -> r.getOffset() >= targetOffsets.get(r.getPartition()))
+        .filter(r -> r.getOffset() >= targetOffsets.get(r.getTp()))
         .map(Record::getValue)
         .collect(Collectors.toList());

     assertThat(polledValues).containsExactlyInAnyOrderElementsOf(expectedValues);
+
+    expectedValues = SENT_RECORDS.stream()
+        .filter(r -> r.getOffset() < targetOffsets.get(r.getTp()))
+        .map(Record::getValue)
+        .collect(Collectors.toList());
+
+    polledValues =  Flux.create(backwardEmitter)
+        .map(this::deserialize)
+        .limitRequest(Long.MAX_VALUE)
+        .collect(Collectors.toList())
+        .block();
+
+    assertThat(polledValues).containsExactlyInAnyOrderElementsOf(expectedValues);
   }

   @Test
   void pollWithTimestamps() {
-    Map<Integer, Long> targetTimestamps = new HashMap<>();
+    Map<TopicPartition, Long> targetTimestamps = new HashMap<>();
+    final Map<TopicPartition, List<Record>> perPartition =
+        SENT_RECORDS.stream().collect(Collectors.groupingBy((r) -> r.tp));
     for (int i = 0; i < PARTITIONS; i++) {
-      int randRecordIdx = ThreadLocalRandom.current().nextInt(SENT_RECORDS.size());
-      targetTimestamps.put(i, SENT_RECORDS.get(randRecordIdx).getTimestamp());
+      final List<Record> records = perPartition.get(new TopicPartition(TOPIC, i));
+      int randRecordIdx = ThreadLocalRandom.current().nextInt(records.size());
+      log.info("partition: {} position: {}", i, randRecordIdx);
+      targetTimestamps.put(
+          new TopicPartition(TOPIC, i),
+          records.get(randRecordIdx).getTimestamp()
+      );
     }

-    var emitter = new RecordEmitter(
+    var forwardEmitter = new ForwardRecordEmitter(
         this::createConsumer,
-        new OffsetsSeek(TOPIC, new ConsumerPosition(SeekType.TIMESTAMP, targetTimestamps)));
+        new OffsetsSeekForward(TOPIC,
+            new ConsumerPosition(SeekType.TIMESTAMP, targetTimestamps, SeekDirection.FORWARD)
+        )
+    );
+
+    var backwardEmitter = new BackwardRecordEmitter(
+        this::createConsumer,
+        new OffsetsSeekBackward(TOPIC,
+            new ConsumerPosition(SeekType.TIMESTAMP, targetTimestamps, SeekDirection.BACKWARD),
+            PARTITIONS * MSGS_PER_PARTITION
+        )
+    );

-    var polledValues = Flux.create(emitter)
+    var polledValues = Flux.create(forwardEmitter)
         .map(this::deserialize)
         .limitRequest(Long.MAX_VALUE)
         .collect(Collectors.toList())
         .block();

     var expectedValues = SENT_RECORDS.stream()
-        .filter(r -> r.getTimestamp() >= targetTimestamps.get(r.getPartition()))
+        .filter(r -> r.getTimestamp() >= targetTimestamps.get(r.getTp()))
         .map(Record::getValue)
         .collect(Collectors.toList());

     assertThat(polledValues).containsExactlyInAnyOrderElementsOf(expectedValues);
+
+    polledValues = Flux.create(backwardEmitter)
+        .map(this::deserialize)
+        .limitRequest(Long.MAX_VALUE)
+        .collect(Collectors.toList())
+        .block();
+
+    expectedValues = SENT_RECORDS.stream()
+        .filter(r -> r.getTimestamp() < targetTimestamps.get(r.getTp()))
+        .map(Record::getValue)
+        .collect(Collectors.toList());
+
+    assertThat(polledValues).containsExactlyInAnyOrderElementsOf(expectedValues);
+
   }

-  private KafkaConsumer<Bytes, Bytes> createConsumer() {
-    return new KafkaConsumer<>(
-        Map.of(
-            ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers(),
-            ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString(),
-            ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 20, // to check multiple polls
-            ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class,
-            ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class
+  @Test
+  void backwardEmitterSeekToEnd() {
+    final int numMessages = 100;
+    final Map<TopicPartition, Long> targetOffsets = new HashMap<>();
+    for (int i = 0; i < PARTITIONS; i++) {
+      targetOffsets.put(new TopicPartition(TOPIC, i), (long) MSGS_PER_PARTITION);
+    }
+
+    var backwardEmitter = new BackwardRecordEmitter(
+        this::createConsumer,
+        new OffsetsSeekBackward(TOPIC,
+            new ConsumerPosition(SeekType.OFFSET, targetOffsets, SeekDirection.BACKWARD),
+            numMessages
         )
     );
+
+    var polledValues = Flux.create(backwardEmitter)
+        .map(this::deserialize)
+        .limitRequest(numMessages)
+        .collect(Collectors.toList())
+        .block();
+
+    var expectedValues = SENT_RECORDS.stream()
+        .filter(r -> r.getOffset() < targetOffsets.get(r.getTp()))
+        .filter(r -> r.getOffset() >= (targetOffsets.get(r.getTp()) - (100 / PARTITIONS)))
+        .map(Record::getValue)
+        .collect(Collectors.toList());
+
+
+    assertThat(polledValues).containsExactlyInAnyOrderElementsOf(expectedValues);
+  }
+
+  @Test
+  void backwardEmitterSeekToBegin() {
+    Map<TopicPartition, Long> offsets = new HashMap<>();
+    for (int i = 0; i < PARTITIONS; i++) {
+      offsets.put(new TopicPartition(TOPIC, i), 0L);
+    }
+
+    var backwardEmitter = new BackwardRecordEmitter(
+        this::createConsumer,
+        new OffsetsSeekBackward(TOPIC,
+            new ConsumerPosition(SeekType.OFFSET, offsets, SeekDirection.BACKWARD),
+            100
+        )
+    );
+
+    var polledValues = Flux.create(backwardEmitter)
+        .map(this::deserialize)
+        .limitRequest(Long.MAX_VALUE)
+        .collect(Collectors.toList())
+        .block();
+
+    assertThat(polledValues).isEmpty();
+  }
+
+  private KafkaConsumer<Bytes, Bytes> createConsumer() {
+    return createConsumer(Map.of());
+  }
+
+  private KafkaConsumer<Bytes, Bytes> createConsumer(Map<String, Object> properties) {
+    final Map<String, ? extends Serializable> map = Map.of(
+        ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers(),
+        ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString(),
+        ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 20, // to check multiple polls
+        ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class,
+        ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class
+    );
+    Properties props = new Properties();
+    props.putAll(map);
+    props.putAll(properties);
+    return new KafkaConsumer<>(props);
   }

   private String deserialize(ConsumerRecord<Bytes, Bytes> rec) {
@@ -162,7 +334,7 @@ class RecordEmitterTest extends AbstractBaseTest {
   @Value
   static class Record {
     String value;
-    int partition;
+    TopicPartition tp;
     long offset;
     long timestamp;
   }

+ 360 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/SendAndReadTests.java

@@ -0,0 +1,360 @@
+package com.provectus.kafka.ui.service;
+
+import static org.assertj.core.api.Assertions.assertThat;
+import static org.assertj.core.api.Assertions.assertThatThrownBy;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.provectus.kafka.ui.AbstractBaseTest;
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.CreateTopicMessage;
+import com.provectus.kafka.ui.model.SeekDirection;
+import com.provectus.kafka.ui.model.SeekType;
+import com.provectus.kafka.ui.model.TopicMessage;
+import io.confluent.kafka.schemaregistry.ParsedSchema;
+import io.confluent.kafka.schemaregistry.avro.AvroSchema;
+import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
+import java.time.Duration;
+import java.util.Map;
+import java.util.Objects;
+import java.util.UUID;
+import java.util.function.Consumer;
+import lombok.SneakyThrows;
+import org.apache.kafka.clients.admin.NewTopic;
+import org.apache.kafka.common.TopicPartition;
+import org.junit.Assert;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.springframework.beans.factory.annotation.Autowired;
+
+public class SendAndReadTests extends AbstractBaseTest {
+
+  private static final AvroSchema AVRO_SCHEMA_1 = new AvroSchema(
+      "{"
+          + "  \"type\": \"record\","
+          + "  \"name\": \"TestAvroRecord1\","
+          + "  \"fields\": ["
+          + "    {"
+          + "      \"name\": \"field1\","
+          + "      \"type\": \"string\""
+          + "    },"
+          + "    {"
+          + "      \"name\": \"field2\","
+          + "      \"type\": \"int\""
+          + "    }"
+          + "  ]"
+          + "}"
+  );
+
+  private static final AvroSchema AVRO_SCHEMA_2 = new AvroSchema(
+      "{"
+          + "  \"type\": \"record\","
+          + "  \"name\": \"TestAvroRecord2\","
+          + "  \"fields\": ["
+          + "    {"
+          + "      \"name\": \"f1\","
+          + "      \"type\": \"int\""
+          + "    },"
+          + "    {"
+          + "      \"name\": \"f2\","
+          + "      \"type\": \"string\""
+          + "    }"
+          + "  ]"
+          + "}"
+  );
+
+  private static final String AVRO_SCHEMA_1_JSON_RECORD
+      = "{ \"field1\":\"testStr\", \"field2\": 123 }";
+
+  private static final String AVRO_SCHEMA_2_JSON_RECORD = "{ \"f1\": 111, \"f2\": \"testStr\" }";
+
+  private static final ProtobufSchema PROTOBUF_SCHEMA = new ProtobufSchema(
+      "syntax = \"proto3\";\n"
+          + "package com.provectus;\n"
+          + "\n"
+          + "message TestProtoRecord {\n"
+          + "  string f1 = 1;\n"
+          + "  int32 f2 = 2;\n"
+          + "}\n"
+          + "\n"
+  );
+
+  private static final String PROTOBUF_SCHEMA_JSON_RECORD
+      = "{ \"f1\" : \"test str\", \"f2\" : 123 }";
+
+  @Autowired
+  private ClusterService clusterService;
+
+  @Autowired
+  private ClustersMetricsScheduler clustersMetricsScheduler;
+
+  @Test
+  void noSchemaStringKeyStringValue() {
+    new SendAndReadSpec()
+        .withMsgToSend(
+            new CreateTopicMessage()
+                .key("testKey")
+                .content("testValue")
+        )
+        .doAssert(polled -> {
+          assertThat(polled.getKey()).isEqualTo("testKey");
+          assertThat(polled.getContent()).isEqualTo("testValue");
+        });
+  }
+
+  @Test
+  void noSchemaJsonKeyJsonValue() {
+    new SendAndReadSpec()
+        .withMsgToSend(
+            new CreateTopicMessage()
+                .key("{ \"f1\": 111, \"f2\": \"testStr1\" }")
+                .content("{ \"f1\": 222, \"f2\": \"testStr2\" }")
+        )
+        .doAssert(polled -> {
+          assertThat(polled.getKey()).isEqualTo("{ \"f1\": 111, \"f2\": \"testStr1\" }");
+          assertThat(polled.getContent()).isEqualTo("{ \"f1\": 222, \"f2\": \"testStr2\" }");
+        });
+  }
+
+  @Test
+  void keyIsIntValueIsDoubleShouldBeSerializedAsStrings() {
+    new SendAndReadSpec()
+        .withMsgToSend(
+            new CreateTopicMessage()
+                .key("123")
+                .content("234.56")
+        )
+        .doAssert(polled -> {
+          assertThat(polled.getKey()).isEqualTo("123");
+          assertThat(polled.getContent()).isEqualTo("234.56");
+        });
+  }
+
+  @Test
+  void noSchemaKeyIsNull() {
+    new SendAndReadSpec()
+        .withMsgToSend(
+            new CreateTopicMessage()
+                .key(null)
+                .content("testValue")
+        )
+        .doAssert(polled -> {
+          assertThat(polled.getKey()).isNull();
+          assertThat(polled.getContent()).isEqualTo("testValue");
+        });
+  }
+
+  @Test
+  void noSchemaValueIsNull() {
+    new SendAndReadSpec()
+        .withMsgToSend(
+            new CreateTopicMessage()
+                .key("testKey")
+                .content(null)
+        )
+        .doAssert(polled -> {
+          assertThat(polled.getKey()).isEqualTo("testKey");
+          assertThat(polled.getContent()).isNull();
+        });
+  }
+
+  @Test
+  void nonNullableKvWithAvroSchema() {
+    new SendAndReadSpec()
+        .withKeySchema(AVRO_SCHEMA_1)
+        .withValueSchema(AVRO_SCHEMA_2)
+        .withMsgToSend(
+            new CreateTopicMessage()
+                .key(AVRO_SCHEMA_1_JSON_RECORD)
+                .content(AVRO_SCHEMA_2_JSON_RECORD)
+        )
+        .doAssert(polled -> {
+          assertJsonEqual(polled.getKey(), AVRO_SCHEMA_1_JSON_RECORD);
+          assertJsonEqual(polled.getContent(), AVRO_SCHEMA_2_JSON_RECORD);
+        });
+  }
+
+  @Test
+  void keyWithNoSchemaValueWithAvroSchema() {
+    new SendAndReadSpec()
+        .withValueSchema(AVRO_SCHEMA_1)
+        .withMsgToSend(
+            new CreateTopicMessage()
+                .key("testKey")
+                .content(AVRO_SCHEMA_1_JSON_RECORD)
+        )
+        .doAssert(polled -> {
+          assertThat(polled.getKey()).isEqualTo("testKey");
+          assertJsonEqual(polled.getContent(), AVRO_SCHEMA_1_JSON_RECORD);
+        });
+  }
+
+  @Test
+  void keyWithAvroSchemaValueWithNoSchema() {
+    new SendAndReadSpec()
+        .withKeySchema(AVRO_SCHEMA_1)
+        .withMsgToSend(
+            new CreateTopicMessage()
+                .key(AVRO_SCHEMA_1_JSON_RECORD)
+                .content("testVal")
+        )
+        .doAssert(polled -> {
+          assertJsonEqual(polled.getKey(), AVRO_SCHEMA_1_JSON_RECORD);
+          assertThat(polled.getContent()).isEqualTo("testVal");
+        });
+  }
+
+  @Test
+  void keyWithNoSchemaValueWithProtoSchema() {
+    new SendAndReadSpec()
+        .withValueSchema(PROTOBUF_SCHEMA)
+        .withMsgToSend(
+            new CreateTopicMessage()
+                .key("testKey")
+                .content(PROTOBUF_SCHEMA_JSON_RECORD)
+        )
+        .doAssert(polled -> {
+          assertThat(polled.getKey()).isEqualTo("testKey");
+          assertJsonEqual(polled.getContent(), PROTOBUF_SCHEMA_JSON_RECORD);
+        });
+  }
+
+  @Test
+  void keyWithAvroSchemaValueWithAvroSchemaKeyIsNull() {
+    new SendAndReadSpec()
+        .withKeySchema(AVRO_SCHEMA_1)
+        .withValueSchema(AVRO_SCHEMA_2)
+        .withMsgToSend(
+            new CreateTopicMessage()
+                .key(null)
+                .content(AVRO_SCHEMA_2_JSON_RECORD)
+        )
+        .doAssert(polled -> {
+          assertThat(polled.getKey()).isNull();
+          assertJsonEqual(polled.getContent(), AVRO_SCHEMA_2_JSON_RECORD);
+        });
+  }
+
+  @Test
+  void valueWithAvroSchemaShouldThrowExceptionArgIsNotValidJsonObject() {
+    assertThatThrownBy(() -> {
+      new SendAndReadSpec()
+          .withValueSchema(AVRO_SCHEMA_2)
+          .withMsgToSend(
+              new CreateTopicMessage()
+                  .content("not a json object")
+          )
+          .doAssert(polled -> Assertions.fail());
+    }).hasMessageContaining("Failed to serialize record");
+  }
+
+  @Test
+  void keyWithAvroSchemaValueWithAvroSchemaValueIsNull() {
+    new SendAndReadSpec()
+        .withKeySchema(AVRO_SCHEMA_1)
+        .withValueSchema(AVRO_SCHEMA_2)
+        .withMsgToSend(
+            new CreateTopicMessage()
+                .key(AVRO_SCHEMA_1_JSON_RECORD)
+                .content(null)
+        )
+        .doAssert(polled -> {
+          assertJsonEqual(polled.getKey(), AVRO_SCHEMA_1_JSON_RECORD);
+          assertThat(polled.getContent()).isNull();
+        });
+  }
+
+  @Test
+  void keyWithAvroSchemaValueWithProtoSchema() {
+    new SendAndReadSpec()
+        .withKeySchema(AVRO_SCHEMA_1)
+        .withValueSchema(PROTOBUF_SCHEMA)
+        .withMsgToSend(
+            new CreateTopicMessage()
+                .key(AVRO_SCHEMA_1_JSON_RECORD)
+                .content(PROTOBUF_SCHEMA_JSON_RECORD)
+        )
+        .doAssert(polled -> {
+          assertJsonEqual(polled.getKey(), AVRO_SCHEMA_1_JSON_RECORD);
+          assertJsonEqual(polled.getContent(), PROTOBUF_SCHEMA_JSON_RECORD);
+        });
+  }
+
+  @Test
+  void valueWithProtoSchemaShouldThrowExceptionArgIsNotValidJsonObject() {
+    assertThatThrownBy(() -> {
+      new SendAndReadSpec()
+          .withValueSchema(PROTOBUF_SCHEMA)
+          .withMsgToSend(
+              new CreateTopicMessage()
+                  .content("not a json object")
+          )
+          .doAssert(polled -> Assertions.fail());
+    }).hasMessageContaining("Failed to serialize record");
+  }
+
+
+  @SneakyThrows
+  private void assertJsonEqual(String actual, String expected) {
+    var mapper = new ObjectMapper();
+    assertThat(mapper.readTree(actual)).isEqualTo(mapper.readTree(expected));
+  }
+
+  class SendAndReadSpec {
+    CreateTopicMessage msgToSend;
+    ParsedSchema keySchema;
+    ParsedSchema valueSchema;
+
+    public SendAndReadSpec withMsgToSend(CreateTopicMessage msg) {
+      this.msgToSend = msg;
+      return this;
+    }
+
+    public SendAndReadSpec withKeySchema(ParsedSchema keySchema) {
+      this.keySchema = keySchema;
+      return this;
+    }
+
+    public SendAndReadSpec withValueSchema(ParsedSchema valueSchema) {
+      this.valueSchema = valueSchema;
+      return this;
+    }
+
+    @SneakyThrows
+    public void doAssert(Consumer<TopicMessage> msgAssert) {
+      Objects.requireNonNull(msgToSend);
+      String topic = UUID.randomUUID().toString();
+      createTopic(new NewTopic(topic, 1, (short) 1));
+      if (keySchema != null) {
+        schemaRegistry.schemaRegistryClient().register(topic + "-key", keySchema);
+      }
+      if (valueSchema != null) {
+        schemaRegistry.schemaRegistryClient().register(topic + "-value", valueSchema);
+      }
+
+      // need to update to see new topic & schemas
+      clustersMetricsScheduler.updateMetrics();
+      try {
+        clusterService.sendMessage(LOCAL, topic, msgToSend).block();
+        TopicMessage polled = clusterService.getMessages(
+            LOCAL,
+            topic,
+            new ConsumerPosition(
+                SeekType.BEGINNING,
+                Map.of(new TopicPartition(topic, 0), 0L),
+                SeekDirection.FORWARD
+            ),
+            null,
+            1
+        ).blockLast(Duration.ofSeconds(5));
+
+        assertThat(polled).isNotNull();
+        assertThat(polled.getPartition()).isEqualTo(0);
+        assertThat(polled.getOffset()).isNotNull();
+        msgAssert.accept(polled);
+      } finally {
+        deleteTopic(topic);
+      }
+    }
+  }
+
+}

+ 196 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/OffsetsSeekTest.java

@@ -0,0 +1,196 @@
+package com.provectus.kafka.ui.util;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.provectus.kafka.ui.model.ConsumerPosition;
+import com.provectus.kafka.ui.model.SeekDirection;
+import com.provectus.kafka.ui.model.SeekType;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.MockConsumer;
+import org.apache.kafka.clients.consumer.OffsetResetStrategy;
+import org.apache.kafka.common.PartitionInfo;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.utils.Bytes;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Nested;
+import org.junit.jupiter.api.Test;
+
+class OffsetsSeekTest {
+
+  final String topic = "test";
+  final TopicPartition tp0 = new TopicPartition(topic, 0); //offsets: start 0, end 0
+  final TopicPartition tp1 = new TopicPartition(topic, 1); //offsets: start 10, end 10
+  final TopicPartition tp2 = new TopicPartition(topic, 2); //offsets: start 0, end 20
+  final TopicPartition tp3 = new TopicPartition(topic, 3); //offsets: start 25, end 30
+
+  MockConsumer<Bytes, Bytes> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
+
+  @BeforeEach
+  void initConsumer() {
+    consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
+    consumer.updatePartitions(
+        topic,
+        Stream.of(tp0, tp1, tp2, tp3)
+            .map(tp -> new PartitionInfo(topic, tp.partition(), null, null, null, null))
+            .collect(Collectors.toList()));
+    consumer.updateBeginningOffsets(Map.of(
+        tp0, 0L,
+        tp1, 10L,
+        tp2, 0L,
+        tp3, 25L
+    ));
+    consumer.addEndOffsets(Map.of(
+        tp0, 0L,
+        tp1, 10L,
+        tp2, 20L,
+        tp3, 30L
+    ));
+  }
+
+  @Test
+  void forwardSeekToBeginningAllPartitions() {
+    var seek = new OffsetsSeekForward(
+        topic,
+        new ConsumerPosition(
+            SeekType.BEGINNING,
+            Map.of(tp0, 0L, tp1, 0L),
+            SeekDirection.FORWARD
+        )
+    );
+
+    seek.assignAndSeek(consumer);
+    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1);
+    assertThat(consumer.position(tp0)).isZero();
+    assertThat(consumer.position(tp1)).isEqualTo(10L);
+  }
+
+  @Test
+  void backwardSeekToBeginningAllPartitions() {
+    var seek = new OffsetsSeekBackward(
+        topic,
+        new ConsumerPosition(
+            SeekType.BEGINNING,
+            Map.of(tp2, 0L, tp3, 0L),
+            SeekDirection.BACKWARD
+        ),
+        10
+    );
+
+    seek.assignAndSeek(consumer);
+    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp2, tp3);
+    assertThat(consumer.position(tp2)).isEqualTo(20L);
+    assertThat(consumer.position(tp3)).isEqualTo(30L);
+  }
+
+  @Test
+  void forwardSeekToBeginningWithPartitionsList() {
+    var seek = new OffsetsSeekForward(
+        topic,
+        new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.FORWARD));
+    seek.assignAndSeek(consumer);
+    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2, tp3);
+    assertThat(consumer.position(tp0)).isZero();
+    assertThat(consumer.position(tp1)).isEqualTo(10L);
+    assertThat(consumer.position(tp2)).isZero();
+    assertThat(consumer.position(tp3)).isEqualTo(25L);
+  }
+
+  @Test
+  void backwardSeekToBeginningWithPartitionsList() {
+    var seek = new OffsetsSeekBackward(
+        topic,
+        new ConsumerPosition(SeekType.BEGINNING, Map.of(), SeekDirection.BACKWARD),
+        10
+    );
+    seek.assignAndSeek(consumer);
+    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2, tp3);
+    assertThat(consumer.position(tp0)).isZero();
+    assertThat(consumer.position(tp1)).isEqualTo(10L);
+    assertThat(consumer.position(tp2)).isEqualTo(20L);
+    assertThat(consumer.position(tp3)).isEqualTo(30L);
+  }
+
+
+  @Test
+  void forwardSeekToOffset() {
+    var seek = new OffsetsSeekForward(
+        topic,
+        new ConsumerPosition(
+            SeekType.OFFSET,
+            Map.of(tp0, 0L, tp1, 1L, tp2, 2L),
+            SeekDirection.FORWARD
+        )
+    );
+    seek.assignAndSeek(consumer);
+    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp2);
+    assertThat(consumer.position(tp2)).isEqualTo(2L);
+  }
+
+  @Test
+  void backwardSeekToOffset() {
+    var seek = new OffsetsSeekBackward(
+        topic,
+        new ConsumerPosition(
+            SeekType.OFFSET,
+            Map.of(tp0, 0L, tp1, 1L, tp2, 20L),
+            SeekDirection.BACKWARD
+        ),
+        2
+    );
+    seek.assignAndSeek(consumer);
+    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp2);
+    assertThat(consumer.position(tp2)).isEqualTo(20L);
+  }
+
+  @Test
+  void backwardSeekToOffsetOnlyOnePartition() {
+    var seek = new OffsetsSeekBackward(
+        topic,
+        new ConsumerPosition(
+            SeekType.OFFSET,
+            Map.of(tp2, 20L),
+            SeekDirection.BACKWARD
+        ),
+        20
+    );
+    seek.assignAndSeek(consumer);
+    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp2);
+    assertThat(consumer.position(tp2)).isEqualTo(20L);
+  }
+
+
+  @Nested
+  class WaitingOffsetsTest {
+
+    OffsetsSeekForward.WaitingOffsets offsets;
+
+    @BeforeEach
+    void assignAndCreateOffsets() {
+      consumer.assign(List.of(tp0, tp1, tp2, tp3));
+      offsets = new OffsetsSeek.WaitingOffsets(topic, consumer, List.of(tp0, tp1, tp2, tp3));
+    }
+
+    @Test
+    void collectsSignificantOffsetsMinus1ForAssignedPartitions() {
+      // offsets for partition 0 & 1 should be skipped because they
+      // effectively contains no data (start offset = end offset)
+      assertThat(offsets.getEndOffsets()).containsExactlyInAnyOrderEntriesOf(
+          Map.of(2, 19L, 3, 29L)
+      );
+    }
+
+    @Test
+    void returnTrueWhenOffsetsReached() {
+      assertThat(offsets.endReached()).isFalse();
+      offsets.markPolled(new ConsumerRecord<>(topic, 2, 19, null, null));
+      assertThat(offsets.endReached()).isFalse();
+      offsets.markPolled(new ConsumerRecord<>(topic, 3, 29, null, null));
+      assertThat(offsets.endReached()).isTrue();
+    }
+  }
+
+}

+ 91 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/AvroJsonSchemaConverterTest.java

@@ -0,0 +1,91 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.net.URI;
+import java.net.URISyntaxException;
+import org.apache.avro.Schema;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+public class AvroJsonSchemaConverterTest {
+  @Test
+  public void avroConvertTest() throws URISyntaxException {
+    final AvroJsonSchemaConverter converter = new AvroJsonSchemaConverter();
+    URI basePath = new URI("http://example.com/");
+
+    Schema recordSchema = (new Schema.Parser()).parse(
+         " {"
+            + "     \"type\": \"record\","
+            + "     \"name\": \"Message\","
+            + "     \"namespace\": \"com.provectus.kafka\","
+            + "     \"fields\": ["
+            + "         {"
+            + "             \"name\": \"record\","
+            + "             \"type\": {"
+            + "                 \"type\": \"record\","
+            + "                 \"name\": \"InnerMessage\","
+            + "                 \"fields\": ["
+            + "                     {"
+            + "                         \"name\": \"id\","
+            + "                         \"type\": \"long\""
+            + "                     },"
+            + "                     {"
+            + "                         \"name\": \"text\","
+            + "                         \"type\": \"string\""
+            + "                     },"
+            + "                     {"
+            + "                         \"name\": \"long_text\","
+            + "                         \"type\": ["
+            + "                             \"null\","
+            + "                             \"string\""
+            + "                         ],"
+            + "                         \"default\": null"
+            + "                     },"
+            + "                     {"
+            + "                         \"name\": \"order\","
+            + "                         \"type\": {"
+            + "                        \"type\": \"enum\","
+            + "                        \"name\": \"Suit\","
+            + "                        \"symbols\": [\"SPADES\",\"HEARTS\",\"DIAMONDS\",\"CLUBS\"]"
+            + "                         }"
+            + "                     },"
+            + "                     {"
+            + "                         \"name\": \"array\","
+            + "                         \"type\": {"
+            + "                             \"type\": \"array\","
+            + "                             \"items\": \"string\","
+            + "                             \"default\": []"
+            + "                         }"
+            + "                     },"
+            + "                     {"
+            + "                         \"name\": \"map\","
+            + "                         \"type\": {"
+            + "                             \"type\": \"map\","
+            + "                             \"values\": \"long\","
+            + "                             \"default\": {}"
+            + "                         }"
+            + "                     }"
+            + "                 ]"
+            + "             }"
+            + "         }"
+            + "     ]"
+            + " }"
+    );
+
+    String expected =
+            "{\"$id\":\"http://example.com/Message\","
+            + "\"$schema\":\"https://json-schema.org/draft/2020-12/schema\","
+            + "\"type\":\"object\",\"properties\":{\"record\":{\"$ref\":"
+            + "\"#/definitions/RecordInnerMessage\"}},\"required\":[\"record\"],"
+            + "\"definitions\":{\"RecordInnerMessage\":{\"type\":\"object\",\"properties\":"
+            + "{\"long_text\":{\"type\":\"string\"},\"array\":{\"type\":\"array\",\"items\":"
+            + "{\"type\":\"string\"}},\"id\":{\"type\":\"integer\"},\"text\":{\"type\":\"string\"},"
+            + "\"map\":{\"type\":\"object\",\"additionalProperties\":{\"type\":\"integer\"}},"
+            + "\"order\":{\"enum\":[\"SPADES\",\"HEARTS\",\"DIAMONDS\",\"CLUBS\"]}},"
+            + "\"required\":[\"id\",\"text\",\"order\",\"array\",\"map\"]}}}";
+
+    final JsonSchema convertRecord = converter.convert(basePath, recordSchema);
+    Assertions.assertEquals(expected, convertRecord.toJson(new ObjectMapper()));
+
+  }
+}

+ 63 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/util/jsonschema/ProtobufSchemaConverterTest.java

@@ -0,0 +1,63 @@
+package com.provectus.kafka.ui.util.jsonschema;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
+import java.net.URI;
+import java.net.URISyntaxException;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+
+public class ProtobufSchemaConverterTest {
+
+  @Test
+  public void testSimpleProto() throws URISyntaxException {
+
+    String proto = "syntax = \"proto3\";\n"
+        + "package com.acme;\n"
+        + "\n"
+        + "message MyRecord {\n"
+        + "  string f1 = 1;\n"
+        + "  OtherRecord f2 = 2;\n"
+        + "  repeated OtherRecord f3 = 3;\n"
+        + "}\n"
+        + "\n"
+        + "message OtherRecord {\n"
+        + "  int32 other_id = 1;\n"
+        + "  Order order = 2;\n"
+        + "  oneof optionalField {"
+        + "    string name = 3;"
+        + "    uint64 size = 4;"
+        + "  }"
+        + "}\n"
+        + "\n"
+        + "enum Order {\n"
+        + "    FIRST = 1;\n"
+        + "    SECOND = 1;\n"
+        + "}\n";
+
+    String expected =
+        "{\"$id\":\"http://example.com/com.acme.MyRecord\","
+        + "\"$schema\":\"https://json-schema.org/draft/2020-12/schema\","
+        + "\"type\":\"object\",\"properties\":{\"f1\":{\"type\":\"string\"},"
+        + "\"f2\":{\"$ref\":\"#/definitions/record.com.acme.OtherRecord\"},"
+        + "\"f3\":{\"type\":\"array\","
+        + "\"items\":{\"$ref\":\"#/definitions/record.com.acme.OtherRecord\"}}},"
+        + "\"required\":[\"f3\"],"
+        + "\"definitions\":"
+        + "{\"record.com.acme.OtherRecord\":"
+        + "{\"type\":\"object\",\"properties\":"
+        + "{\"optionalField\":{\"oneOf\":[{\"type\":\"string\"},"
+        + "{\"type\":\"integer\"}]},\"other_id\":"
+        + "{\"type\":\"integer\"},\"order\":{\"enum\":[\"FIRST\",\"SECOND\"]}}}}}";
+
+    ProtobufSchema protobufSchema = new ProtobufSchema(proto);
+
+    final ProtobufSchemaConverter converter = new ProtobufSchemaConverter();
+    URI basePath = new URI("http://example.com/");
+
+    final JsonSchema convert =
+        converter.convert(basePath, protobufSchema.toDescriptor("MyRecord"));
+    Assertions.assertEquals(expected, convert.toJson(new ObjectMapper()));
+  }
+}

+ 370 - 48
kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml

@@ -31,6 +31,30 @@ paths:
                 items:
                   $ref: '#/components/schemas/Cluster'

+
+  /api/clusters/{clusterName}/cache:
+    post:
+      tags:
+        - Clusters
+      summary: updateClusterInfo
+      operationId: updateClusterInfo
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+      responses:
+        200:
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/Cluster'
+        404:
+          description: Not found
+
+
   /api/clusters/{clusterName}/brokers:
     get:
       tags:
@@ -287,6 +311,38 @@ paths:
                 items:
                   $ref: '#/components/schemas/TopicConfig'

+  /api/clusters/{clusterName}/topics/{topicName}/replications:
+    patch:
+      tags:
+        - Topics
+      summary: changeReplicationFactor
+      operationId: changeReplicationFactor
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+        - name: topicName
+          in: path
+          required: true
+          schema:
+            type: string
+      requestBody:
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/ReplicationFactorChange'
+      responses:
+        200:
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/ReplicationFactorChangeResponse'
+        404:
+          description: Not found
+
   /api/clusters/{clusterName}/topics/{topicName}/messages:
     get:
       tags:
@@ -314,7 +370,7 @@ paths:
             type: array
             items:
               type: string
-          description: The format is [partition]::[offset] for specifying offsets or [partition]::[timstamp in millis] for specifying timestamps
+          description: The format is [partition]::[offset] for specifying offsets or [partition]::[timestamp in millis] for specifying timestamps
         - name: limit
           in: query
           schema:
@@ -323,6 +379,10 @@ paths:
           in: query
           schema:
             type: string
+        - name: seekDirection
+          in: query
+          schema:
+            $ref: "#/components/schemas/SeekDirection"
       responses:
         200:
           description: OK
@@ -360,8 +420,59 @@ paths:
           description: OK
         404:
           description: Not found
+    post:
+      tags:
+        - Messages
+      summary: sendTopicMessages
+      operationId: sendTopicMessages
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+        - name: topicName
+          in: path
+          required: true
+          schema:
+            type: string
+      requestBody:
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/CreateTopicMessage'
+      responses:
+        200:
+          description: OK
+        404:
+          description: Not found
+
+  /api/clusters/{clusterName}/topics/{topicName}/messages/schema:
+    get:
+      tags:
+        - Messages
+      summary: getTopicSchema
+      operationId: getTopicSchema
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+        - name: topicName
+          in: path
+          required: true
+          schema:
+            type: string
+      responses:
+        200:
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/TopicMessageSchema'
 
 
-  /api/clusters/{clusterName}/topics/{topicName}/consumergroups:
+  /api/clusters/{clusterName}/topics/{topicName}/consumer-groups:
     get:
       tags:
         - Consumer Groups
@@ -384,7 +495,10 @@ paths:
           content:
             application/json:
               schema:
-                $ref: '#/components/schemas/TopicConsumerGroups'
+                type: array
+                items:
+                  $ref: '#/components/schemas/ConsumerGroup'
+
 
 
   /api/clusters/{clusterName}/consumer-groups/{id}:
     get:
@@ -411,7 +525,27 @@ paths:
               schema:
                 $ref: '#/components/schemas/ConsumerGroupDetails'

-  /api/clusters/{clusterName}/consumerGroups:
+    delete:
+      tags:
+        - Consumer Groups
+      summary: Delete Consumer Group by ID
+      operationId: deleteConsumerGroup
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+        - name: id
+          in: path
+          required: true
+          schema:
+            type: string
+      responses:
+        200:
+          description: OK
+
+  /api/clusters/{clusterName}/consumer-groups:
     get:
       tags:
         - Consumer Groups
@@ -433,6 +567,32 @@ paths:
                 items:
                   $ref: '#/components/schemas/ConsumerGroup'

+  /api/clusters/{clusterName}/consumer-groups/{id}/offsets:
+    post:
+      tags:
+        - Consumer Groups
+      summary: resets consumer group offsets
+      operationId: resetConsumerGroupOffsets
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+        - name: id
+          in: path
+          required: true
+          schema:
+            type: string
+      requestBody:
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/ConsumerGroupOffsetsReset'
+      responses:
+        200:
+          description: OK
+
   /api/clusters/{clusterName}/schemas:
     post:
       tags:
@@ -1118,6 +1278,38 @@ paths:
               schema:
                 $ref: '#/components/schemas/ConnectorPluginConfigValidationResponse'

+  /api/clusters/{clusterName}/topics/{topicName}/partitions:
+    patch:
+      tags:
+        - Topics
+      summary: increaseTopicPartitions
+      operationId: increaseTopicPartitions
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+        - name: topicName
+          in: path
+          required: true
+          schema:
+            type: string
+      requestBody:
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/PartitionsIncrease'
+      responses:
+        200:
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/PartitionsIncreaseResponse'
+        404:
+          description: Not found
+
 components:
   schemas:
     ErrorResponse:
@@ -1174,6 +1366,8 @@ components:
           type: number
         readOnly:
           type: boolean
+        version:
+          type: string
         features:
           type: array
           items:
@@ -1266,6 +1460,7 @@ components:
         - NAME
         - OUT_OF_SYNC_REPLICAS
         - TOTAL_PARTITIONS
+        - REPLICATION_FACTOR
 
 
     Topic:
       type: object
@@ -1388,28 +1583,81 @@ components:
       required:
         - id

+    ConsumerGroupState:
+      type: string
+      enum:
+        - UNKNOWN
+        - PREPARING_REBALANCE
+        - COMPLETING_REBALANCE
+        - STABLE
+        - DEAD
+        - EMPTY
+
     ConsumerGroup:
       type: object
       properties:
-        clusterId:
+        groupId:
           type: string
-        consumerGroupId:
-            type: string
-        numConsumers:
-            type: integer
-        numTopics:
-            type: integer
+        members:
+          type: integer
+        topics:
+          type: integer
         simple:
           type: boolean
         partitionAssignor:
           type: string
         state:
+          $ref: "#/components/schemas/ConsumerGroupState"
+        coordinator:
+          $ref: "#/components/schemas/Broker"
+        messagesBehind:
+          type: integer
+          format: int64
+      required:
+        - groupId
+
+    CreateTopicMessage:
+      type: object
+      properties:
+        partition:
+          type: integer
+        key:
           type: string
-        coordintor:
+        headers:
+          type: object
+          additionalProperties:
+            type: string
+        content:
           type: string
+
+    TopicMessageSchema:
+      type: object
+      properties:
+        key:
+          $ref: "#/components/schemas/MessageSchema"
+        value:
+          $ref: "#/components/schemas/MessageSchema"
       required:
-        - clusterId
-        - consumerGroupId
+        - key
+        - value
+
+    MessageSchema:
+      type: object
+      properties:
+        name:
+          type: string
+        source:
+          type: string
+          enum:
+            - SOURCE_SCHEMA_REGISTRY
+            - SOURCE_PROTO_FILE
+            - SOURCE_UNKNOWN
+        schema:
+          type: string
+      required:
+        - name
+        - source
+        - schema
 
 
     TopicMessage:
       type: object
@@ -1435,7 +1683,7 @@ components:
           additionalProperties:
             type: string
         content:
-          type: object
+          type: string
       required:
         - partition
         - offset
@@ -1448,6 +1696,13 @@ components:
         - OFFSET
         - TIMESTAMP

+    SeekDirection:
+      type: string
+      enum:
+        - FORWARD
+        - BACKWARD
+      default: FORWARD
+
     Partition:
       type: object
       properties:
@@ -1471,17 +1726,11 @@ components:
         - offsetMax
         - offsetMin

-    ConsumerTopicPartitionDetail:
+    ConsumerGroupTopicPartition:
       type: object
       properties:
-        groupId:
-          type: string
-        consumerId:
-          type: string
         topic:
           type: string
-        host:
-          type: string
         partition:
           type: integer
         currentOffset:
@@ -1493,36 +1742,24 @@ components:
         messagesBehind:
           type: integer
           format: int64
+        consumerId:
+          type: string
+        host:
+          type: string
       required:
-        - consumerId
+        - topic
+        - partition
 
 
-    TopicConsumerGroups:
-      type: object
-      properties:
-        consumers:
-          type: array
-          items:
-            $ref: '#/components/schemas/ConsumerTopicPartitionDetail'
 
 
     ConsumerGroupDetails:
-      type: object
-      properties:
-        consumerGroupId:
-          type: string
-        simple:
-         type: boolean
-        partitionAssignor:
-          type: string
-        state:
-          type: string
-        coordintor:
-          type: string
-        consumers:
-          type: array
-          items:
-            $ref: '#/components/schemas/ConsumerTopicPartitionDetail'
-      required:
-        - consumerGroupId
+      allOf:
+        - $ref: '#/components/schemas/ConsumerGroup'
+        - type: object
+          properties:
+            partitions:
+              type: array
+              items:
+                $ref: '#/components/schemas/ConsumerGroupTopicPartition'
 
 
     Metric:
       type: object
@@ -1680,6 +1917,51 @@ components:
         - SOURCE
         - SINK

+    ConsumerGroupOffsetsReset:
+      type: object
+      properties:
+        topic:
+          type: string
+        resetType:
+          $ref: '#/components/schemas/ConsumerGroupOffsetsResetType'
+        partitions:
+          type: array
+          items:
+            type: integer
+          description: list of target partitions, all partitions will be used if it is not set or empty
+        resetToTimestamp:
+          type: integer
+          format: int64
+          description: should be set if resetType is TIMESTAMP
+        partitionsOffsets:
+          type: array
+          items:
+            $ref: '#/components/schemas/PartitionOffset'
+          description: List of partition offsets to reset to, should be set when resetType is OFFSET
+      required:
+        - topic
+        - resetType
+
+    PartitionOffset:
+      type: object
+      properties:
+        partition:
+          type: integer
+        offset:
+          type: integer
+          format: int64
+      required:
+        - partition
+        - offset
+
+    ConsumerGroupOffsetsResetType:
+      type: string
+      enum:
+        - EARLIEST
+        - LATEST
+        - TIMESTAMP
+        - OFFSET
+
     TaskStatus:
       type: object
       properties:
@@ -1846,3 +2128,43 @@ components:
         - name
         - connect
         - status
+
+    PartitionsIncrease:
+      type: object
+      properties:
+        totalPartitionsCount:
+          type: integer
+          minimum: 1
+      required:
+        - totalPartitionsCount
+
+    PartitionsIncreaseResponse:
+      type: object
+      properties:
+        totalPartitionsCount:
+          type: integer
+        topicName:
+          type: string
+      required:
+        - totalPartitionsCount
+        - topicName
+
+    ReplicationFactorChange:
+      type: object
+      properties:
+        totalReplicationFactor:
+          type: integer
+          minimum: 1
+      required:
+        - totalReplicationFactor
+
+    ReplicationFactorChangeResponse:
+      type: object
+      properties:
+        totalReplicationFactor:
+          type: integer
+        topicName:
+          type: string
+      required:
+        - totalReplicationFactor
+        - topicName
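The spec changes above add several write endpoints. Below is a minimal client-side sketch of how they might be called, assuming the API is reachable at http://localhost:8080 (the dev proxy used by the React app elsewhere in this change), that cluster `local`, topic `my-topic`, and consumer group `my-group` exist, and that no authentication is configured; it uses plain `fetch`, not the generated client:

```typescript
const base = 'http://localhost:8080/api/clusters/local';
const json = { 'Content-Type': 'application/json' };

async function examples(): Promise<void> {
  // Reset a consumer group to the earliest offsets of a topic.
  await fetch(`${base}/consumer-groups/my-group/offsets`, {
    method: 'POST',
    headers: json,
    body: JSON.stringify({ topic: 'my-topic', resetType: 'EARLIEST' }),
  });

  // Reset selected partitions to explicit offsets (resetType OFFSET).
  await fetch(`${base}/consumer-groups/my-group/offsets`, {
    method: 'POST',
    headers: json,
    body: JSON.stringify({
      topic: 'my-topic',
      resetType: 'OFFSET',
      partitionsOffsets: [{ partition: 0, offset: 42 }],
    }),
  });

  // Produce a message (CreateTopicMessage body).
  await fetch(`${base}/topics/my-topic/messages`, {
    method: 'POST',
    headers: json,
    body: JSON.stringify({
      partition: 0,
      key: 'user-1',
      headers: { source: 'docs-example' },
      content: '{"hello":"world"}',
    }),
  });

  // Read the newest messages by seeking backward from the end.
  const res = await fetch(
    `${base}/topics/my-topic/messages?seekDirection=BACKWARD&limit=10`
  );
  console.log(await res.json());

  // Grow the topic to 10 partitions.
  await fetch(`${base}/topics/my-topic/partitions`, {
    method: 'PATCH',
    headers: json,
    body: JSON.stringify({ totalPartitionsCount: 10 }),
  });
}

examples().catch(console.error);
```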

+ 3 - 0
kafka-ui-e2e-checks/.env.ci

@@ -0,0 +1,3 @@
+USE_LOCAL_BROWSER=false
+SHOULD_START_SELENOID=true
+TURN_OFF_SCREENSHOTS=true

+ 2 - 2
kafka-ui-e2e-checks/.env.example

@@ -1,3 +1,3 @@
-USE_LOCAL_BROWSER=false
-SHOULD_START_SELENOID=true
+USE_LOCAL_BROWSER=true
+SHOULD_START_SELENOID=false
 TURN_OFF_SCREENSHOTS=true

+ 10 - 2
kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/base/BaseTest.java

@@ -10,6 +10,7 @@ import io.github.cdimascio.dotenv.Dotenv;
 import io.qameta.allure.selenide.AllureSelenide;
 import lombok.SneakyThrows;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.io.FileUtils;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.DisplayNameGeneration;
 import org.openqa.selenium.remote.DesiredCapabilities;
@@ -18,6 +19,7 @@ import org.testcontainers.containers.GenericContainer;
 import org.testcontainers.utility.DockerImageName;

 import java.io.File;
+import java.io.IOException;
 import java.util.Arrays;

 @Slf4j
@@ -50,9 +52,15 @@ public class BaseTest {
               "-conf", "/etc/selenoid/browsers.json", "-log-output-dir", "/opt/selenoid/logs");

   static {
-    if (new File("./.env").exists()) {
-      Dotenv.load().entries().forEach(env -> System.setProperty(env.getKey(), env.getValue()));
+    if (!new File("./.env").exists()) {
+      try {
+        FileUtils.copyFile(new File(".env.example"), new File(".env"));
+      } catch (IOException e) {
+        log.error("couldn't copy .env.example to .env. Please add .env");
+        e.printStackTrace();
+      }
     }
+    Dotenv.load().entries().forEach(env -> System.setProperty(env.getKey(), env.getValue()));
     if (TestConfiguration.CLEAR_REPORTS_DIR) {
       clearReports();
     }

+ 5 - 1
kafka-ui-e2e-checks/src/test/java/com/provectus/kafka/ui/screenshots/Screenshooter.java

@@ -38,6 +38,8 @@ public class Screenshooter {
       Boolean.parseBoolean(System.getProperty("SHOULD_SAVE_SCREENSHOTS_IF_NOT_EXIST", "true"));
   private static boolean TURN_OFF_SCREENSHOTS =
       Boolean.parseBoolean(System.getProperty("TURN_OFF_SCREENSHOTS", "false"));
+  private static boolean USE_LOCAL_BROWSER =
+          Boolean.parseBoolean(System.getProperty("USE_LOCAL_BROWSER", "false"));
 
 
   private File newFile(String name) {
     var file = new File(name);
@@ -58,7 +60,9 @@ public class Screenshooter {
 
 
   @SneakyThrows
   public void compareScreenshots(String name, boolean shouldUpdateScreenshotIfDiffer) {
-    if (TURN_OFF_SCREENSHOTS) {
+    if (TURN_OFF_SCREENSHOTS || USE_LOCAL_BROWSER) {
+      log.warn("compareScreenshots turned off due to TURN_OFF_SCREENSHOTS || USE_LOCAL_BROWSER: %b || %b"
+              .formatted(TURN_OFF_SCREENSHOTS, USE_LOCAL_BROWSER));
       return;
     }
     if (!doesScreenshotExist(name)) {

+ 1 - 1
kafka-ui-react-app/.nvmrc

@@ -1 +1 @@
-v14.15.4
+v14.17.1

+ 10 - 0
kafka-ui-react-app/README.md

@@ -26,6 +26,16 @@ Go to react app folder
 cd ./kafka-ui-react-app
 ```

+Install Husky
+```
+npm install -g husky
+```
+
+Install dependencies
+```
+npm install
+```
+
 Generate API clients from OpenAPI document
 ```sh
 npm run gen:sources

File diff suppressed because it is too large
+ 22 - 20583
kafka-ui-react-app/package-lock.json


+ 20 - 20
kafka-ui-react-app/package.json

@@ -9,7 +9,7 @@
     "@hookform/resolvers": "^2.5.1",
     "@rooks/use-outside-click-ref": "^4.10.1",
     "ace-builds": "^1.4.12",
-    "bulma": "^0.9.2",
+    "bulma": "^0.9.3",
     "bulma-switch": "^2.0.0",
     "classnames": "^2.2.6",
     "date-fns": "^2.19.0",
@@ -22,7 +22,7 @@
     "react-ace": "^9.3.0",
     "react-datepicker": "^4.1.1",
     "react-dom": "^17.0.1",
-    "react-hook-form": "^7.6.9",
+    "react-hook-form": "7.6.9",
     "react-json-tree": "^0.15.0",
     "react-multi-select-component": "^4.0.0",
     "react-redux": "^7.2.2",
@@ -32,7 +32,7 @@
     "redux-thunk": "^2.3.0",
     "reselect": "^4.0.0",
     "typesafe-actions": "^5.1.0",
-    "use-debounce": "^6.0.1",
+    "use-debounce": "^7.0.0",
     "uuid": "^8.3.1",
     "yup": "^0.32.9"
   },
@@ -71,55 +71,55 @@
     ]
   },
   "devDependencies": {
-    "@jest/types": "^26.6.2",
-    "@openapitools/openapi-generator-cli": "^2.2.5",
+    "@jest/types": "^27.0.6",
+    "@openapitools/openapi-generator-cli": "^2.3.5",
     "@testing-library/jest-dom": "^5.11.10",
     "@types/classnames": "^2.2.11",
     "@types/enzyme": "^3.10.8",
     "@types/jest": "^26.0.21",
     "@types/lodash": "^4.14.165",
-    "@types/node": "^15.0.1",
+    "@types/node": "^16.0.0",
     "@types/node-fetch": "^2.5.9",
-    "@types/react": "^17.0.3",
-    "@types/react-datepicker": "^3.1.8",
+    "@types/react": "^17.0.13",
+    "@types/react-datepicker": "^4.1.1",
     "@types/react-dom": "^17.0.2",
     "@types/react-redux": "^7.1.11",
     "@types/react-router-dom": "^5.1.6",
     "@types/react-test-renderer": "^17.0.1",
     "@types/redux-mock-store": "^1.0.2",
-    "@types/uuid": "^8.3.0",
-    "@typescript-eslint/eslint-plugin": "^4.20.0",
-    "@typescript-eslint/parser": "^4.20.0",
+    "@types/uuid": "^8.3.1",
+    "@typescript-eslint/eslint-plugin": "^4.28.1",
+    "@typescript-eslint/parser": "^4.28.1",
     "@wojtekmaj/enzyme-adapter-react-17": "^0.6.0",
     "dotenv": "^10.0.0",
     "enzyme": "^3.11.0",
     "enzyme-to-json": "^3.6.1",
-    "eslint": "^7.22.0",
+    "eslint": "^7.30.0",
     "eslint-config-airbnb": "^18.2.1",
     "eslint-config-airbnb-typescript": "^12.3.1",
-    "eslint-config-prettier": "^8.1.0",
+    "eslint-config-prettier": "^8.3.0",
     "eslint-plugin-import": "^2.22.1",
     "eslint-plugin-jsx-a11y": "^6.4.1",
-    "eslint-plugin-prettier": "^3.1.4",
+    "eslint-plugin-prettier": "^3.4.0",
     "eslint-plugin-react": "^7.21.5",
     "eslint-plugin-react-hooks": "^4.2.0",
-    "esprint": "^2.0.0",
+    "esprint": "^3.1.0",
     "fetch-mock-jest": "^1.5.1",
-    "husky": "^6.0.0",
+    "husky": "^7.0.0",
     "jest-sonar-reporter": "^2.0.0",
     "lint-staged": "^11.0.0",
     "node-sass": "^5.0.0",
-    "prettier": "^2.2.1",
+    "prettier": "^2.3.1",
     "react-scripts": "4.0.3",
     "react-test-renderer": "^17.0.2",
     "redux-mock-store": "^1.5.4",
     "ts-jest": "^26.5.4",
     "ts-node": "^10.0.0",
-    "typescript": "^4.2.3"
+    "typescript": "^4.3.5"
   },
   "engines": {
-    "node": ">=14.15.4",
-    "npm": ">=7.15.1"
+    "node": "14.17.1",
+    "npm": "6.14.13"
   },
   "proxy": "http://localhost:8080",
   "jest": {

+ 88 - 0
kafka-ui-react-app/src/components/Brokers/__test__/Brokers.spec.tsx

@@ -0,0 +1,88 @@
+import React from 'react';
+import { mount } from 'enzyme';
+import Brokers from 'components/Brokers/Brokers';
+import { ClusterName } from 'redux/interfaces';
+import { StaticRouter } from 'react-router';
+import { ClusterStats } from 'generated-sources';
+
+interface Props extends ClusterStats {
+  isFetched: boolean;
+  fetchClusterStats: (clusterName: ClusterName) => void;
+  fetchBrokers: (clusterName: ClusterName) => void;
+}
+
+describe('Brokers Component', () => {
+  const pathname = `ui/clusters/local/brokers`;
+
+  describe('Brokers Empty', () => {
+    const setupEmptyComponent = (props: Partial<Props> = {}) => (
+      <StaticRouter location={{ pathname }} context={{}}>
+        <Brokers
+          brokerCount={0}
+          activeControllers={0}
+          zooKeeperStatus={0}
+          onlinePartitionCount={0}
+          offlinePartitionCount={0}
+          inSyncReplicasCount={0}
+          outOfSyncReplicasCount={0}
+          underReplicatedPartitionCount={0}
+          fetchClusterStats={jest.fn()}
+          fetchBrokers={jest.fn()}
+          diskUsage={undefined}
+          isFetched={false}
+          {...props}
+        />
+      </StaticRouter>
+    );
+    it('renders section', () => {
+      const component = mount(setupEmptyComponent());
+      expect(component.exists('.section')).toBeTruthy();
+    });
+
+    it('renders section with is-danger selector', () => {
+      const component = mount(setupEmptyComponent());
+      expect(component.exists('.is-danger')).toBeTruthy();
+    });
+
+    it('matches Brokers Empty snapshot', () => {
+      expect(mount(setupEmptyComponent())).toMatchSnapshot();
+    });
+  });
+
+  describe('Brokers', () => {
+    const setupComponent = (props: Partial<Props> = {}) => (
+      <StaticRouter location={{ pathname }} context={{}}>
+        <Brokers
+          brokerCount={1}
+          activeControllers={1}
+          zooKeeperStatus={1}
+          onlinePartitionCount={64}
+          offlinePartitionCount={0}
+          inSyncReplicasCount={64}
+          outOfSyncReplicasCount={0}
+          underReplicatedPartitionCount={0}
+          fetchClusterStats={jest.fn()}
+          fetchBrokers={jest.fn()}
+          diskUsage={[
+            {
+              brokerId: 1,
+              segmentCount: 64,
+              segmentSize: 60718,
+            },
+          ]}
+          isFetched
+          {...props}
+        />
+      </StaticRouter>
+    );
+
+    it('renders section with is-success selector', () => {
+      const component = mount(setupComponent());
+      expect(component.exists('.is-success')).toBeTruthy();
+    });
+
+    it('matches snapshot', () => {
+      expect(mount(setupComponent())).toMatchSnapshot();
+    });
+  });
+});

+ 8 - 0
kafka-ui-react-app/src/components/Brokers/__test__/BrokersContainer.spec.tsx

@@ -0,0 +1,8 @@
+import React from 'react';
+import { containerRendersView } from 'lib/testHelpers';
+import Brokers from 'components/Brokers/Brokers';
+import BrokersContainer from 'components/Brokers/BrokersContainer';
+
+describe('BrokersContainer', () => {
+  containerRendersView(<BrokersContainer />, Brokers);
+});

+ 731 - 0
kafka-ui-react-app/src/components/Brokers/__test__/__snapshots__/Brokers.spec.tsx.snap

@@ -0,0 +1,731 @@
+// Jest Snapshot v1, https://goo.gl/fbAQLP
+
+exports[`Brokers Component Brokers Empty matches Brokers Empty snapshot 1`] = `
+<StaticRouter
+  context={Object {}}
+  location={
+    Object {
+      "pathname": "ui/clusters/local/brokers",
+    }
+  }
+>
+  <Router
+    history={
+      Object {
+        "action": "POP",
+        "block": [Function],
+        "createHref": [Function],
+        "go": [Function],
+        "goBack": [Function],
+        "goForward": [Function],
+        "listen": [Function],
+        "location": Object {
+          "hash": "",
+          "pathname": "ui/clusters/local/brokers",
+          "search": "",
+        },
+        "push": [Function],
+        "replace": [Function],
+      }
+    }
+    staticContext={Object {}}
+  >
+    <Brokers
+      activeControllers={0}
+      brokerCount={0}
+      fetchBrokers={
+        [MockFunction] {
+          "calls": Array [
+            Array [
+              undefined,
+            ],
+          ],
+          "results": Array [
+            Object {
+              "type": "return",
+              "value": undefined,
+            },
+          ],
+        }
+      }
+      fetchClusterStats={
+        [MockFunction] {
+          "calls": Array [
+            Array [
+              undefined,
+            ],
+          ],
+          "results": Array [
+            Object {
+              "type": "return",
+              "value": undefined,
+            },
+          ],
+        }
+      }
+      inSyncReplicasCount={0}
+      isFetched={false}
+      offlinePartitionCount={0}
+      onlinePartitionCount={0}
+      outOfSyncReplicasCount={0}
+      underReplicatedPartitionCount={0}
+      zooKeeperStatus={0}
+    >
+      <div
+        className="section"
+      >
+        <Breadcrumb>
+          <nav
+            aria-label="breadcrumbs"
+            className="breadcrumb"
+          >
+            <ul>
+              <li
+                className="is-active"
+              >
+                <span
+                  className=""
+                >
+                  Brokers overview
+                </span>
+              </li>
+            </ul>
+          </nav>
+        </Breadcrumb>
+        <MetricsWrapper
+          title="Uptime"
+        >
+          <div
+            className="box"
+          >
+            <h5
+              className="subtitle is-6"
+            >
+              Uptime
+            </h5>
+            <div
+              className="level"
+            >
+              <Indicator
+                className="is-one-third"
+                label="Total Brokers"
+              >
+                <div
+                  className="level-item is-one-third"
+                >
+                  <div
+                    title="Total Brokers"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      Total Brokers
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      0
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+              <Indicator
+                className="is-one-third"
+                label="Active Controllers"
+              >
+                <div
+                  className="level-item is-one-third"
+                >
+                  <div
+                    title="Active Controllers"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      Active Controllers
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      0
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+              <Indicator
+                className="is-one-third"
+                label="Zookeeper Status"
+              >
+                <div
+                  className="level-item is-one-third"
+                >
+                  <div
+                    title="Zookeeper Status"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      Zookeeper Status
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      <span
+                        className="tag is-danger"
+                      >
+                        Offline
+                      </span>
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+            </div>
+          </div>
+        </MetricsWrapper>
+        <MetricsWrapper
+          title="Partitions"
+        >
+          <div
+            className="box"
+          >
+            <h5
+              className="subtitle is-6"
+            >
+              Partitions
+            </h5>
+            <div
+              className="level"
+            >
+              <Indicator
+                label="Online"
+              >
+                <div
+                  className="level-item"
+                >
+                  <div
+                    title="Online"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      Online
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      <span
+                        className=""
+                      >
+                        0
+                      </span>
+                      <span
+                        className="subtitle has-text-weight-light"
+                      >
+                         
+                        of
+                        0
+                      </span>
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+              <Indicator
+                label="URP"
+                title="Under replicated partitions"
+              >
+                <div
+                  className="level-item"
+                >
+                  <div
+                    title="Under replicated partitions"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      URP
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      0
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+              <Indicator
+                label="In Sync Replicas"
+              >
+                <div
+                  className="level-item"
+                >
+                  <div
+                    title="In Sync Replicas"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      In Sync Replicas
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      0
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+              <Indicator
+                label="Out of Sync Replicas"
+              >
+                <div
+                  className="level-item"
+                >
+                  <div
+                    title="Out of Sync Replicas"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      Out of Sync Replicas
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      0
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+            </div>
+          </div>
+        </MetricsWrapper>
+        <MetricsWrapper
+          multiline={true}
+          title="Disk Usage"
+        >
+          <div
+            className="box"
+          >
+            <h5
+              className="subtitle is-6"
+            >
+              Disk Usage
+            </h5>
+            <div
+              className="level level-multiline"
+            />
+          </div>
+        </MetricsWrapper>
+      </div>
+    </Brokers>
+  </Router>
+</StaticRouter>
+`;
+
+exports[`Brokers Component Brokers matches snapshot 1`] = `
+<StaticRouter
+  context={Object {}}
+  location={
+    Object {
+      "pathname": "ui/clusters/local/brokers",
+    }
+  }
+>
+  <Router
+    history={
+      Object {
+        "action": "POP",
+        "block": [Function],
+        "createHref": [Function],
+        "go": [Function],
+        "goBack": [Function],
+        "goForward": [Function],
+        "listen": [Function],
+        "location": Object {
+          "hash": "",
+          "pathname": "ui/clusters/local/brokers",
+          "search": "",
+        },
+        "push": [Function],
+        "replace": [Function],
+      }
+    }
+    staticContext={Object {}}
+  >
+    <Brokers
+      activeControllers={1}
+      brokerCount={1}
+      diskUsage={
+        Array [
+          Object {
+            "brokerId": 1,
+            "segmentCount": 64,
+            "segmentSize": 60718,
+          },
+        ]
+      }
+      fetchBrokers={
+        [MockFunction] {
+          "calls": Array [
+            Array [
+              undefined,
+            ],
+          ],
+          "results": Array [
+            Object {
+              "type": "return",
+              "value": undefined,
+            },
+          ],
+        }
+      }
+      fetchClusterStats={
+        [MockFunction] {
+          "calls": Array [
+            Array [
+              undefined,
+            ],
+          ],
+          "results": Array [
+            Object {
+              "type": "return",
+              "value": undefined,
+            },
+          ],
+        }
+      }
+      inSyncReplicasCount={64}
+      isFetched={true}
+      offlinePartitionCount={0}
+      onlinePartitionCount={64}
+      outOfSyncReplicasCount={0}
+      underReplicatedPartitionCount={0}
+      zooKeeperStatus={1}
+    >
+      <div
+        className="section"
+      >
+        <Breadcrumb>
+          <nav
+            aria-label="breadcrumbs"
+            className="breadcrumb"
+          >
+            <ul>
+              <li
+                className="is-active"
+              >
+                <span
+                  className=""
+                >
+                  Brokers overview
+                </span>
+              </li>
+            </ul>
+          </nav>
+        </Breadcrumb>
+        <MetricsWrapper
+          title="Uptime"
+        >
+          <div
+            className="box"
+          >
+            <h5
+              className="subtitle is-6"
+            >
+              Uptime
+            </h5>
+            <div
+              className="level"
+            >
+              <Indicator
+                className="is-one-third"
+                label="Total Brokers"
+              >
+                <div
+                  className="level-item is-one-third"
+                >
+                  <div
+                    title="Total Brokers"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      Total Brokers
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      1
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+              <Indicator
+                className="is-one-third"
+                label="Active Controllers"
+              >
+                <div
+                  className="level-item is-one-third"
+                >
+                  <div
+                    title="Active Controllers"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      Active Controllers
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      1
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+              <Indicator
+                className="is-one-third"
+                label="Zookeeper Status"
+              >
+                <div
+                  className="level-item is-one-third"
+                >
+                  <div
+                    title="Zookeeper Status"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      Zookeeper Status
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      <span
+                        className="tag is-success"
+                      >
+                        Online
+                      </span>
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+            </div>
+          </div>
+        </MetricsWrapper>
+        <MetricsWrapper
+          title="Partitions"
+        >
+          <div
+            className="box"
+          >
+            <h5
+              className="subtitle is-6"
+            >
+              Partitions
+            </h5>
+            <div
+              className="level"
+            >
+              <Indicator
+                label="Online"
+              >
+                <div
+                  className="level-item"
+                >
+                  <div
+                    title="Online"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      Online
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      <span
+                        className=""
+                      >
+                        64
+                      </span>
+                      <span
+                        className="subtitle has-text-weight-light"
+                      >
+                         
+                        of
+                        64
+                      </span>
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+              <Indicator
+                label="URP"
+                title="Under replicated partitions"
+              >
+                <div
+                  className="level-item"
+                >
+                  <div
+                    title="Under replicated partitions"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      URP
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      0
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+              <Indicator
+                label="In Sync Replicas"
+              >
+                <div
+                  className="level-item"
+                >
+                  <div
+                    title="In Sync Replicas"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      In Sync Replicas
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      64
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+              <Indicator
+                label="Out of Sync Replicas"
+              >
+                <div
+                  className="level-item"
+                >
+                  <div
+                    title="Out of Sync Replicas"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      Out of Sync Replicas
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      0
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+            </div>
+          </div>
+        </MetricsWrapper>
+        <MetricsWrapper
+          multiline={true}
+          title="Disk Usage"
+        >
+          <div
+            className="box"
+          >
+            <h5
+              className="subtitle is-6"
+            >
+              Disk Usage
+            </h5>
+            <div
+              className="level level-multiline"
+            >
+              <Indicator
+                className="is-one-third"
+                label="Broker"
+              >
+                <div
+                  className="level-item is-one-third"
+                >
+                  <div
+                    title="Broker"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      Broker
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      1
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+              <Indicator
+                className="is-one-third"
+                label="Segment Size"
+                title=""
+              >
+                <div
+                  className="level-item is-one-third"
+                >
+                  <div
+                    title="Segment Size"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      Segment Size
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      <BytesFormatted
+                        value={60718}
+                      >
+                        <span>
+                          59KB
+                        </span>
+                      </BytesFormatted>
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+              <Indicator
+                className="is-one-third"
+                label="Segment count"
+              >
+                <div
+                  className="level-item is-one-third"
+                >
+                  <div
+                    title="Segment count"
+                  >
+                    <p
+                      className="heading"
+                    >
+                      Segment count
+                    </p>
+                    <p
+                      className="title has-text-centered"
+                    >
+                      64
+                    </p>
+                  </div>
+                </div>
+              </Indicator>
+            </div>
+          </div>
+        </MetricsWrapper>
+      </div>
+    </Brokers>
+  </Router>
+</StaticRouter>
+`;

+ 1 - 1
kafka-ui-react-app/src/components/Cluster/__tests__/Cluster.spec.tsx

@@ -68,7 +68,7 @@ describe('Cluster', () => {
       expect(wrapper.exists('mock-Connect')).toBeFalsy();
     });
     it('renders Schemas if KAFKA_CONNECT is configured', async () => {
-      await store.dispatch(
+      store.dispatch(
         fetchClusterListAction.success([
           {
             ...onlineClusterPayload,

+ 44 - 8
kafka-ui-react-app/src/components/ConsumerGroups/Details/Details.tsx

@@ -6,34 +6,52 @@ import { ConsumerGroupID } from 'redux/interfaces/consumerGroup';
 import {
   ConsumerGroup,
   ConsumerGroupDetails,
-  ConsumerTopicPartitionDetail,
+  ConsumerGroupTopicPartition,
 } from 'generated-sources';
 import PageLoader from 'components/common/PageLoader/PageLoader';
+import ConfirmationModal from 'components/common/ConfirmationModal/ConfirmationModal';
+import { useHistory } from 'react-router';
 
 
 import ListItem from './ListItem';
 import ListItem from './ListItem';

+export interface Props extends ConsumerGroup, ConsumerGroupDetails {
   clusterName: ClusterName;
   clusterName: ClusterName;
-  consumers?: ConsumerTopicPartitionDetail[];
+  consumers?: ConsumerGroupTopicPartition[];
   isFetched: boolean;
   isFetched: boolean;
   fetchConsumerGroupDetails: (
   fetchConsumerGroupDetails: (
     clusterName: ClusterName,
     consumerGroupID: ConsumerGroupID
   ) => void;
 }
 }

 const Details: React.FC<Props> = ({
   clusterName,
+  groupId,
   consumers,
   consumers,
   isFetched,
   fetchConsumerGroupDetails,
   fetchConsumerGroupDetails,
 }) => {
 }) => {
   React.useEffect(() => {
-  }, [fetchConsumerGroupDetails, clusterName, consumerGroupID]);
+    fetchConsumerGroupDetails(clusterName, groupId);
+  }, [fetchConsumerGroupDetails, clusterName, groupId]);
   const items = consumers || [];
   const items = consumers || [];
+    React.useState<boolean>(false);
+  const history = useHistory();
+
+  const onDelete = () => {
+    setIsConfirmationModelVisible(false);
+    deleteConsumerGroup(clusterName, groupId);
+  };
+  React.useEffect(() => {
+    if (isDeleted) {
+      history.push(clusterConsumerGroupsPath(clusterName));
+    }
+  }, [isDeleted]);
 
 
   return (
   return (
     <div className="section">
               },
               },
             ]}
           >
+            {groupId}
           </Breadcrumb>
           </Breadcrumb>
         </div>
       </div>

       {isFetched ? (
         <div className="box">
+            <div className="level-item level-right buttons">
+              <button
+                type="button"
+                className="button is-danger"
+                onClick={() => setIsConfirmationModelVisible(true)}
+              >
+                Delete consumer group
+              </button>
+            </div>
+          </div>
           <table className="table is-striped is-fullwidth">
           <table className="table is-striped is-fullwidth">
             <thead>
               <tr>
       ) : (
       ) : (
         <PageLoader />
       )}
+        isOpen={isConfirmationModelVisible}
+        onCancel={() => setIsConfirmationModelVisible(false)}
+        onConfirm={onDelete}
+      >
+        Are you sure you want to delete this consumer group?
+      </ConfirmationModal>
     </div>
     </div>
   );
 };
+ 8 - 2
kafka-ui-react-app/src/components/ConsumerGroups/Details/DetailsContainer.ts

@@ -3,10 +3,14 @@ import { ClusterName, RootState } from 'redux/interfaces';
 import { withRouter, RouteComponentProps } from 'react-router-dom';
 import { withRouter, RouteComponentProps } from 'react-router-dom';
 import {
   getIsConsumerGroupDetailsFetched,
   getConsumerGroupByID,
   getConsumerGroupByID,
 } from 'redux/reducers/consumerGroups/selectors';
 import { ConsumerGroupID } from 'redux/interfaces/consumerGroup';
+import {
+  deleteConsumerGroup,
+  fetchConsumerGroupDetails,
+} from 'redux/actions/thunks';
 
 
 import Details from './Details';
 import Details from './Details';

   }: OwnProps
   }: OwnProps
 ) => ({
   clusterName,
   isFetched: getIsConsumerGroupDetailsFetched(state),
   isFetched: getIsConsumerGroupDetailsFetched(state),
   ...getConsumerGroupByID(state, consumerGroupID),
   ...getConsumerGroupByID(state, consumerGroupID),
 });

     clusterName: ClusterName,
     clusterName: ClusterName,
     consumerGroupID: ConsumerGroupID
   ) => fetchConsumerGroupDetails(clusterName, consumerGroupID),
+    deleteConsumerGroup(clusterName, id),
 };
 };

 export default withRouter(
+ 2 - 2
kafka-ui-react-app/src/components/ConsumerGroups/Details/ListItem.tsx

@@ -1,11 +1,11 @@
 import React from 'react';
-import { ConsumerTopicPartitionDetail } from 'generated-sources';
+import { ConsumerGroupTopicPartition } from 'generated-sources';
 import { NavLink } from 'react-router-dom';
 import { ClusterName } from 'redux/interfaces/cluster';

 interface Props {
   clusterName: ClusterName;
-  consumer: ConsumerTopicPartitionDetail;
+  consumer: ConsumerGroupTopicPartition;
 }

 const ListItem: React.FC<Props> = ({ clusterName, consumer }) => {

+ 102 - 0
kafka-ui-react-app/src/components/ConsumerGroups/Details/__tests__/Details.spec.tsx

@@ -0,0 +1,102 @@
+import Details, { Props } from 'components/ConsumerGroups/Details/Details';
+import { mount, shallow } from 'enzyme';
+import React from 'react';
+import { StaticRouter } from 'react-router';
+
+const mockHistory = {
+  push: jest.fn(),
+};
+jest.mock('react-router', () => ({
+  ...jest.requireActual('react-router'),
+  useHistory: () => mockHistory,
+}));
+
+describe('Details component', () => {
+  const setupWrapper = (props?: Partial<Props>) => (
+    <Details
+      clusterName="local"
+      groupId="test"
+      isFetched
+      isDeleted={false}
+      fetchConsumerGroupDetails={jest.fn()}
+      deleteConsumerGroup={jest.fn()}
+      consumers={[
+        {
+          consumerId:
+            'consumer-messages-consumer-1-122fbf98-643b-491d-8aec-c0641d2513d0',
+          topic: 'messages',
+          host: '/172.31.9.153',
+          partition: 6,
+          currentOffset: 394,
+          endOffset: 394,
+          messagesBehind: 0,
+        },
+        {
+          consumerId:
+            'consumer-messages-consumer-1-122fbf98-643b-491d-8aec-c0641d2513d1',
+          topic: 'messages',
+          host: '/172.31.9.153',
+          partition: 7,
+          currentOffset: 384,
+          endOffset: 384,
+          messagesBehind: 0,
+        },
+      ]}
+      {...props}
+    />
+  );
+  describe('when consumer groups are NOT fetched', () => {
+    it('Matches the snapshot', () => {
+      expect(shallow(setupWrapper({ isFetched: false }))).toMatchSnapshot();
+    });
+  });
+
+  describe('when consumer groups are fetched', () => {
+    it('Matches the snapshot', () => {
+      expect(shallow(setupWrapper())).toMatchSnapshot();
+    });
+
+    describe('onDelete', () => {
+      it('calls deleteConsumerGroup', () => {
+        const deleteConsumerGroup = jest.fn();
+        const component = mount(
+          <StaticRouter>{setupWrapper({ deleteConsumerGroup })}</StaticRouter>
+        );
+        component.find('button').at(0).simulate('click');
+        component.update();
+        component
+          .find('ConfirmationModal')
+          .find('button')
+          .at(1)
+          .simulate('click');
+        expect(deleteConsumerGroup).toHaveBeenCalledTimes(1);
+      });
+
+      describe('on ConfirmationModal cancel', () => {
+        it('does not call deleteConsumerGroup', () => {
+          const deleteConsumerGroup = jest.fn();
+          const component = mount(
+            <StaticRouter>{setupWrapper({ deleteConsumerGroup })}</StaticRouter>
+          );
+          component.find('button').at(0).simulate('click');
+          component.update();
+          component
+            .find('ConfirmationModal')
+            .find('button')
+            .at(0)
+            .simulate('click');
+          expect(deleteConsumerGroup).toHaveBeenCalledTimes(0);
+        });
+      });
+
+      describe('after deletion', () => {
+        it('calls history.push', () => {
+          mount(
+            <StaticRouter>{setupWrapper({ isDeleted: true })}</StaticRouter>
+          );
+          expect(mockHistory.push).toHaveBeenCalledTimes(1);
+        });
+      });
+    });
+  });
+});

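The "after deletion" case above only checks that mounting with isDeleted already true triggers exactly one history.push. Below is a minimal sketch of the behaviour under test, assuming Details redirects back to the consumer-groups list from an effect that watches isDeleted; the hook name and target path are illustrative (the path matches the breadcrumb link visible in the snapshot that follows).

```tsx
// Sketch only — not the actual Details implementation.
import { useEffect } from 'react';
import { useHistory } from 'react-router-dom';

// Hypothetical helper: redirect once the consumer group has been deleted.
const useRedirectAfterDeletion = (isDeleted: boolean, clusterName: string) => {
  const history = useHistory();
  useEffect(() => {
    if (isDeleted) {
      history.push(`/ui/clusters/${clusterName}/consumer-groups`);
    }
  }, [isDeleted, clusterName, history]);
};

export default useRedirectAfterDeletion;
```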
+ 150 - 0
kafka-ui-react-app/src/components/ConsumerGroups/Details/__tests__/__snapshots__/Details.spec.tsx.snap

@@ -0,0 +1,150 @@
+// Jest Snapshot v1, https://goo.gl/fbAQLP
+
+exports[`Details component when consumer groups are NOT fetched Matches the snapshot 1`] = `
+<div
+  className="section"
+>
+  <div
+    className="level"
+  >
+    <div
+      className="level-item level-left"
+    >
+      <Breadcrumb
+        links={
+          Array [
+            Object {
+              "href": "/ui/clusters/local/consumer-groups",
+              "label": "All Consumer Groups",
+            },
+          ]
+        }
+      >
+        test
+      </Breadcrumb>
+    </div>
+  </div>
+  <PageLoader />
+  <ConfirmationModal
+    isOpen={false}
+    onCancel={[Function]}
+    onConfirm={[Function]}
+  >
+    Are you sure you want to delete this consumer group?
+  </ConfirmationModal>
+</div>
+`;
+
+exports[`Details component when consumer groups are fetched Matches the snapshot 1`] = `
+<div
+  className="section"
+>
+  <div
+    className="level"
+  >
+    <div
+      className="level-item level-left"
+    >
+      <Breadcrumb
+        links={
+          Array [
+            Object {
+              "href": "/ui/clusters/local/consumer-groups",
+              "label": "All Consumer Groups",
+            },
+          ]
+        }
+      >
+        test
+      </Breadcrumb>
+    </div>
+  </div>
+  <div
+    className="box"
+  >
+    <div
+      className="level"
+    >
+      <div
+        className="level-item level-right buttons"
+      >
+        <button
+          className="button is-danger"
+          onClick={[Function]}
+          type="button"
+        >
+          Delete consumer group
+        </button>
+      </div>
+    </div>
+    <table
+      className="table is-striped is-fullwidth"
+    >
+      <thead>
+        <tr>
+          <th>
+            Consumer ID
+          </th>
+          <th>
+            Host
+          </th>
+          <th>
+            Topic
+          </th>
+          <th>
+            Partition
+          </th>
+          <th>
+            Messages behind
+          </th>
+          <th>
+            Current offset
+          </th>
+          <th>
+            End offset
+          </th>
+        </tr>
+      </thead>
+      <tbody>
+        <ListItem
+          clusterName="local"
+          consumer={
+            Object {
+              "consumerId": "consumer-messages-consumer-1-122fbf98-643b-491d-8aec-c0641d2513d0",
+              "currentOffset": 394,
+              "endOffset": 394,
+              "host": "/172.31.9.153",
+              "messagesBehind": 0,
+              "partition": 6,
+              "topic": "messages",
+            }
+          }
+          key="consumer-messages-consumer-1-122fbf98-643b-491d-8aec-c0641d2513d0"
+        />
+        <ListItem
+          clusterName="local"
+          consumer={
+            Object {
+              "consumerId": "consumer-messages-consumer-1-122fbf98-643b-491d-8aec-c0641d2513d1",
+              "currentOffset": 384,
+              "endOffset": 384,
+              "host": "/172.31.9.153",
+              "messagesBehind": 0,
+              "partition": 7,
+              "topic": "messages",
+            }
+          }
+          key="consumer-messages-consumer-1-122fbf98-643b-491d-8aec-c0641d2513d1"
+        />
+      </tbody>
+    </table>
+  </div>
+  <ConfirmationModal
+    isOpen={false}
+    onCancel={[Function]}
+    onConfirm={[Function]}
+  >
+    Are you sure you want to delete this consumer group?
+  </ConfirmationModal>
+</div>
+`;

+ 6 - 3
kafka-ui-react-app/src/components/ConsumerGroups/List/List.tsx

@@ -41,8 +41,11 @@ const List: React.FC<Props> = ({ consumerGroups }) => {
              <thead>
                <tr>
                  <th>Consumer group ID</th>
-                  <th>Num of consumers</th>
+                  <th>Num of members</th>
                  <th>Num of topics</th>
+                  <th>Messages behind</th>
+                  <th>Coordinator</th>
+                  <th>State</th>
                </tr>
              </thead>
              <tbody>
@@ -50,11 +53,11 @@ const List: React.FC<Props> = ({ consumerGroups }) => {
                  .filter(
                    (consumerGroup) =>
                      !searchText ||
-                      consumerGroup?.consumerGroupId?.indexOf(searchText) >= 0
+                      consumerGroup?.groupId?.indexOf(searchText) >= 0
                  )
                  .map((consumerGroup) => (
                    <ListItem
-                      key={consumerGroup.consumerGroupId}
+                      key={consumerGroup.groupId}
                      consumerGroup={consumerGroup}
                    />
                  ))}

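The two hunks above (and the ListItem change below) move List from the old consumerGroupId/numConsumers/numTopics fields to the renamed API model. Purely for orientation, the subset of ConsumerGroup that the updated components read looks roughly like this; the authoritative type lives in 'generated-sources', so the optionality and any extra fields here are assumptions.

```tsx
// Illustrative subset only — the real ConsumerGroup type is imported from
// 'generated-sources' and may differ in optionality and carry more fields.
interface ConsumerGroupLike {
  groupId: string;                // was consumerGroupId
  members?: number;               // was numConsumers
  topics?: number;                // was numTopics
  messagesBehind?: number;        // new "Messages behind" column
  coordinator?: { id?: number };  // new "Coordinator" column
  state?: string;                 // rendered via ConsumerGroupStateTag
}
```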
+ 10 - 4
kafka-ui-react-app/src/components/ConsumerGroups/List/ListItem.tsx

@@ -1,6 +1,7 @@
 import React from 'react';
 import { useHistory } from 'react-router-dom';
 import { ConsumerGroup } from 'generated-sources';
+import ConsumerGroupStateTag from 'components/common/ConsumerGroupState/ConsumerGroupStateTag';
 
 const ListItem: React.FC<{ consumerGroup: ConsumerGroup }> = ({
   consumerGroup,
@@ -8,14 +9,19 @@ const ListItem: React.FC<{ consumerGroup: ConsumerGroup }> = ({
   const history = useHistory();
 
   function goToConsumerGroupDetails() {
-    history.push(`consumer-groups/${consumerGroup.consumerGroupId}`);
+    history.push(`consumer-groups/${consumerGroup.groupId}`);
   }
 
   return (
     <tr className="is-clickable" onClick={goToConsumerGroupDetails}>
-      <td>{consumerGroup.consumerGroupId}</td>
-      <td>{consumerGroup.numConsumers}</td>
-      <td>{consumerGroup.numTopics}</td>
+      <td>{consumerGroup.groupId}</td>
+      <td>{consumerGroup.members}</td>
+      <td>{consumerGroup.topics}</td>
+      <td>{consumerGroup.messagesBehind}</td>
+      <td>{consumerGroup.coordinator?.id}</td>
+      <td>
+        <ConsumerGroupStateTag state={consumerGroup.state} />
+      </td>
     </tr>
   );
 };

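ConsumerGroupStateTag is imported for the new State column, but its source is not part of this diff. As a rough idea of the kind of component it is, a hypothetical sketch follows; the state values come from Kafka's consumer-group states, while the class mapping and markup are invented for illustration.

```tsx
// Hypothetical sketch — the real component lives at
// components/common/ConsumerGroupState/ConsumerGroupStateTag and is not shown here.
import React from 'react';

const STATE_CLASS: Record<string, string> = {
  STABLE: 'is-success',
  DEAD: 'is-danger',
  EMPTY: 'is-info',
  PREPARING_REBALANCE: 'is-warning',
  COMPLETING_REBALANCE: 'is-warning',
};

const ConsumerGroupStateTagSketch: React.FC<{ state?: string }> = ({ state }) => (
  <span className={`tag ${(state && STATE_CLASS[state]) || 'is-light'}`}>
    {state || 'UNKNOWN'}
  </span>
);

export default ConsumerGroupStateTagSketch;
```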
Some files were not shown because too many files changed in this diff