
Merge 7d14a3301d0de2bf2a04c3c2680921400fc26f05 into cca2c9699755c2128bb88cf8920c9ed4414dbd58

Ilya Kuramshin, 1 year ago
Commit: 88437b8822
87 files changed, with 4504 insertions and 988 deletions
  1. documentation/compose/kafka-ui-arm64.yaml (+14, -1)
  2. documentation/compose/scripts/prometheus.yaml (+14, -0)
  3. kafka-ui-api/pom.xml (+17, -0)
  4. kafka-ui-api/src/main/antlr4/promql/PromQLLexer.g4 (+176, -0)
  5. kafka-ui-api/src/main/antlr4/promql/PromQLParser.g4 (+114, -0)
  6. kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java (+31, -5)
  7. kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/AbstractAuthSecurityConfig.java (+2, -1)
  8. kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/GraphsController.java (+79, -0)
  9. kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/PrometheusExposeController.java (+32, -0)
  10. kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java (+26, -22)
  11. kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/DescribeLogDirsMapper.java (+11, -11)
  12. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalBroker.java (+3, -3)
  13. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalBrokerDiskUsage.java (+0, -11)
  14. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalClusterMetrics.java (+0, -55)
  15. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalClusterState.java (+17, -14)
  16. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalLogDirStats.java (+51, -12)
  17. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalPartitionsOffsets.java (+8, -0)
  18. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalSegmentSizeDto.java (+0, -13)
  19. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalTopic.java (+18, -19)
  20. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java (+5, -1)
  21. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/Metrics.java (+22, -20)
  22. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/MetricsConfig.java (+0, -22)
  23. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/MetricsScrapeProperties.java (+46, -0)
  24. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/PartitionDistributionStats.java (+10, -3)
  25. kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/Statistics.java (+9, -6)
  26. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerService.java (+7, -6)
  27. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStatisticsScheduler.java (+2, -2)
  28. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaClusterFactory.java (+58, -28)
  29. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java (+7, -2)
  30. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ReactiveAdminClient.java (+3, -10)
  31. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/StatisticsCache.java (+10, -18)
  32. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/StatisticsService.java (+25, -37)
  33. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/TopicsService.java (+30, -24)
  34. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/graphs/GraphDescription.java (+25, -0)
  35. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/graphs/GraphDescriptions.java (+74, -0)
  36. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/graphs/GraphsService.java (+95, -0)
  37. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/graphs/PromQueryLangGrammar.java (+35, -0)
  38. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/graphs/PromQueryTemplate.java (+51, -0)
  39. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/integration/odd/TopicsExporter.java (+12, -11)
  40. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/MetricsCollector.java (+0, -69)
  41. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/MetricsRetriever.java (+0, -9)
  42. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/PrometheusEndpointMetricsParser.java (+0, -46)
  43. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/PrometheusMetricsRetriever.java (+0, -70)
  44. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/RawMetric.java (+22, -37)
  45. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/SummarizedMetrics.java (+73, -0)
  46. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/WellKnownMetrics.java (+0, -67)
  47. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/prometheus/PrometheusExpose.java (+123, -0)
  48. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/IoRatesMetricsScanner.java (+83, -0)
  49. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/MetricsScrapping.java (+94, -0)
  50. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/PerBrokerScrapedMetrics.java (+19, -0)
  51. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/ScrapedClusterState.java (+198, -0)
  52. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/inferred/InferredMetrics.java (+24, -0)
  53. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/inferred/InferredMetricsScraper.java (+226, -0)
  54. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/jmx/JmxMetricsFormatter.java (+3, -1)
  55. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/jmx/JmxMetricsRetriever.java (+31, -31)
  56. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/jmx/JmxMetricsScraper.java (+36, -0)
  57. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/jmx/JmxSslSocketFactory.java (+2, -3)
  58. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/prometheus/PrometheusEndpointParser.java (+317, -0)
  59. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/prometheus/PrometheusMetricsRetriever.java (+54, -0)
  60. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/prometheus/PrometheusScraper.java (+30, -0)
  61. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/sink/KafkaSink.java (+73, -0)
  62. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/sink/MetricsSink.java (+56, -0)
  63. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/sink/PrometheusPushGatewaySink.java (+62, -0)
  64. kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/sink/PrometheusRemoteWriteSink.java (+79, -0)
  65. kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/KafkaServicesValidation.java (+15, -1)
  66. kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ReactiveFailover.java (+0, -6)
  67. kafka-ui-api/src/test/java/com/provectus/kafka/ui/container/PrometheusContainer.java (+19, -0)
  68. kafka-ui-api/src/test/java/com/provectus/kafka/ui/model/PartitionDistributionStatsTest.java (+15, -20)
  69. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/TopicsServicePaginationTest.java (+8, -8)
  70. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/integration/odd/TopicsExporterTest.java (+53, -39)
  71. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ksql/KsqlApiClientTest.java (+2, -0)
  72. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/PrometheusEndpointMetricsParserTest.java (+0, -30)
  73. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/PrometheusMetricsRetrieverTest.java (+0, -97)
  74. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/WellKnownMetricsTest.java (+0, -93)
  75. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/prometheus/PrometheusExposeTest.java (+53, -0)
  76. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/scrape/IoRatesMetricsScannerTest.java (+75, -0)
  77. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/scrape/inferred/InferredMetricsScraperTest.java (+121, -0)
  78. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/scrape/jmx/JmxMetricsFormatterTest.java (+3, -2)
  79. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/scrape/prometheus/PrometheusEndpointParserTest.java (+186, -0)
  80. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/scrape/prometheus/PrometheusMetricsRetrieverTest.java (+118, -0)
  81. kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/sink/PrometheusRemoteWriteSinkTest.java (+62, -0)
  82. kafka-ui-contract/pom.xml (+63, -1)
  83. kafka-ui-contract/src/main/resources/proto/prometheus-remote-write-api/gogoproto/gogo.proto (+133, -0)
  84. kafka-ui-contract/src/main/resources/proto/prometheus-remote-write-api/remote.proto (+88, -0)
  85. kafka-ui-contract/src/main/resources/proto/prometheus-remote-write-api/types.proto (+187, -0)
  86. kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml (+194, -1)
  87. kafka-ui-contract/src/main/resources/swagger/prometheus-query-api.yaml (+365, -0)

+ 14 - 1
documentation/compose/kafka-ui-arm64.yaml

@@ -13,16 +13,29 @@ services:
       - schema-registry0
       - kafka-connect0
     environment:
+      DYNAMIC_CONFIG_ENABLED: 'true'  # not necessary, added for tests
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
       KAFKA_CLUSTERS_0_METRICS_PORT: 9997
+      KAFKA_CLUSTERS_0_METRICS_STORE_PROMETHEUS_URL: "http://prometheus:9090"
+      KAFKA_CLUSTERS_0_METRICS_STORE_PROMETHEUS_REMOTEWRITE: 'true'
+      KAFKA_CLUSTERS_0_METRICS_STORE_KAFKA_TOPIC: "kafka_metrics"
       KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schema-registry0:8085
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
-      DYNAMIC_CONFIG_ENABLED: 'true'  # not necessary, added for tests
       KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED: 'true'
       KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: 'true'
 
+  prometheus:
+    image: prom/prometheus:latest
+    hostname: prometheus
+    container_name: prometheus
+    ports:
+      - 9090:9090
+    volumes:
+      - ./scripts:/etc/prometheus
+    command: --web.enable-remote-write-receiver  --config.file=/etc/prometheus/prometheus.yaml
+
   kafka0:
     image: confluentinc/cp-kafka:7.2.1.arm64
     hostname: kafka0
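
Note: the --web.enable-remote-write-receiver flag in the prometheus service's command is what allows this instance to accept remote-write requests; the KAFKA_CLUSTERS_0_METRICS_STORE_PROMETHEUS_REMOTEWRITE: 'true' setting (handled by the new PrometheusRemoteWriteSink added in this commit) depends on it.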

+ 14 - 0
documentation/compose/scripts/prometheus.yaml

@@ -0,0 +1,14 @@
+global:
+  scrape_interval: 30s
+  scrape_timeout: 10s
+
+rule_files:
+  - alert.yml
+
+scrape_configs:
+  - job_name: services
+    metrics_path: /metrics
+    static_configs:
+      - targets:
+          - 'prometheus:9090'
+#          - 'kafka-ui:8080'

+ 17 - 0
kafka-ui-api/pom.xml

@@ -239,6 +239,23 @@
             <artifactId>spring-security-ldap</artifactId>
         </dependency>
 
+        <dependency>
+            <groupId>io.prometheus</groupId>
+            <artifactId>simpleclient</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>io.prometheus</groupId>
+            <artifactId>simpleclient_common</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>io.prometheus</groupId>
+            <artifactId>simpleclient_pushgateway</artifactId>
+        </dependency>
+        <dependency>
+            <groupId>org.xerial.snappy</groupId>
+            <artifactId>snappy-java</artifactId>
+            <version>1.1.8.4</version>
+        </dependency>
 
         <dependency>
             <groupId>org.codehaus.groovy</groupId>
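
These simpleclient artifacts underpin the rest of the change: the reworked code passes metrics around as io.prometheus.client.Collector.MetricFamilySamples and serializes them with the text exposition format from simpleclient_common. A minimal standalone sketch of that flow (metric name, help text, and labels are invented for illustration):

import io.prometheus.client.Collector;
import io.prometheus.client.Collector.MetricFamilySamples;
import io.prometheus.client.Collector.MetricFamilySamples.Sample;
import io.prometheus.client.exporter.common.TextFormat;
import java.io.StringWriter;
import java.util.Collections;
import java.util.List;

public class ExpositionSketch {
  public static void main(String[] args) throws Exception {
    // One gauge family holding a single labeled sample.
    Sample sample = new Sample(
        "kafka_broker_bytes_in_per_sec", List.of("cluster"), List.of("local"), 42.0);
    MetricFamilySamples family = new MetricFamilySamples(
        "kafka_broker_bytes_in_per_sec", Collector.Type.GAUGE, "broker input rate", List.of(sample));

    // Render the family in the Prometheus text format, i.e. what a /metrics endpoint serves.
    StringWriter out = new StringWriter();
    TextFormat.write004(out, Collections.enumeration(List.of(family)));
    System.out.print(out);
  }
}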

+ 176 - 0
kafka-ui-api/src/main/antlr4/promql/PromQLLexer.g4

@@ -0,0 +1,176 @@
+lexer grammar PromQLLexer;
+
+channels { WHITESPACE, COMMENTS }
+
+// All keywords in PromQL are case insensitive, it is just function,
+// label and metric names that are not.
+options { caseInsensitive=true; }
+
+fragment NUMERAL: [0-9]+ ('.' [0-9]+)?;
+
+fragment SCIENTIFIC_NUMBER
+   : NUMERAL ('e' [-+]? NUMERAL)?
+   ;
+
+NUMBER
+    : NUMERAL
+    | SCIENTIFIC_NUMBER;
+
+STRING
+    : '\'' (~('\'' | '\\') | '\\' .)* '\''
+    | '"' (~('"' | '\\') | '\\' .)* '"'
+    ;
+
+// Binary operators
+
+ADD:  '+';
+SUB:  '-';
+MULT: '*';
+DIV:  '/';
+MOD:  '%';
+POW:  '^';
+
+AND:    'and';
+OR:     'or';
+UNLESS: 'unless';
+
+// Comparison operators
+
+EQ:  '=';
+DEQ: '==';
+NE:  '!=';
+GT:  '>';
+LT:  '<';
+GE:  '>=';
+LE:  '<=';
+RE:  '=~';
+NRE: '!~';
+
+// Aggregation modifiers
+
+BY:      'by';
+WITHOUT: 'without';
+
+// Join modifiers
+
+ON:          'on';
+IGNORING:    'ignoring';
+GROUP_LEFT:  'group_left';
+GROUP_RIGHT: 'group_right';
+
+OFFSET: 'offset';
+
+BOOL: 'bool';
+
+AGGREGATION_OPERATOR
+    : 'sum'
+    | 'min'
+    | 'max'
+    | 'avg'
+    | 'group'
+    | 'stddev'
+    | 'stdvar'
+    | 'count'
+    | 'count_values'
+    | 'bottomk'
+    | 'topk'
+    | 'quantile'
+    ;
+
+FUNCTION options { caseInsensitive=false; }
+    : 'abs'
+    | 'absent'
+    | 'absent_over_time'
+    | 'ceil'
+    | 'changes'
+    | 'clamp_max'
+    | 'clamp_min'
+    | 'day_of_month'
+    | 'day_of_week'
+    | 'days_in_month'
+    | 'delta'
+    | 'deriv'
+    | 'exp'
+    | 'floor'
+    | 'histogram_quantile'
+    | 'holt_winters'
+    | 'hour'
+    | 'idelta'
+    | 'increase'
+    | 'irate'
+    | 'label_join'
+    | 'label_replace'
+    | 'ln'
+    | 'log2'
+    | 'log10'
+    | 'minute'
+    | 'month'
+    | 'predict_linear'
+    | 'rate'
+    | 'resets'
+    | 'round'
+    | 'scalar'
+    | 'sort'
+    | 'sort_desc'
+    | 'sqrt'
+    | 'time'
+    | 'timestamp'
+    | 'vector'
+    | 'year'
+    | 'avg_over_time'
+    | 'min_over_time'
+    | 'max_over_time'
+    | 'sum_over_time'
+    | 'count_over_time'
+    | 'quantile_over_time'
+    | 'stddev_over_time'
+    | 'stdvar_over_time'
+    | 'last_over_time'
+    | 'acos'
+    | 'acosh'
+    | 'asin'
+    | 'asinh'
+    | 'atan'
+    | 'atanh'
+    | 'cos'
+    | 'cosh'
+    | 'sin'
+    | 'sinh'
+    | 'tan'
+    | 'tanh'
+    | 'deg'
+    | 'pi'
+    | 'rad'
+    ;
+
+LEFT_BRACE:  '{';
+RIGHT_BRACE: '}';
+
+LEFT_PAREN:  '(';
+RIGHT_PAREN: ')';
+
+LEFT_BRACKET:  '[';
+RIGHT_BRACKET: ']';
+
+COMMA: ',';
+
+AT: '@';
+
+SUBQUERY_RANGE
+     : LEFT_BRACKET DURATION ':' DURATION? RIGHT_BRACKET;
+
+TIME_RANGE
+    : LEFT_BRACKET DURATION RIGHT_BRACKET;
+
+// The proper order (longest to the shortest) must be validated after parsing
+DURATION: ([0-9]+ ('ms' | [smhdwy]))+;
+
+METRIC_NAME: [a-z_:] [a-z0-9_:]*;
+LABEL_NAME:  [a-z_] [a-z0-9_]*;
+
+
+
+WS: [\r\t\n ]+ -> channel(WHITESPACE);
+SL_COMMENT
+    : '#' .*? '\n' -> channel(COMMENTS)
+    ;

+ 114 - 0
kafka-ui-api/src/main/antlr4/promql/PromQLParser.g4

@@ -0,0 +1,114 @@
+parser grammar PromQLParser;
+
+options { tokenVocab = PromQLLexer; }
+
+expression: vectorOperation EOF;
+
+// Binary operations are ordered by precedence
+
+// Unary operations have the same precedence as multiplications
+
+vectorOperation
+    : <assoc=right> vectorOperation powOp vectorOperation
+    | <assoc=right> vectorOperation subqueryOp
+    | unaryOp vectorOperation
+    | vectorOperation multOp vectorOperation
+    | vectorOperation addOp vectorOperation
+    | vectorOperation compareOp vectorOperation
+    | vectorOperation andUnlessOp vectorOperation
+    | vectorOperation orOp vectorOperation
+    | vectorOperation vectorMatchOp vectorOperation
+    | vectorOperation AT vectorOperation
+    | vector
+    ;
+
+// Operators
+
+unaryOp:        (ADD | SUB);
+powOp:          POW grouping?;
+multOp:         (MULT | DIV | MOD) grouping?;
+addOp:          (ADD | SUB) grouping?;
+compareOp:      (DEQ | NE | GT | LT | GE | LE) BOOL? grouping?;
+andUnlessOp:    (AND | UNLESS) grouping?;
+orOp:           OR grouping?;
+vectorMatchOp:  (ON | UNLESS) grouping?;
+subqueryOp:     SUBQUERY_RANGE offsetOp?;
+offsetOp:       OFFSET DURATION;
+
+vector
+    : function_
+    | aggregation
+    | instantSelector
+    | matrixSelector
+    | offset
+    | literal
+    | parens
+    ;
+
+parens: LEFT_PAREN vectorOperation RIGHT_PAREN;
+
+// Selectors
+
+instantSelector
+    : METRIC_NAME (LEFT_BRACE labelMatcherList? RIGHT_BRACE)?
+    | LEFT_BRACE labelMatcherList RIGHT_BRACE
+    ;
+
+labelMatcher:         labelName labelMatcherOperator STRING;
+labelMatcherOperator: EQ | NE | RE | NRE;
+labelMatcherList:     labelMatcher (COMMA labelMatcher)* COMMA?;
+
+matrixSelector: instantSelector TIME_RANGE;
+
+offset
+    : instantSelector OFFSET DURATION
+    | matrixSelector OFFSET DURATION
+    ;
+
+// Functions
+
+function_: FUNCTION LEFT_PAREN (parameter (COMMA parameter)*)? RIGHT_PAREN;
+
+parameter:     literal | vectorOperation;
+parameterList: LEFT_PAREN (parameter (COMMA parameter)*)? RIGHT_PAREN;
+
+// Aggregations
+
+aggregation
+    : AGGREGATION_OPERATOR parameterList
+    | AGGREGATION_OPERATOR (by | without) parameterList
+    | AGGREGATION_OPERATOR parameterList ( by | without)
+    ;
+by:      BY labelNameList;
+without: WITHOUT labelNameList;
+
+// Vector one-to-one/one-to-many joins
+
+grouping:   (on_ | ignoring) (groupLeft | groupRight)?;
+on_:         ON labelNameList;
+ignoring:   IGNORING labelNameList;
+groupLeft:  GROUP_LEFT labelNameList?;
+groupRight: GROUP_RIGHT labelNameList?;
+
+// Label names
+
+labelName:     keyword | METRIC_NAME | LABEL_NAME;
+labelNameList: LEFT_PAREN (labelName (COMMA labelName)*)? RIGHT_PAREN;
+
+keyword
+    : AND
+    | OR
+    | UNLESS
+    | BY
+    | WITHOUT
+    | ON
+    | IGNORING
+    | GROUP_LEFT
+    | GROUP_RIGHT
+    | OFFSET
+    | BOOL
+    | AGGREGATION_OPERATOR
+    | FUNCTION
+    ;
+
+literal: NUMBER | STRING;
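
Together these two grammars let ANTLR generate a PromQL lexer/parser pair, which the new PromQueryLangGrammar class under service/graphs presumably wraps. A hypothetical sketch of validating a query with the generated classes, assuming the ANTLR Maven plugin emits them into a promql package:

import org.antlr.v4.runtime.BailErrorStrategy;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import promql.PromQLLexer;
import promql.PromQLParser;

public class PromQlParseSketch {
  public static void main(String[] args) {
    var lexer = new PromQLLexer(CharStreams.fromString(
        "sum(rate(kafka_server_bytes_in_per_sec{cluster=\"local\"}[5m])) by (topic)"));
    var parser = new PromQLParser(new CommonTokenStream(lexer));
    parser.setErrorHandler(new BailErrorStrategy()); // fail fast instead of recovering
    parser.expression(); // throws ParseCancellationException on invalid PromQL
  }
}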

+ 31 - 5
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/ClustersProperties.java

@@ -1,6 +1,7 @@
 package com.provectus.kafka.ui.config;
 
-import com.provectus.kafka.ui.model.MetricsConfig;
+import static com.provectus.kafka.ui.model.MetricsScrapeProperties.JMX_METRICS_TYPE;
+
 import jakarta.annotation.PostConstruct;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -42,7 +43,7 @@ public class ClustersProperties {
     KsqldbServerAuth ksqldbServerAuth;
     KeystoreConfig ksqldbServerSsl;
     List<ConnectCluster> kafkaConnect;
-    MetricsConfigData metrics;
+    MetricsConfig metrics;
     Map<String, Object> properties;
     boolean readOnly = false;
     List<SerdeConfig> serde;
@@ -62,8 +63,8 @@ public class ClustersProperties {
   }
 
   @Data
-  @ToString(exclude = "password")
-  public static class MetricsConfigData {
+  @ToString(exclude = {"password", "keystorePassword"})
+  public static class MetricsConfig {
     String type;
     Integer port;
     Boolean ssl;
@@ -71,6 +72,31 @@ public class ClustersProperties {
     String password;
     String keystoreLocation;
     String keystorePassword;
+
+    Boolean prometheusExpose;
+    MetricsStorage store;
+  }
+
+  @Data
+  public static class MetricsStorage {
+    PrometheusStorage prometheus;
+    KafkaMetricsStorage kafka;
+  }
+
+  @Data
+  public static class KafkaMetricsStorage  {
+    String topic;
+  }
+
+  @Data
+  @ToString(exclude = {"pushGatewayPassword"})
+  public static class PrometheusStorage {
+    String url;
+    String pushGatewayUrl;
+    String pushGatewayUsername;
+    String pushGatewayPassword;
+    String pushGatewayJobName;
+    Boolean remoteWrite;
   }
 
   @Data
@@ -171,7 +197,7 @@ public class ClustersProperties {
   private void setMetricsDefaults() {
     for (Cluster cluster : clusters) {
       if (cluster.getMetrics() != null && !StringUtils.hasText(cluster.getMetrics().getType())) {
-        cluster.getMetrics().setType(MetricsConfig.JMX_METRICS_TYPE);
+        cluster.getMetrics().setType(JMX_METRICS_TYPE);
       }
     }
   }
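
For reference, these nested properties mirror the environment variables in the compose file above: Spring's relaxed binding turns KAFKA_CLUSTERS_0_METRICS_STORE_PROMETHEUS_URL into clusters[0].metrics.store.prometheus.url. A sketch populating the same shape by hand, relying on the Lombok-generated setters:

import com.provectus.kafka.ui.config.ClustersProperties;

class MetricsStoreConfigSketch {
  static ClustersProperties.MetricsConfig sample() {
    var prometheus = new ClustersProperties.PrometheusStorage();
    prometheus.setUrl("http://prometheus:9090");
    prometheus.setRemoteWrite(true);

    var kafka = new ClustersProperties.KafkaMetricsStorage();
    kafka.setTopic("kafka_metrics");

    var store = new ClustersProperties.MetricsStorage();
    store.setPrometheus(prometheus);
    store.setKafka(kafka);

    var metrics = new ClustersProperties.MetricsConfig();
    metrics.setType("JMX");
    metrics.setPort(9997);
    metrics.setStore(store);
    return metrics;
  }
}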

+ 2 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/config/auth/AbstractAuthSecurityConfig.java

@@ -18,7 +18,8 @@ abstract class AbstractAuthSecurityConfig {
       "/login",
       "/logout",
       "/oauth2/**",
-      "/static/**"
+      "/static/**",
+      "/metrics"
   };
 
 }

+ 79 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/GraphsController.java

@@ -0,0 +1,79 @@
+package com.provectus.kafka.ui.controller;
+
+import com.provectus.kafka.ui.api.GraphsApi;
+import com.provectus.kafka.ui.model.GraphDataRequestDTO;
+import com.provectus.kafka.ui.model.GraphDescriptionDTO;
+import com.provectus.kafka.ui.model.GraphDescriptionsDTO;
+import com.provectus.kafka.ui.model.GraphParameterDTO;
+import com.provectus.kafka.ui.model.PrometheusApiQueryResponseDTO;
+import com.provectus.kafka.ui.model.rbac.AccessContext;
+import com.provectus.kafka.ui.service.graphs.GraphDescription;
+import com.provectus.kafka.ui.service.graphs.GraphsService;
+import java.time.Duration;
+import java.time.OffsetDateTime;
+import java.util.Optional;
+import lombok.RequiredArgsConstructor;
+import org.mapstruct.Mapper;
+import org.mapstruct.factory.Mappers;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.RestController;
+import org.springframework.web.server.ServerWebExchange;
+import prometheus.query.model.QueryResponse;
+import reactor.core.publisher.Mono;
+
+@RestController
+@RequiredArgsConstructor
+public class GraphsController extends AbstractController implements GraphsApi {
+
+  private static final PrometheusApiMapper MAPPER = Mappers.getMapper(PrometheusApiMapper.class);
+
+  @Mapper
+  interface PrometheusApiMapper {
+    PrometheusApiQueryResponseDTO fromClientResponse(QueryResponse resp);
+  }
+
+  private final GraphsService graphsService;
+
+  @Override
+  public Mono<ResponseEntity<PrometheusApiQueryResponseDTO>> getGraphData(String clusterName,
+                                                                          Mono<GraphDataRequestDTO> graphDataRequestDto,
+                                                                          ServerWebExchange exchange) {
+    var context = AccessContext.builder()
+        .cluster(clusterName)
+        .operationName("getGraphData")
+        .build();
+
+    return accessControlService.validateAccess(context)
+        .then(
+            graphDataRequestDto.flatMap(req ->
+                    graphsService.getGraphData(
+                        getCluster(clusterName),
+                        req.getId(),
+                        Optional.ofNullable(req.getFrom()).map(OffsetDateTime::toInstant).orElse(null),
+                        Optional.ofNullable(req.getTo()).map(OffsetDateTime::toInstant).orElse(null),
+                        req.getParameters()
+                    ).map(MAPPER::fromClientResponse))
+                .map(ResponseEntity::ok)
+        ).doOnEach(sig -> auditService.audit(context, sig));
+  }
+
+  @Override
+  public Mono<ResponseEntity<GraphDescriptionsDTO>> getGraphsList(String clusterName,
+                                                                  ServerWebExchange exchange) {
+    var context = AccessContext.builder()
+        .cluster(clusterName)
+        .operationName("getGraphsList")
+        .build();
+
+    var graphs = graphsService.getGraphs(getCluster(clusterName));
+    return accessControlService.validateAccess(context).then(
+        Mono.just(ResponseEntity.ok(new GraphDescriptionsDTO().graphs(graphs.map(this::map).toList()))));
+  }
+
+  private GraphDescriptionDTO map(GraphDescription graph) {
+    return new GraphDescriptionDTO(graph.id())
+        .defaultPeriod(Optional.ofNullable(graph.defaultInterval()).map(Duration::toString).orElse(null))
+        .type(graph.isRange() ? GraphDescriptionDTO.TypeEnum.RANGE : GraphDescriptionDTO.TypeEnum.INSTANT)
+        .parameters(graph.params().stream().map(GraphParameterDTO::new).toList());
+  }
+}

+ 32 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/PrometheusExposeController.java

@@ -0,0 +1,32 @@
+package com.provectus.kafka.ui.controller;
+
+import com.provectus.kafka.ui.api.PrometheusExposeApi;
+import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.service.StatisticsCache;
+import com.provectus.kafka.ui.service.metrics.prometheus.PrometheusExpose;
+import java.util.stream.Collectors;
+import lombok.RequiredArgsConstructor;
+import org.springframework.http.ResponseEntity;
+import org.springframework.web.bind.annotation.RestController;
+import org.springframework.web.server.ServerWebExchange;
+import reactor.core.publisher.Mono;
+
+@RestController
+@RequiredArgsConstructor
+public class PrometheusExposeController extends AbstractController implements PrometheusExposeApi {
+
+  private final StatisticsCache statisticsCache;
+
+  @Override
+  public Mono<ResponseEntity<String>> getAllMetrics(ServerWebExchange exchange) {
+    return Mono.just(
+        PrometheusExpose.exposeAllMetrics(
+            clustersStorage.getKafkaClusters()
+                .stream()
+                .filter(KafkaCluster::isExposeMetricsViaPrometheusEndpoint)
+                .collect(Collectors.toMap(KafkaCluster::getName, c -> statisticsCache.get(c).getMetrics()))
+        )
+    );
+  }
+
+}

+ 26 - 22
kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/ClusterMapper.java

@@ -1,9 +1,12 @@
 package com.provectus.kafka.ui.mapper;
 
+import static io.prometheus.client.Collector.MetricFamilySamples;
+import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toMap;
+
 import com.provectus.kafka.ui.config.ClustersProperties;
 import com.provectus.kafka.ui.model.BrokerConfigDTO;
 import com.provectus.kafka.ui.model.BrokerDTO;
-import com.provectus.kafka.ui.model.BrokerDiskUsageDTO;
 import com.provectus.kafka.ui.model.BrokerMetricsDTO;
 import com.provectus.kafka.ui.model.ClusterDTO;
 import com.provectus.kafka.ui.model.ClusterFeature;
@@ -14,7 +17,6 @@ import com.provectus.kafka.ui.model.ConfigSynonymDTO;
 import com.provectus.kafka.ui.model.ConnectDTO;
 import com.provectus.kafka.ui.model.InternalBroker;
 import com.provectus.kafka.ui.model.InternalBrokerConfig;
-import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
 import com.provectus.kafka.ui.model.InternalClusterState;
 import com.provectus.kafka.ui.model.InternalPartition;
 import com.provectus.kafka.ui.model.InternalReplica;
@@ -30,10 +32,13 @@ import com.provectus.kafka.ui.model.ReplicaDTO;
 import com.provectus.kafka.ui.model.TopicConfigDTO;
 import com.provectus.kafka.ui.model.TopicDTO;
 import com.provectus.kafka.ui.model.TopicDetailsDTO;
-import com.provectus.kafka.ui.service.metrics.RawMetric;
+import com.provectus.kafka.ui.service.metrics.SummarizedMetrics;
+import java.math.BigDecimal;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
 import org.apache.kafka.clients.admin.ConfigEntry;
 import org.apache.kafka.common.acl.AccessControlEntry;
 import org.apache.kafka.common.acl.AclBinding;
@@ -52,21 +57,28 @@ public interface ClusterMapper {
 
   ClusterStatsDTO toClusterStats(InternalClusterState clusterState);
 
+  @Deprecated
   default ClusterMetricsDTO toClusterMetrics(Metrics metrics) {
     return new ClusterMetricsDTO()
-        .items(metrics.getSummarizedMetrics().map(this::convert).collect(Collectors.toList()));
+        .items(convert(new SummarizedMetrics(metrics).asStream()).toList());
   }
 
-  private MetricDTO convert(RawMetric rawMetric) {
-    return new MetricDTO()
-        .name(rawMetric.name())
-        .labels(rawMetric.labels())
-        .value(rawMetric.value());
+  private Stream<MetricDTO> convert(Stream<MetricFamilySamples> metrics) {
+    return metrics
+        .flatMap(m -> m.samples.stream())
+        .map(s ->
+            new MetricDTO()
+                .name(s.name)
+                .labels(IntStream.range(0, s.labelNames.size())
+                    .boxed()
+                    //collecting to map, keeping order
+                    .collect(toMap(s.labelNames::get, s.labelValues::get, (m1, m2) -> null, LinkedHashMap::new)))
+                .value(BigDecimal.valueOf(s.value))
+        );
   }
 
-  default BrokerMetricsDTO toBrokerMetrics(List<RawMetric> metrics) {
-    return new BrokerMetricsDTO()
-        .metrics(metrics.stream().map(this::convert).collect(Collectors.toList()));
+  default BrokerMetricsDTO toBrokerMetrics(List<MetricFamilySamples> metrics) {
+    return new BrokerMetricsDTO().metrics(convert(metrics.stream()).toList());
   }
 
   @Mapping(target = "isSensitive", source = "sensitive")
@@ -107,15 +119,7 @@ public interface ClusterMapper {
   List<ClusterDTO.FeaturesEnum> toFeaturesEnum(List<ClusterFeature> features);
 
   default List<PartitionDTO> map(Map<Integer, InternalPartition> map) {
-    return map.values().stream().map(this::toPartition).collect(Collectors.toList());
-  }
-
-  default BrokerDiskUsageDTO map(Integer id, InternalBrokerDiskUsage internalBrokerDiskUsage) {
-    final BrokerDiskUsageDTO brokerDiskUsage = new BrokerDiskUsageDTO();
-    brokerDiskUsage.setBrokerId(id);
-    brokerDiskUsage.segmentCount((int) internalBrokerDiskUsage.getSegmentCount());
-    brokerDiskUsage.segmentSize(internalBrokerDiskUsage.getSegmentSize());
-    return brokerDiskUsage;
+    return map.values().stream().map(this::toPartition).collect(toList());
   }
 
   static KafkaAclDTO.OperationEnum mapAclOperation(AclOperation operation) {
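
The rewritten convert method zips the parallel labelNames/labelValues lists by index into an insertion-ordered map. The same idiom in isolation (plain JDK, values invented):

import static java.util.stream.Collectors.toMap;

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.IntStream;

public class LabelZipSketch {
  public static void main(String[] args) {
    List<String> names = List.of("cluster", "topic");
    List<String> values = List.of("local", "orders");
    // Index-zip two parallel lists into a LinkedHashMap, preserving label order.
    Map<String, String> labels = IntStream.range(0, names.size())
        .boxed()
        .collect(toMap(names::get, values::get, (a, b) -> a, LinkedHashMap::new));
    System.out.println(labels); // {cluster=local, topic=orders}
  }
}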

+ 11 - 11
kafka-ui-api/src/main/java/com/provectus/kafka/ui/mapper/DescribeLogDirsMapper.java

@@ -7,6 +7,8 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
+import org.apache.kafka.clients.admin.LogDirDescription;
+import org.apache.kafka.clients.admin.ReplicaInfo;
 import org.apache.kafka.common.TopicPartition;
 import org.apache.kafka.common.protocol.Errors;
 import org.apache.kafka.common.requests.DescribeLogDirsResponse;
@@ -16,7 +18,7 @@ import org.springframework.stereotype.Component;
 public class DescribeLogDirsMapper {
 
   public List<BrokersLogdirsDTO> toBrokerLogDirsList(
-      Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>> logDirsInfo) {
+      Map<Integer, Map<String, LogDirDescription>> logDirsInfo) {
 
     return logDirsInfo.entrySet().stream().map(
         mapEntry -> mapEntry.getValue().entrySet().stream()
@@ -26,13 +28,13 @@ public class DescribeLogDirsMapper {
   }
 
   private BrokersLogdirsDTO toBrokerLogDirs(Integer broker, String dirName,
-                                            DescribeLogDirsResponse.LogDirInfo logDirInfo) {
+                                            LogDirDescription logDirInfo) {
     BrokersLogdirsDTO result = new BrokersLogdirsDTO();
     result.setName(dirName);
-    if (logDirInfo.error != null && logDirInfo.error != Errors.NONE) {
-      result.setError(logDirInfo.error.message());
+    if (logDirInfo.error() != null) {
+      result.setError(logDirInfo.error().getMessage());
     }
-    var topics = logDirInfo.replicaInfos.entrySet().stream()
+    var topics = logDirInfo.replicaInfos().entrySet().stream()
         .collect(Collectors.groupingBy(e -> e.getKey().topic())).entrySet().stream()
         .map(e -> toTopicLogDirs(broker, e.getKey(), e.getValue()))
         .collect(Collectors.toList());
@@ -41,8 +43,7 @@ public class DescribeLogDirsMapper {
   }
 
   private BrokerTopicLogdirsDTO toTopicLogDirs(Integer broker, String name,
-                                               List<Map.Entry<TopicPartition,
-                                                   DescribeLogDirsResponse.ReplicaInfo>> partitions) {
+                                               List<Map.Entry<TopicPartition, ReplicaInfo>> partitions) {
     BrokerTopicLogdirsDTO topic = new BrokerTopicLogdirsDTO();
     topic.setName(name);
     topic.setPartitions(
@@ -54,13 +55,12 @@ public class DescribeLogDirsMapper {
   }
 
   private BrokerTopicPartitionLogdirDTO topicPartitionLogDir(Integer broker, Integer partition,
-                                                             DescribeLogDirsResponse.ReplicaInfo
-                                                                 replicaInfo) {
+                                                             ReplicaInfo replicaInfo) {
     BrokerTopicPartitionLogdirDTO logDir = new BrokerTopicPartitionLogdirDTO();
     logDir.setBroker(broker);
     logDir.setPartition(partition);
-    logDir.setSize(replicaInfo.size);
-    logDir.setOffsetLag(replicaInfo.offsetLag);
+    logDir.setSize(replicaInfo.size());
+    logDir.setOffsetLag(replicaInfo.offsetLag());
     return logDir;
   }
 }

+ 3 - 3
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalBroker.java

@@ -21,12 +21,12 @@ public class InternalBroker {
 
   public InternalBroker(Node node,
                         PartitionDistributionStats partitionDistribution,
-                        Statistics statistics) {
+                        Metrics metrics) {
     this.id = node.id();
     this.host = node.host();
     this.port = node.port();
-    this.bytesInPerSec = statistics.getMetrics().getBrokerBytesInPerSec().get(node.id());
-    this.bytesOutPerSec = statistics.getMetrics().getBrokerBytesOutPerSec().get(node.id());
+    this.bytesInPerSec = metrics.getIoRates().brokerBytesInPerSec().get(node.id());
+    this.bytesOutPerSec = metrics.getIoRates().brokerBytesOutPerSec().get(node.id());
     this.partitionsLeader = partitionDistribution.getPartitionLeaders().get(node);
     this.partitions = partitionDistribution.getPartitionsCount().get(node);
     this.inSyncPartitions = partitionDistribution.getInSyncPartitions().get(node);

+ 0 - 11
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalBrokerDiskUsage.java

@@ -1,11 +0,0 @@
-package com.provectus.kafka.ui.model;
-
-import lombok.Builder;
-import lombok.Data;
-
-@Data
-@Builder(toBuilder = true)
-public class InternalBrokerDiskUsage {
-  private final long segmentCount;
-  private final long segmentSize;
-}

+ 0 - 55
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalClusterMetrics.java

@@ -1,55 +0,0 @@
-package com.provectus.kafka.ui.model;
-
-import java.math.BigDecimal;
-import java.util.List;
-import java.util.Map;
-import javax.annotation.Nullable;
-import lombok.Builder;
-import lombok.Data;
-
-
-@Data
-@Builder(toBuilder = true)
-public class InternalClusterMetrics {
-
-  public static InternalClusterMetrics empty() {
-    return InternalClusterMetrics.builder()
-        .brokers(List.of())
-        .topics(Map.of())
-        .status(ServerStatusDTO.OFFLINE)
-        .internalBrokerMetrics(Map.of())
-        .metrics(List.of())
-        .version("unknown")
-        .build();
-  }
-
-  private final String version;
-
-  private final ServerStatusDTO status;
-  private final Throwable lastKafkaException;
-
-  private final int brokerCount;
-  private final int activeControllers;
-  private final List<Integer> brokers;
-
-  private final int topicCount;
-  private final Map<String, InternalTopic> topics;
-
-  // partitions stats
-  private final int underReplicatedPartitionCount;
-  private final int onlinePartitionCount;
-  private final int offlinePartitionCount;
-  private final int inSyncReplicasCount;
-  private final int outOfSyncReplicasCount;
-
-  // log dir stats
-  @Nullable // will be null if log dir collection disabled
-  private final Map<Integer, InternalBrokerDiskUsage> internalBrokerDiskUsage;
-
-  // metrics from metrics collector
-  private final BigDecimal bytesInPerSec;
-  private final BigDecimal bytesOutPerSec;
-  private final Map<Integer, BrokerMetrics> internalBrokerMetrics;
-  private final List<MetricDTO> metrics;
-
-}

+ 17 - 14
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalClusterState.java

@@ -36,39 +36,42 @@ public class InternalClusterState {
             .message(e.getMessage())
             .stackTrace(Throwables.getStackTraceAsString(e)))
         .orElse(null);
-    topicCount = statistics.getTopicDescriptions().size();
+    topicCount = (int) statistics.topicDescriptions().count();
     brokerCount = statistics.getClusterDescription().getNodes().size();
     activeControllers = Optional.ofNullable(statistics.getClusterDescription().getController())
         .map(Node::id)
         .orElse(null);
     version = statistics.getVersion();
 
-    if (statistics.getLogDirInfo() != null) {
-      diskUsage = statistics.getLogDirInfo().getBrokerStats().entrySet().stream()
-          .map(e -> new BrokerDiskUsageDTO()
-              .brokerId(e.getKey())
-              .segmentSize(e.getValue().getSegmentSize())
-              .segmentCount(e.getValue().getSegmentsCount()))
-          .collect(Collectors.toList());
-    }
+    diskUsage = statistics.getClusterState().getNodesStates().values().stream()
+        .filter(n -> n.segmentStats() != null)
+        .map(n -> new BrokerDiskUsageDTO()
+            .brokerId(n.id())
+            .segmentSize(n.segmentStats().getSegmentSize())
+            .segmentCount(n.segmentStats().getSegmentsCount()))
+        .collect(Collectors.toList());
 
     features = statistics.getFeatures();
 
     bytesInPerSec = statistics
         .getMetrics()
-        .getBrokerBytesInPerSec()
-        .values().stream()
+        .getIoRates()
+        .brokerBytesInPerSec()
+        .values()
+        .stream()
         .reduce(BigDecimal::add)
         .orElse(null);
 
     bytesOutPerSec = statistics
         .getMetrics()
-        .getBrokerBytesOutPerSec()
-        .values().stream()
+        .getIoRates()
+        .brokerBytesOutPerSec()
+        .values()
+        .stream()
         .reduce(BigDecimal::add)
         .orElse(null);
 
-    var partitionsStats = new PartitionsStats(statistics.getTopicDescriptions().values());
+    var partitionsStats = new PartitionsStats(statistics.topicDescriptions().toList());
     onlinePartitionCount = partitionsStats.getOnlinePartitionCount();
     offlinePartitionCount = partitionsStats.getOfflinePartitionCount();
     inSyncReplicasCount = partitionsStats.getInSyncReplicasCount();

+ 51 - 12
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalLogDirStats.java

@@ -3,14 +3,17 @@ package com.provectus.kafka.ui.model;
 import static java.util.stream.Collectors.collectingAndThen;
 import static java.util.stream.Collectors.groupingBy;
 import static java.util.stream.Collectors.summarizingLong;
-import static java.util.stream.Collectors.toList;
 
+import jakarta.annotation.Nullable;
+import java.util.HashMap;
 import java.util.List;
 import java.util.LongSummaryStatistics;
 import java.util.Map;
+import java.util.concurrent.atomic.LongAdder;
+import lombok.RequiredArgsConstructor;
 import lombok.Value;
+import org.apache.kafka.clients.admin.LogDirDescription;
 import org.apache.kafka.common.TopicPartition;
-import org.apache.kafka.common.requests.DescribeLogDirsResponse;
 import reactor.util.function.Tuple2;
 import reactor.util.function.Tuple3;
 import reactor.util.function.Tuples;
@@ -19,32 +22,39 @@ import reactor.util.function.Tuples;
 public class InternalLogDirStats {
 
   @Value
+  @RequiredArgsConstructor
   public static class SegmentStats {
-    long segmentSize;
-    int segmentsCount;
+    Long segmentSize;
+    Integer segmentsCount;
 
-    public SegmentStats(LongSummaryStatistics s) {
-      segmentSize = s.getSum();
-      segmentsCount = (int) s.getCount();
+    private SegmentStats(LongSummaryStatistics s) {
+      this(s.getSum(), (int) s.getCount());
     }
   }
 
+  public record LogDirSpaceStats(@Nullable Long totalBytes,
+                                 @Nullable Long usableBytes,
+                                 Map<String, Long> totalPerDir,
+                                 Map<String, Long> usablePerDir) {
+  }
+
   Map<TopicPartition, SegmentStats> partitionsStats;
   Map<String, SegmentStats> topicStats;
   Map<Integer, SegmentStats> brokerStats;
+  Map<Integer, LogDirSpaceStats> brokerDirsStats;
 
   public static InternalLogDirStats empty() {
     return new InternalLogDirStats(Map.of());
   }
 
-  public InternalLogDirStats(Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>> log) {
+  public InternalLogDirStats(Map<Integer, Map<String, LogDirDescription>> logsInfo) {
     final List<Tuple3<Integer, TopicPartition, Long>> topicPartitions =
-        log.entrySet().stream().flatMap(b ->
+        logsInfo.entrySet().stream().flatMap(b ->
             b.getValue().entrySet().stream().flatMap(topicMap ->
-                topicMap.getValue().replicaInfos.entrySet().stream()
-                    .map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size))
+                topicMap.getValue().replicaInfos().entrySet().stream()
+                    .map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size()))
             )
-        ).collect(toList());
+        ).toList();
 
     partitionsStats = topicPartitions.stream().collect(
         groupingBy(
@@ -64,5 +74,34 @@ public class InternalLogDirStats {
             Tuple2::getT1,
             collectingAndThen(
                 summarizingLong(Tuple3::getT3), SegmentStats::new)));
+
+    brokerDirsStats = calculateSpaceStats(logsInfo);
+  }
+
+  private static Map<Integer, LogDirSpaceStats> calculateSpaceStats(
+      Map<Integer, Map<String, LogDirDescription>> logsInfo) {
+
+    var stats = new HashMap<Integer, LogDirSpaceStats>();
+    logsInfo.forEach((brokerId, logDirStats) -> {
+      Map<String, Long> totalBytes = new HashMap<>();
+      Map<String, Long> usableBytes = new HashMap<>();
+      logDirStats.forEach((logDir, descr) -> {
+        if (descr.error() != null) { // skip log dirs that failed to describe
+          return;
+        }
+        descr.totalBytes().ifPresent(b -> totalBytes.merge(logDir, b, Long::sum));
+        descr.usableBytes().ifPresent(b -> usableBytes.merge(logDir, b, Long::sum));
+      });
+      stats.put(
+          brokerId,
+          new LogDirSpaceStats(
+              totalBytes.isEmpty() ? null : totalBytes.values().stream().mapToLong(i -> i).sum(),
+              usableBytes.isEmpty() ? null : usableBytes.values().stream().mapToLong(i -> i).sum(),
+              totalBytes,
+              usableBytes
+          )
+      );
+    });
+    return stats;
   }
 }

+ 8 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalPartitionsOffsets.java

@@ -4,6 +4,7 @@ import com.google.common.collect.HashBasedTable;
 import com.google.common.collect.Table;
 import java.util.Map;
 import java.util.Optional;
+import java.util.stream.Collectors;
 import lombok.Value;
 import org.apache.kafka.common.TopicPartition;
 
@@ -30,4 +31,11 @@ public class InternalPartitionsOffsets {
     return Optional.ofNullable(offsets.get(topic, partition));
   }
 
+  public Map<Integer, Long> topicOffsets(String topic, boolean earliest) {
+    return offsets.row(topic)
+        .entrySet()
+        .stream()
+        .collect(Collectors.toMap(Map.Entry::getKey, e -> earliest ? e.getValue().earliest : e.getValue().getLatest()));
+  }
+
 }
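
The new topicOffsets helper builds on Guava's Table.row view, which exposes one row key's column-to-value map. The mechanism in isolation (a standalone sketch, not the commit's types):

import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import java.util.Map;

public class TableRowSketch {
  public static void main(String[] args) {
    Table<String, Integer, Long> offsets = HashBasedTable.create();
    offsets.put("orders", 0, 0L);
    offsets.put("orders", 1, 15L);
    offsets.put("payments", 0, 7L);

    // row(...) yields partition -> offset for one topic, as topicOffsets does.
    Map<Integer, Long> ordersOffsets = offsets.row("orders");
    System.out.println(ordersOffsets); // {0=0, 1=15}
  }
}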

+ 0 - 13
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalSegmentSizeDto.java

@@ -1,13 +0,0 @@
-package com.provectus.kafka.ui.model;
-
-import java.util.Map;
-import lombok.Builder;
-import lombok.Data;
-
-@Data
-@Builder(toBuilder = true)
-public class InternalSegmentSizeDto {
-
-  private final Map<String, InternalTopic> internalTopicWithSegmentSize;
-  private final InternalClusterMetrics clusterMetricsWithSegmentSize;
-}

+ 18 - 19
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/InternalTopic.java

@@ -1,23 +1,22 @@
 package com.provectus.kafka.ui.model;
 
-import com.provectus.kafka.ui.config.ClustersProperties;
+import static com.provectus.kafka.ui.model.InternalLogDirStats.SegmentStats;
+
 import java.math.BigDecimal;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.stream.Collectors;
 import javax.annotation.Nullable;
 import lombok.Builder;
 import lombok.Data;
 import org.apache.kafka.clients.admin.ConfigEntry;
 import org.apache.kafka.clients.admin.TopicDescription;
-import org.apache.kafka.common.TopicPartition;
 
 @Data
 @Builder(toBuilder = true)
 public class InternalTopic {
 
-  ClustersProperties clustersProperties;
-
   // from TopicDescription
   private final String name;
   private final boolean internal;
@@ -44,7 +43,8 @@ public class InternalTopic {
                                    List<ConfigEntry> configs,
                                    InternalPartitionsOffsets partitionsOffsets,
                                    Metrics metrics,
-                                   InternalLogDirStats logDirInfo,
+                                   @Nullable SegmentStats segmentStats,
+                                   @Nullable Map<Integer, SegmentStats> partitionsSegmentStats,
                                    @Nullable String internalTopicPrefix) {
     var topic = InternalTopic.builder();
 
@@ -81,13 +81,12 @@ public class InternalTopic {
                 partitionDto.offsetMax(offsets.getLatest());
               });
 
-          var segmentStats =
-              logDirInfo.getPartitionsStats().get(
-                  new TopicPartition(topicDescription.name(), partition.partition()));
-          if (segmentStats != null) {
-            partitionDto.segmentCount(segmentStats.getSegmentsCount());
-            partitionDto.segmentSize(segmentStats.getSegmentSize());
-          }
+          Optional.ofNullable(partitionsSegmentStats)
+              .flatMap(s -> Optional.ofNullable(s.get(partition.partition())))
+              .ifPresent(stats -> {
+                partitionDto.segmentCount(stats.getSegmentsCount());
+                partitionDto.segmentSize(stats.getSegmentSize());
+              });
 
           return partitionDto.build();
         })
@@ -108,14 +107,14 @@ public class InternalTopic {
             : topicDescription.partitions().get(0).replicas().size()
     );
 
-    var segmentStats = logDirInfo.getTopicStats().get(topicDescription.name());
-    if (segmentStats != null) {
-      topic.segmentCount(segmentStats.getSegmentsCount());
-      topic.segmentSize(segmentStats.getSegmentSize());
-    }
+    Optional.ofNullable(segmentStats)
+        .ifPresent(stats -> {
+          topic.segmentCount(stats.getSegmentsCount());
+          topic.segmentSize(stats.getSegmentSize());
+        });
 
-    topic.bytesInPerSec(metrics.getTopicBytesInPerSec().get(topicDescription.name()));
-    topic.bytesOutPerSec(metrics.getTopicBytesOutPerSec().get(topicDescription.name()));
+    topic.bytesInPerSec(metrics.getIoRates().topicBytesInPerSec().get(topicDescription.name()));
+    topic.bytesOutPerSec(metrics.getIoRates().topicBytesOutPerSec().get(topicDescription.name()));
 
     topic.topicConfigs(
         configs.stream().map(InternalTopicConfig::from).collect(Collectors.toList()));

+ 5 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/KafkaCluster.java

@@ -5,6 +5,7 @@ import com.provectus.kafka.ui.connect.api.KafkaConnectClientApi;
 import com.provectus.kafka.ui.emitter.PollingSettings;
 import com.provectus.kafka.ui.service.ksql.KsqlApiClient;
 import com.provectus.kafka.ui.service.masking.DataMasking;
+import com.provectus.kafka.ui.service.metrics.scrape.MetricsScrapping;
 import com.provectus.kafka.ui.sr.api.KafkaSrClientApi;
 import com.provectus.kafka.ui.util.ReactiveFailover;
 import java.util.Map;
@@ -13,6 +14,7 @@ import lombok.AccessLevel;
 import lombok.AllArgsConstructor;
 import lombok.Builder;
 import lombok.Data;
+import prometheus.query.api.PrometheusClientApi;
 
 @Data
 @Builder(toBuilder = true)
@@ -25,10 +27,12 @@ public class KafkaCluster {
   private final String bootstrapServers;
   private final Properties properties;
   private final boolean readOnly;
-  private final MetricsConfig metricsConfig;
+  private final boolean exposeMetricsViaPrometheusEndpoint;
   private final DataMasking masking;
   private final PollingSettings pollingSettings;
   private final ReactiveFailover<KafkaSrClientApi> schemaRegistryClient;
   private final Map<String, ReactiveFailover<KafkaConnectClientApi>> connectsClients;
   private final ReactiveFailover<KsqlApiClient> ksqlClient;
+  private final MetricsScrapping metricsScrapping;
+  private final ReactiveFailover<PrometheusClientApi> prometheusStorageClient;
 }

+ 22 - 20
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/Metrics.java

@@ -1,13 +1,11 @@
 package com.provectus.kafka.ui.model;
 
-import static java.util.stream.Collectors.toMap;
+import static io.prometheus.client.Collector.MetricFamilySamples;
 
-import com.provectus.kafka.ui.service.metrics.RawMetric;
+import com.provectus.kafka.ui.service.metrics.scrape.inferred.InferredMetrics;
 import java.math.BigDecimal;
-import java.util.Collection;
 import java.util.List;
 import java.util.Map;
-import java.util.stream.Stream;
 import lombok.Builder;
 import lombok.Value;
 
@@ -16,28 +14,32 @@ import lombok.Value;
 @Value
 public class Metrics {
 
-  Map<Integer, BigDecimal> brokerBytesInPerSec;
-  Map<Integer, BigDecimal> brokerBytesOutPerSec;
-  Map<String, BigDecimal> topicBytesInPerSec;
-  Map<String, BigDecimal> topicBytesOutPerSec;
-  Map<Integer, List<RawMetric>> perBrokerMetrics;
+  IoRates ioRates;
+  InferredMetrics inferredMetrics;
+  Map<Integer, List<MetricFamilySamples>> perBrokerScrapedMetrics;
 
   public static Metrics empty() {
     return Metrics.builder()
-        .brokerBytesInPerSec(Map.of())
-        .brokerBytesOutPerSec(Map.of())
-        .topicBytesInPerSec(Map.of())
-        .topicBytesOutPerSec(Map.of())
-        .perBrokerMetrics(Map.of())
+        .ioRates(IoRates.empty())
+        .perBrokerScrapedMetrics(Map.of())
+        .inferredMetrics(InferredMetrics.empty())
         .build();
   }
 
-  public Stream<RawMetric> getSummarizedMetrics() {
-    return perBrokerMetrics.values().stream()
-        .flatMap(Collection::stream)
-        .collect(toMap(RawMetric::identityKey, m -> m, (m1, m2) -> m1.copyWithValue(m1.value().add(m2.value()))))
-        .values()
-        .stream();
+  @Builder
+  public record IoRates(Map<Integer, BigDecimal> brokerBytesInPerSec,
+                        Map<Integer, BigDecimal> brokerBytesOutPerSec,
+                        Map<String, BigDecimal> topicBytesInPerSec,
+                        Map<String, BigDecimal> topicBytesOutPerSec) {
+
+    static IoRates empty() {
+      return IoRates.builder()
+          .brokerBytesOutPerSec(Map.of())
+          .brokerBytesInPerSec(Map.of())
+          .topicBytesOutPerSec(Map.of())
+          .topicBytesInPerSec(Map.of())
+          .build();
+    }
   }
 
 }
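
Since IoRates is a record with a Lombok @Builder, call sites can assemble it fluently. A small sketch with made-up rates:

import com.provectus.kafka.ui.model.Metrics;
import java.math.BigDecimal;
import java.util.Map;

class IoRatesSketch {
  static Metrics.IoRates sample() {
    return Metrics.IoRates.builder()
        .brokerBytesInPerSec(Map.of(1, BigDecimal.valueOf(1024)))
        .brokerBytesOutPerSec(Map.of(1, BigDecimal.valueOf(2048)))
        .topicBytesInPerSec(Map.of("orders", BigDecimal.valueOf(512)))
        .topicBytesOutPerSec(Map.of("orders", BigDecimal.valueOf(256)))
        .build();
  }
}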

+ 0 - 22
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/MetricsConfig.java

@@ -1,22 +0,0 @@
-package com.provectus.kafka.ui.model;
-
-import lombok.AccessLevel;
-import lombok.AllArgsConstructor;
-import lombok.Builder;
-import lombok.Data;
-
-@Data
-@Builder(toBuilder = true)
-@AllArgsConstructor(access = AccessLevel.PRIVATE)
-public class MetricsConfig {
-  public static final String JMX_METRICS_TYPE = "JMX";
-  public static final String PROMETHEUS_METRICS_TYPE = "PROMETHEUS";
-
-  private final String type;
-  private final Integer port;
-  private final boolean ssl;
-  private final String username;
-  private final String password;
-  private final String keystoreLocation;
-  private final String keystorePassword;
-}

+ 46 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/MetricsScrapeProperties.java

@@ -0,0 +1,46 @@
+package com.provectus.kafka.ui.model;
+
+import static com.provectus.kafka.ui.config.ClustersProperties.KeystoreConfig;
+import static com.provectus.kafka.ui.config.ClustersProperties.TruststoreConfig;
+
+import com.provectus.kafka.ui.config.ClustersProperties;
+import jakarta.annotation.Nullable;
+import java.util.Objects;
+import java.util.Optional;
+import lombok.Builder;
+import lombok.Value;
+
+@Value
+@Builder
+public class MetricsScrapeProperties {
+  public static final String JMX_METRICS_TYPE = "JMX";
+  public static final String PROMETHEUS_METRICS_TYPE = "PROMETHEUS";
+
+  Integer port;
+  boolean ssl;
+  String username;
+  String password;
+
+  @Nullable
+  KeystoreConfig keystoreConfig;
+
+  @Nullable
+  TruststoreConfig truststoreConfig;
+
+  public static MetricsScrapeProperties create(ClustersProperties.Cluster cluster) {
+    var metrics = Objects.requireNonNull(cluster.getMetrics());
+    return MetricsScrapeProperties.builder()
+        .port(metrics.getPort())
+        .ssl(Optional.ofNullable(metrics.getSsl()).orElse(false))
+        .username(metrics.getUsername())
+        .password(metrics.getPassword())
+        .truststoreConfig(cluster.getSsl())
+        .keystoreConfig(
+            metrics.getKeystoreLocation() != null
+                ? new KeystoreConfig(metrics.getKeystoreLocation(), metrics.getKeystorePassword())
+                : null
+        )
+        .build();
+  }
+
+}
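
Callers such as KafkaClusterFactory (below) can derive scrape settings from cluster configuration via create. A hypothetical sketch using the Lombok-generated setters on ClustersProperties:

import com.provectus.kafka.ui.config.ClustersProperties;
import com.provectus.kafka.ui.model.MetricsScrapeProperties;

class ScrapePropertiesSketch {
  static MetricsScrapeProperties jmxExample() {
    var metrics = new ClustersProperties.MetricsConfig();
    metrics.setType(MetricsScrapeProperties.JMX_METRICS_TYPE);
    metrics.setPort(9997);
    metrics.setSsl(false);

    var cluster = new ClustersProperties.Cluster();
    cluster.setMetrics(metrics);
    return MetricsScrapeProperties.create(cluster); // port=9997, ssl=false, no keystore
  }
}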

+ 10 - 3
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/PartitionDistributionStats.java

@@ -1,14 +1,17 @@
 package com.provectus.kafka.ui.model;
 
+import com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState;
 import java.math.BigDecimal;
 import java.math.RoundingMode;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import javax.annotation.Nullable;
 import lombok.AccessLevel;
 import lombok.Getter;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
+import org.apache.commons.lang3.mutable.MutableInt;
 import org.apache.kafka.clients.admin.TopicDescription;
 import org.apache.kafka.common.Node;
 import org.apache.kafka.common.TopicPartitionInfo;
@@ -29,15 +32,19 @@ public class PartitionDistributionStats {
   private final boolean skewCanBeCalculated;
 
   public static PartitionDistributionStats create(Statistics stats) {
-    return create(stats, MIN_PARTITIONS_FOR_SKEW_CALCULATION);
+    return create(
+        stats.topicDescriptions().toList(),
+        MIN_PARTITIONS_FOR_SKEW_CALCULATION
+    );
   }
 
-  static PartitionDistributionStats create(Statistics stats, int minPartitionsForSkewCalculation) {
+  static PartitionDistributionStats create(List<TopicDescription> topicDescriptions,
+                                           int minPartitionsForSkewCalculation) {
     var partitionLeaders = new HashMap<Node, Integer>();
     var partitionsReplicated = new HashMap<Node, Integer>();
     var isr = new HashMap<Node, Integer>();
     int partitionsCnt = 0;
-    for (TopicDescription td : stats.getTopicDescriptions().values()) {
+    for (TopicDescription td : topicDescriptions) {
       for (TopicPartitionInfo tp : td.partitions()) {
         partitionsCnt++;
         tp.replicas().forEach(r -> incr(partitionsReplicated, r));

+ 9 - 6
kafka-ui-api/src/main/java/com/provectus/kafka/ui/model/Statistics.java

@@ -1,9 +1,11 @@
 package com.provectus.kafka.ui.model;
 
 import com.provectus.kafka.ui.service.ReactiveAdminClient;
+import com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.stream.Stream;
 import lombok.Builder;
 import lombok.Value;
 import org.apache.kafka.clients.admin.ConfigEntry;
@@ -18,9 +20,7 @@ public class Statistics {
   List<ClusterFeature> features;
   ReactiveAdminClient.ClusterDescription clusterDescription;
   Metrics metrics;
-  InternalLogDirStats logDirInfo;
-  Map<String, TopicDescription> topicDescriptions;
-  Map<String, List<ConfigEntry>> topicConfigs;
+  ScrapedClusterState clusterState;
 
   public static Statistics empty() {
     return builder()
@@ -30,9 +30,12 @@ public class Statistics {
         .clusterDescription(
             new ReactiveAdminClient.ClusterDescription(null, null, List.of(), Set.of()))
         .metrics(Metrics.empty())
-        .logDirInfo(InternalLogDirStats.empty())
-        .topicDescriptions(Map.of())
-        .topicConfigs(Map.of())
+        .clusterState(ScrapedClusterState.empty())
         .build();
   }
+
+  public Stream<TopicDescription> topicDescriptions() {
+    return clusterState.getTopicStates().values().stream().map(ScrapedClusterState.TopicState::description);
+  }
+
 }

+ 7 - 6
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/BrokerService.java

@@ -1,5 +1,7 @@
 package com.provectus.kafka.ui.service;
 
+import static io.prometheus.client.Collector.MetricFamilySamples;
+
 import com.provectus.kafka.ui.exception.InvalidRequestApiException;
 import com.provectus.kafka.ui.exception.LogDirNotFoundApiException;
 import com.provectus.kafka.ui.exception.NotFoundException;
@@ -11,7 +13,6 @@ import com.provectus.kafka.ui.model.InternalBroker;
 import com.provectus.kafka.ui.model.InternalBrokerConfig;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.PartitionDistributionStats;
-import com.provectus.kafka.ui.service.metrics.RawMetric;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -21,13 +22,13 @@ import javax.annotation.Nullable;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.admin.ConfigEntry;
+import org.apache.kafka.clients.admin.LogDirDescription;
 import org.apache.kafka.common.Node;
 import org.apache.kafka.common.TopicPartitionReplica;
 import org.apache.kafka.common.errors.InvalidRequestException;
 import org.apache.kafka.common.errors.LogDirNotFoundException;
 import org.apache.kafka.common.errors.TimeoutException;
 import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
-import org.apache.kafka.common.requests.DescribeLogDirsResponse;
 import org.springframework.stereotype.Service;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
@@ -72,7 +73,7 @@ public class BrokerService {
         .get(cluster)
         .flatMap(ReactiveAdminClient::describeCluster)
         .map(description -> description.getNodes().stream()
-            .map(node -> new InternalBroker(node, partitionsDistribution, stats))
+            .map(node -> new InternalBroker(node, partitionsDistribution, stats.getMetrics()))
             .collect(Collectors.toList()))
         .flatMapMany(Flux::fromIterable);
   }
@@ -110,7 +111,7 @@ public class BrokerService {
         .doOnError(e -> log.error("Unexpected error", e));
   }
 
-  private Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> getClusterLogDirs(
+  private Mono<Map<Integer, Map<String, LogDirDescription>>> getClusterLogDirs(
       KafkaCluster cluster, List<Integer> reqBrokers) {
     return adminClientService.get(cluster)
         .flatMap(admin -> {
@@ -139,8 +140,8 @@ public class BrokerService {
     return getBrokersConfig(cluster, brokerId);
   }
 
-  public Mono<List<RawMetric>> getBrokerMetrics(KafkaCluster cluster, Integer brokerId) {
-    return Mono.justOrEmpty(statisticsCache.get(cluster).getMetrics().getPerBrokerMetrics().get(brokerId));
+  public Mono<List<MetricFamilySamples>> getBrokerMetrics(KafkaCluster cluster, Integer brokerId) {
+    return Mono.justOrEmpty(statisticsCache.get(cluster).getMetrics().getPerBrokerScrapedMetrics().get(brokerId));
   }
 
 }

+ 2 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ClustersStatisticsScheduler.java

@@ -22,9 +22,9 @@ public class ClustersStatisticsScheduler {
         .parallel()
         .runOn(Schedulers.parallel())
         .flatMap(cluster -> {
-          log.debug("Start getting metrics for kafkaCluster: {}", cluster.getName());
+          log.debug("Start collection statistics for cluster: {}", cluster.getName());
           return statisticsService.updateCache(cluster)
-              .doOnSuccess(m -> log.debug("Metrics updated for cluster: {}", cluster.getName()));
+              .doOnSuccess(m -> log.debug("Statistics updated for cluster: {}", cluster.getName()));
         })
         .then()
         .block();

+ 58 - 28
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/KafkaClusterFactory.java

@@ -1,5 +1,12 @@
 package com.provectus.kafka.ui.service;
 
+import static com.provectus.kafka.ui.util.KafkaServicesValidation.validateClusterConnection;
+import static com.provectus.kafka.ui.util.KafkaServicesValidation.validateConnect;
+import static com.provectus.kafka.ui.util.KafkaServicesValidation.validateKsql;
+import static com.provectus.kafka.ui.util.KafkaServicesValidation.validatePrometheusStore;
+import static com.provectus.kafka.ui.util.KafkaServicesValidation.validateSchemaRegistry;
+import static com.provectus.kafka.ui.util.KafkaServicesValidation.validateTruststore;
+
 import com.provectus.kafka.ui.client.RetryingKafkaConnectClient;
 import com.provectus.kafka.ui.config.ClustersProperties;
 import com.provectus.kafka.ui.config.WebclientProperties;
@@ -8,9 +15,10 @@ import com.provectus.kafka.ui.emitter.PollingSettings;
 import com.provectus.kafka.ui.model.ApplicationPropertyValidationDTO;
 import com.provectus.kafka.ui.model.ClusterConfigValidationDTO;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.MetricsConfig;
 import com.provectus.kafka.ui.service.ksql.KsqlApiClient;
 import com.provectus.kafka.ui.service.masking.DataMasking;
+import com.provectus.kafka.ui.service.metrics.scrape.MetricsScrapping;
+import com.provectus.kafka.ui.service.metrics.scrape.jmx.JmxMetricsRetriever;
 import com.provectus.kafka.ui.sr.ApiClient;
 import com.provectus.kafka.ui.sr.api.KafkaSrClientApi;
 import com.provectus.kafka.ui.util.KafkaServicesValidation;
@@ -22,11 +30,12 @@ import java.util.Map;
 import java.util.Optional;
 import java.util.Properties;
 import java.util.stream.Stream;
-import javax.annotation.Nullable;
 import lombok.extern.slf4j.Slf4j;
 import org.springframework.stereotype.Service;
+import org.springframework.util.StringUtils;
 import org.springframework.util.unit.DataSize;
 import org.springframework.web.reactive.function.client.WebClient;
+import prometheus.query.api.PrometheusClientApi;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 import reactor.util.function.Tuple2;
@@ -39,11 +48,13 @@ public class KafkaClusterFactory {
   private static final DataSize DEFAULT_WEBCLIENT_BUFFER = DataSize.parse("20MB");
 
   private final DataSize webClientMaxBuffSize;
+  private final JmxMetricsRetriever jmxMetricsRetriever;
 
-  public KafkaClusterFactory(WebclientProperties webclientProperties) {
+  public KafkaClusterFactory(WebclientProperties webclientProperties, JmxMetricsRetriever jmxMetricsRetriever) {
     this.webClientMaxBuffSize = Optional.ofNullable(webclientProperties.getMaxInMemoryBufferSize())
         .map(DataSize::parse)
         .orElse(DEFAULT_WEBCLIENT_BUFFER);
+    this.jmxMetricsRetriever = jmxMetricsRetriever;
   }
 
   public KafkaCluster create(ClustersProperties properties,
@@ -54,8 +65,10 @@ public class KafkaClusterFactory {
     builder.bootstrapServers(clusterProperties.getBootstrapServers());
     builder.properties(convertProperties(clusterProperties.getProperties()));
     builder.readOnly(clusterProperties.isReadOnly());
+    builder.exposeMetricsViaPrometheusEndpoint(exposeMetricsViaPrometheusEndpoint(clusterProperties));
     builder.masking(DataMasking.create(clusterProperties.getMasking()));
     builder.pollingSettings(PollingSettings.create(clusterProperties, properties));
+    builder.metricsScrapping(MetricsScrapping.create(clusterProperties, jmxMetricsRetriever));
 
     if (schemaRegistryConfigured(clusterProperties)) {
       builder.schemaRegistryClient(schemaRegistryClient(clusterProperties));
@@ -66,8 +79,8 @@ public class KafkaClusterFactory {
     if (ksqlConfigured(clusterProperties)) {
       builder.ksqlClient(ksqlClient(clusterProperties));
     }
-    if (metricsConfigured(clusterProperties)) {
-      builder.metricsConfig(metricsConfigDataToMetricsConfig(clusterProperties.getMetrics()));
+    if (prometheusStorageConfigured(clusterProperties)) {
+      builder.prometheusStorageClient(prometheusStorageClient(clusterProperties));
     }
     builder.originalProperties(clusterProperties);
     return builder.build();
@@ -75,7 +88,7 @@ public class KafkaClusterFactory {
 
   public Mono<ClusterConfigValidationDTO> validate(ClustersProperties.Cluster clusterProperties) {
     if (clusterProperties.getSsl() != null) {
-      Optional<String> errMsg = KafkaServicesValidation.validateTruststore(clusterProperties.getSsl());
+      Optional<String> errMsg = validateTruststore(clusterProperties.getSsl());
       if (errMsg.isPresent()) {
         return Mono.just(new ClusterConfigValidationDTO()
             .kafka(new ApplicationPropertyValidationDTO()
@@ -85,40 +98,51 @@ public class KafkaClusterFactory {
     }
 
     return Mono.zip(
-        KafkaServicesValidation.validateClusterConnection(
+        validateClusterConnection(
             clusterProperties.getBootstrapServers(),
             convertProperties(clusterProperties.getProperties()),
             clusterProperties.getSsl()
         ),
         schemaRegistryConfigured(clusterProperties)
-            ? KafkaServicesValidation.validateSchemaRegistry(
-                () -> schemaRegistryClient(clusterProperties)).map(Optional::of)
+            ? validateSchemaRegistry(() -> schemaRegistryClient(clusterProperties)).map(Optional::of)
             : Mono.<Optional<ApplicationPropertyValidationDTO>>just(Optional.empty()),
 
         ksqlConfigured(clusterProperties)
-            ? KafkaServicesValidation.validateKsql(() -> ksqlClient(clusterProperties)).map(Optional::of)
+            ? validateKsql(() -> ksqlClient(clusterProperties)).map(Optional::of)
             : Mono.<Optional<ApplicationPropertyValidationDTO>>just(Optional.empty()),
 
         connectClientsConfigured(clusterProperties)
             ?
             Flux.fromIterable(clusterProperties.getKafkaConnect())
                 .flatMap(c ->
-                    KafkaServicesValidation.validateConnect(() -> connectClient(clusterProperties, c))
+                    validateConnect(() -> connectClient(clusterProperties, c))
                         .map(r -> Tuples.of(c.getName(), r)))
                 .collectMap(Tuple2::getT1, Tuple2::getT2)
                 .map(Optional::of)
             :
-            Mono.<Optional<Map<String, ApplicationPropertyValidationDTO>>>just(Optional.empty())
+            Mono.<Optional<Map<String, ApplicationPropertyValidationDTO>>>just(Optional.empty()),
+
+        prometheusStorageConfigured(clusterProperties)
+            ? validatePrometheusStore(() -> prometheusStorageClient(clusterProperties)).map(Optional::of)
+            : Mono.<Optional<ApplicationPropertyValidationDTO>>just(Optional.empty())
+
     ).map(tuple -> {
       var validation = new ClusterConfigValidationDTO();
       validation.kafka(tuple.getT1());
       tuple.getT2().ifPresent(validation::schemaRegistry);
       tuple.getT3().ifPresent(validation::ksqldb);
       tuple.getT4().ifPresent(validation::kafkaConnects);
+      tuple.getT5().ifPresent(validation::prometheusStorage);
       return validation;
     });
   }
 
+  private boolean exposeMetricsViaPrometheusEndpoint(ClustersProperties.Cluster clusterProperties) {
+    return Optional.ofNullable(clusterProperties.getMetrics())
+        .map(m -> m.getPrometheusExpose() == null || m.getPrometheusExpose())
+        .orElse(true);
+  }
+
   private Properties convertProperties(Map<String, Object> propertiesMap) {
     Properties properties = new Properties();
     if (propertiesMap != null) {
@@ -153,6 +177,28 @@ public class KafkaClusterFactory {
     );
   }
 
+  private ReactiveFailover<PrometheusClientApi> prometheusStorageClient(ClustersProperties.Cluster cluster) {
+    WebClient webClient = new WebClientConfigurator()
+        .configureSsl(cluster.getSsl(), null)
+        .configureBufferSize(webClientMaxBuffSize)
+        .build();
+    return ReactiveFailover.create(
+        parseUrlList(cluster.getMetrics().getStore().getPrometheus().getUrl()),
+        url -> new PrometheusClientApi(new prometheus.query.ApiClient(webClient).setBasePath(url)),
+        ReactiveFailover.CONNECTION_REFUSED_EXCEPTION_FILTER,
+        "No live schemaRegistry instances available",
+        ReactiveFailover.DEFAULT_RETRY_GRACE_PERIOD_MS
+    );
+  }
+
+  private boolean prometheusStorageConfigured(ClustersProperties.Cluster cluster) {
+    return Optional.ofNullable(cluster.getMetrics())
+        .flatMap(m -> Optional.ofNullable(m.getStore()))
+        .flatMap(s -> Optional.ofNullable(s.getPrometheus()))
+        .map(p -> StringUtils.hasText(p.getUrl()))
+        .orElse(false);
+  }
+
   private boolean schemaRegistryConfigured(ClustersProperties.Cluster clusterProperties) {
     return clusterProperties.getSchemaRegistry() != null;
   }
@@ -202,20 +248,4 @@ public class KafkaClusterFactory {
     return clusterProperties.getMetrics() != null;
   }
 
-  @Nullable
-  private MetricsConfig metricsConfigDataToMetricsConfig(ClustersProperties.MetricsConfigData metricsConfigData) {
-    if (metricsConfigData == null) {
-      return null;
-    }
-    MetricsConfig.MetricsConfigBuilder builder = MetricsConfig.builder();
-    builder.type(metricsConfigData.getType());
-    builder.port(metricsConfigData.getPort());
-    builder.ssl(Optional.ofNullable(metricsConfigData.getSsl()).orElse(false));
-    builder.username(metricsConfigData.getUsername());
-    builder.password(metricsConfigData.getPassword());
-    builder.keystoreLocation(metricsConfigData.getKeystoreLocation());
-    builder.keystorePassword(metricsConfigData.getKeystorePassword());
-    return builder.build();
-  }
-
 }

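For reference, prometheusStorageConfigured() only returns true when a store URL is present under metrics.store.prometheus. A configuration sketch (YAML key names inferred from the accessor chain above, so treat them as an assumption):

    kafka:
      clusters:
        - name: local
          bootstrapServers: localhost:9092
          metrics:
            store:
              prometheus:
                url: http://localhost:9090
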
+ 7 - 2
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/MessagesService.java

@@ -187,13 +187,18 @@ public class MessagesService {
 
   public static KafkaProducer<byte[], byte[]> createProducer(KafkaCluster cluster,
                                                              Map<String, Object> additionalProps) {
+    return createProducer(cluster.getOriginalProperties(), additionalProps);
+  }
+
+  public static KafkaProducer<byte[], byte[]> createProducer(ClustersProperties.Cluster cluster,
+                                                             Map<String, Object> additionalProps) {
     Properties properties = new Properties();
-    SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties);
+    SslPropertiesUtil.addKafkaSslProperties(cluster.getSsl(), properties);
+    properties.putAll(additionalProps);
     properties.putAll(cluster.getProperties());
     properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
     properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
     properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
-    properties.putAll(additionalProps);
     return new KafkaProducer<>(properties);
   }
 

+ 3 - 10
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/ReactiveAdminClient.java

@@ -51,6 +51,7 @@ import org.apache.kafka.clients.admin.DescribeConfigsOptions;
 import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec;
 import org.apache.kafka.clients.admin.ListOffsetsResult;
 import org.apache.kafka.clients.admin.ListTopicsOptions;
+import org.apache.kafka.clients.admin.LogDirDescription;
 import org.apache.kafka.clients.admin.NewPartitionReassignment;
 import org.apache.kafka.clients.admin.NewPartitions;
 import org.apache.kafka.clients.admin.NewTopic;
@@ -77,7 +78,6 @@ import org.apache.kafka.common.errors.SecurityDisabledException;
 import org.apache.kafka.common.errors.TopicAuthorizationException;
 import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
 import org.apache.kafka.common.errors.UnsupportedVersionException;
-import org.apache.kafka.common.requests.DescribeLogDirsResponse;
 import org.apache.kafka.common.resource.ResourcePatternFilter;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
@@ -378,15 +378,8 @@ public class ReactiveAdminClient implements Closeable {
     );
   }
 
-  public Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> describeLogDirs() {
-    return describeCluster()
-        .map(d -> d.getNodes().stream().map(Node::id).collect(toList()))
-        .flatMap(this::describeLogDirs);
-  }
-
-  public Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> describeLogDirs(
-      Collection<Integer> brokerIds) {
-    return toMono(client.describeLogDirs(brokerIds).all())
+  public Mono<Map<Integer, Map<String, LogDirDescription>>> describeLogDirs(Collection<Integer> brokerIds) {
+    return toMono(client.describeLogDirs(brokerIds).allDescriptions())
         .onErrorResume(UnsupportedVersionException.class, th -> Mono.just(Map.of()))
         .onErrorResume(ClusterAuthorizationException.class, th -> Mono.just(Map.of()))
         .onErrorResume(th -> true, th -> {

+ 10 - 18
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/StatisticsCache.java

@@ -1,5 +1,6 @@
 package com.provectus.kafka.ui.service;
 
+import com.provectus.kafka.ui.model.InternalPartitionsOffsets;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.ServerStatusDTO;
 import com.provectus.kafka.ui.model.Statistics;
@@ -28,38 +29,29 @@ public class StatisticsCache {
 
   public synchronized void update(KafkaCluster c,
                                   Map<String, TopicDescription> descriptions,
-                                  Map<String, List<ConfigEntry>> configs) {
-    var metrics = get(c);
-    var updatedDescriptions = new HashMap<>(metrics.getTopicDescriptions());
-    updatedDescriptions.putAll(descriptions);
-    var updatedConfigs = new HashMap<>(metrics.getTopicConfigs());
-    updatedConfigs.putAll(configs);
+                                  Map<String, List<ConfigEntry>> configs,
+                                  InternalPartitionsOffsets partitionsOffsets) {
+    var stats = get(c);
     replace(
         c,
-        metrics.toBuilder()
-            .topicDescriptions(updatedDescriptions)
-            .topicConfigs(updatedConfigs)
+        stats.toBuilder()
+            .clusterState(stats.getClusterState().updateTopics(descriptions, configs, partitionsOffsets))
             .build()
     );
   }
 
   public synchronized void onTopicDelete(KafkaCluster c, String topic) {
-    var metrics = get(c);
-    var updatedDescriptions = new HashMap<>(metrics.getTopicDescriptions());
-    updatedDescriptions.remove(topic);
-    var updatedConfigs = new HashMap<>(metrics.getTopicConfigs());
-    updatedConfigs.remove(topic);
+    var stats = get(c);
     replace(
         c,
-        metrics.toBuilder()
-            .topicDescriptions(updatedDescriptions)
-            .topicConfigs(updatedConfigs)
+        stats.toBuilder()
+            .clusterState(stats.getClusterState().topicDeleted(topic))
             .build()
     );
   }
 
   public Statistics get(KafkaCluster c) {
-    return Objects.requireNonNull(cache.get(c.getName()), "Unknown cluster metrics requested");
+    return Objects.requireNonNull(cache.get(c.getName()), "Statistics for unknown cluster requested");
   }
 
 }

+ 25 - 37
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/StatisticsService.java

@@ -2,21 +2,16 @@ package com.provectus.kafka.ui.service;
 
 import static com.provectus.kafka.ui.service.ReactiveAdminClient.ClusterDescription;
 
-import com.provectus.kafka.ui.model.ClusterFeature;
 import com.provectus.kafka.ui.model.InternalLogDirStats;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.Metrics;
 import com.provectus.kafka.ui.model.ServerStatusDTO;
 import com.provectus.kafka.ui.model.Statistics;
-import com.provectus.kafka.ui.service.metrics.MetricsCollector;
-import java.util.List;
+import com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState;
 import java.util.Map;
 import java.util.stream.Collectors;
 import lombok.RequiredArgsConstructor;
 import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.clients.admin.ConfigEntry;
-import org.apache.kafka.clients.admin.TopicDescription;
-import org.apache.kafka.common.Node;
 import org.springframework.stereotype.Service;
 import reactor.core.publisher.Mono;
 
@@ -25,7 +20,6 @@ import reactor.core.publisher.Mono;
 @Slf4j
 public class StatisticsService {
 
-  private final MetricsCollector metricsCollector;
   private final AdminClientService adminClientService;
   private final FeatureService featureService;
   private final StatisticsCache cache;
@@ -36,44 +30,38 @@ public class StatisticsService {
 
   private Mono<Statistics> getStatistics(KafkaCluster cluster) {
     return adminClientService.get(cluster).flatMap(ac ->
-            ac.describeCluster().flatMap(description ->
-                ac.updateInternalStats(description.getController()).then(
-                    Mono.zip(
-                        List.of(
-                            metricsCollector.getBrokerMetrics(cluster, description.getNodes()),
-                            getLogDirInfo(description, ac),
-                            featureService.getAvailableFeatures(ac, cluster, description),
-                            loadTopicConfigs(cluster),
-                            describeTopics(cluster)),
-                        results ->
-                            Statistics.builder()
-                                .status(ServerStatusDTO.ONLINE)
-                                .clusterDescription(description)
-                                .version(ac.getVersion())
-                                .metrics((Metrics) results[0])
-                                .logDirInfo((InternalLogDirStats) results[1])
-                                .features((List<ClusterFeature>) results[2])
-                                .topicConfigs((Map<String, List<ConfigEntry>>) results[3])
-                                .topicDescriptions((Map<String, TopicDescription>) results[4])
-                                .build()
-                    ))))
+            ac.describeCluster()
+                .flatMap(description ->
+                    ac.updateInternalStats(description.getController())
+                        .then(
+                            Mono.zip(
+                                featureService.getAvailableFeatures(ac, cluster, description),
+                                loadClusterState(description, ac)
+                            ).flatMap(featuresAndState ->
+                                scrapeMetrics(cluster, featuresAndState.getT2(), description)
+                                    .map(metrics ->
+                                        Statistics.builder()
+                                            .status(ServerStatusDTO.ONLINE)
+                                            .clusterDescription(description)
+                                            .version(ac.getVersion())
+                                            .metrics(metrics)
+                                            .features(featuresAndState.getT1())
+                                            .clusterState(featuresAndState.getT2())
+                                            .build())))))
         .doOnError(e ->
             log.error("Failed to collect cluster {} info", cluster.getName(), e))
         .onErrorResume(
             e -> Mono.just(Statistics.empty().toBuilder().lastKafkaException(e).build()));
   }
 
-  private Mono<InternalLogDirStats> getLogDirInfo(ClusterDescription desc, ReactiveAdminClient ac) {
-    var brokerIds = desc.getNodes().stream().map(Node::id).collect(Collectors.toSet());
-    return ac.describeLogDirs(brokerIds).map(InternalLogDirStats::new);
+  private Mono<ScrapedClusterState> loadClusterState(ClusterDescription clusterDescription, ReactiveAdminClient ac) {
+    return ScrapedClusterState.scrape(clusterDescription, ac);
   }
 
-  private Mono<Map<String, TopicDescription>> describeTopics(KafkaCluster c) {
-    return adminClientService.get(c).flatMap(ReactiveAdminClient::describeTopics);
-  }
-
-  private Mono<Map<String, List<ConfigEntry>>> loadTopicConfigs(KafkaCluster c) {
-    return adminClientService.get(c).flatMap(ReactiveAdminClient::getTopicsConfig);
+  private Mono<Metrics> scrapeMetrics(KafkaCluster cluster,
+                                      ScrapedClusterState clusterState,
+                                      ClusterDescription clusterDescription) {
+    return cluster.getMetricsScrapping().scrape(clusterState, clusterDescription.getNodes());
   }
 
 }

+ 30 - 24
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/TopicsService.java

@@ -1,5 +1,6 @@
 package com.provectus.kafka.ui.service;
 
+import static com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState.TopicState;
 import static java.util.stream.Collectors.toList;
 import static java.util.stream.Collectors.toMap;
 
@@ -25,6 +26,7 @@ import com.provectus.kafka.ui.model.ReplicationFactorChangeResponseDTO;
 import com.provectus.kafka.ui.model.Statistics;
 import com.provectus.kafka.ui.model.TopicCreationDTO;
 import com.provectus.kafka.ui.model.TopicUpdateDTO;
+import com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState;
 import java.time.Duration;
 import java.util.Collection;
 import java.util.Collections;
@@ -71,20 +73,19 @@ public class TopicsService {
     return adminClientService.get(c)
         .flatMap(ac ->
             ac.describeTopics(topics).zipWith(ac.getTopicsConfig(topics, false),
-                (descriptions, configs) -> {
-                  statisticsCache.update(c, descriptions, configs);
-                  return getPartitionOffsets(descriptions, ac).map(offsets -> {
-                    var metrics = statisticsCache.get(c);
-                    return createList(
-                        topics,
-                        descriptions,
-                        configs,
-                        offsets,
-                        metrics.getMetrics(),
-                        metrics.getLogDirInfo()
-                    );
-                  });
-                })).flatMap(Function.identity());
+                (descriptions, configs) ->
+                    getPartitionOffsets(descriptions, ac).map(offsets -> {
+                      statisticsCache.update(c, descriptions, configs, offsets);
+                      var stats = statisticsCache.get(c);
+                      return createList(
+                          topics,
+                          descriptions,
+                          configs,
+                          offsets,
+                          stats.getMetrics(),
+                          stats.getClusterState()
+                      );
+                    }))).flatMap(Function.identity());
   }
 
   private Mono<InternalTopic> loadTopic(KafkaCluster c, String topicName) {
@@ -95,8 +96,8 @@ public class TopicsService {
   }
 
   /**
-   *  After creation topic can be invisible via API for some time.
-   *  To workaround this, we retyring topic loading until it becomes visible.
+   * After creation, a topic can be invisible via the API for some time.
+   * To work around this, we retry topic loading until it becomes visible.
    */
   private Mono<InternalTopic> loadTopicAfterCreation(KafkaCluster c, String topicName) {
     return loadTopic(c, topicName)
@@ -122,7 +123,7 @@ public class TopicsService {
                                          Map<String, List<ConfigEntry>> configs,
                                          InternalPartitionsOffsets partitionsOffsets,
                                          Metrics metrics,
-                                         InternalLogDirStats logDirInfo) {
+                                         ScrapedClusterState clusterState) {
     return orderedNames.stream()
         .filter(descriptions::containsKey)
         .map(t -> InternalTopic.from(
@@ -130,7 +131,8 @@ public class TopicsService {
             configs.getOrDefault(t, List.of()),
             partitionsOffsets,
             metrics,
-            logDirInfo,
+            Optional.ofNullable(clusterState.getTopicStates().get(t)).map(TopicState::segmentStats).orElse(null),
+            Optional.ofNullable(clusterState.getTopicStates().get(t)).map(TopicState::partitionsSegmentStats).orElse(null),
             clustersProperties.getInternalTopicPrefix()
         ))
         .collect(toList());
@@ -225,7 +227,7 @@ public class TopicsService {
   }
 
   public Mono<InternalTopic> updateTopic(KafkaCluster cl, String topicName,
-                                    Mono<TopicUpdateDTO> topicUpdate) {
+                                         Mono<TopicUpdateDTO> topicUpdate) {
     return topicUpdate
         .flatMap(t -> updateTopic(cl, topicName, t));
   }
@@ -444,17 +446,21 @@ public class TopicsService {
 
   public Mono<List<InternalTopic>> getTopicsForPagination(KafkaCluster cluster) {
     Statistics stats = statisticsCache.get(cluster);
-    return filterExisting(cluster, stats.getTopicDescriptions().keySet())
+    Map<String, TopicState> topicStates = stats.getClusterState().getTopicStates();
+    return filterExisting(cluster, topicStates.keySet())
         .map(lst -> lst.stream()
             .map(topicName ->
                 InternalTopic.from(
-                    stats.getTopicDescriptions().get(topicName),
-                    stats.getTopicConfigs().getOrDefault(topicName, List.of()),
+                    topicStates.get(topicName).description(),
+                    topicStates.get(topicName).configs(),
                     InternalPartitionsOffsets.empty(),
                     stats.getMetrics(),
-                    stats.getLogDirInfo(),
+                    Optional.ofNullable(topicStates.get(topicName))
+                        .map(TopicState::segmentStats).orElse(null),
+                    Optional.ofNullable(topicStates.get(topicName))
+                        .map(TopicState::partitionsSegmentStats).orElse(null),
                     clustersProperties.getInternalTopicPrefix()
-                    ))
+                ))
             .collect(toList())
         );
   }

+ 25 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/graphs/GraphDescription.java

@@ -0,0 +1,25 @@
+package com.provectus.kafka.ui.service.graphs;
+
+import java.time.Duration;
+import java.util.Set;
+import javax.annotation.Nullable;
+import lombok.Builder;
+
+@Builder
+public record GraphDescription(String id,
+                               @Nullable Duration defaultInterval, //null for instant queries, set for range
+                               String prometheusQuery,
+                               Set<String> params) {
+
+  public static GraphDescriptionBuilder instant() {
+    return builder();
+  }
+
+  public static GraphDescriptionBuilder range(Duration defaultInterval) {
+    return builder().defaultInterval(defaultInterval);
+  }
+
+  public boolean isRange() {
+    return defaultInterval != null;
+  }
+}

+ 74 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/graphs/GraphDescriptions.java

@@ -0,0 +1,74 @@
+package com.provectus.kafka.ui.service.graphs;
+
+import static java.util.stream.Collectors.toMap;
+
+import com.provectus.kafka.ui.exception.ValidationException;
+import java.time.Duration;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Stream;
+import org.springframework.stereotype.Component;
+
+@Component
+class GraphDescriptions {
+
+  private static final Duration DEFAULT_RANGE_DURATION = Duration.ofDays(7);
+
+  private final Map<String, GraphDescription> graphsById;
+
+  GraphDescriptions() {
+    validate();
+    this.graphsById = PREDEFINED_GRAPHS.stream().collect(toMap(GraphDescription::id, d -> d));
+  }
+
+  Optional<GraphDescription> getById(String id) {
+    return Optional.ofNullable(graphsById.get(id));
+  }
+
+  Stream<GraphDescription> all() {
+    return graphsById.values().stream();
+  }
+
+  private void validate() {
+    Map<String, String> errors = new HashMap<>();
+    for (GraphDescription description : PREDEFINED_GRAPHS) {
+      new PromQueryTemplate(description)
+          .validateSyntax()
+          .ifPresent(err -> errors.put(description.id(), err));
+    }
+    if (!errors.isEmpty()) {
+      throw new ValidationException("Error validating queries for following graphs: " + errors);
+    }
+  }
+
+  private static final List<GraphDescription> PREDEFINED_GRAPHS = List.of(
+
+      GraphDescription.range(DEFAULT_RANGE_DURATION)
+          .id("broker_bytes_disk_ts")
+          .prometheusQuery("broker_bytes_disk{cluster=\"${cluster}\"}")
+          .params(Set.of())
+          .build(),
+
+      GraphDescription.instant()
+          .id("broker_bytes_disk")
+          .prometheusQuery("broker_bytes_disk{cluster=\"${cluster}\"}")
+          .params(Set.of())
+          .build(),
+
+      GraphDescription.instant()
+          .id("kafka_topic_partition_current_offset")
+          .prometheusQuery("kafka_topic_partition_current_offset{cluster=\"${cluster}\"}")
+          .params(Set.of())
+          .build(),
+
+      GraphDescription.range(DEFAULT_RANGE_DURATION)
+          .id("kafka_topic_partition_current_offset_per_topic_ts")
+          .prometheusQuery("kafka_topic_partition_current_offset{cluster=\"${cluster}\",topic = \"${topic}\"}")
+          .params(Set.of("topic"))
+          .build()
+  );
+
+}

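Since validate() runs in the constructor of this Spring component, a malformed template fails the application context at startup instead of at query time. For illustration, a hypothetical entry with an unbalanced brace would be rejected:

    GraphDescription.instant()
        .id("broken_graph")
        .prometheusQuery("broker_bytes_disk{cluster=\"${cluster}\"") // missing closing '}'
        .params(Set.of())
        .build();
    // if added to PREDEFINED_GRAPHS, startup fails with:
    // ValidationException: Error validating queries for the following graphs: {broken_graph=Syntax error}
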
+ 95 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/graphs/GraphsService.java

@@ -0,0 +1,95 @@
+package com.provectus.kafka.ui.service.graphs;
+
+import com.google.common.base.Preconditions;
+import com.provectus.kafka.ui.exception.NotFoundException;
+import com.provectus.kafka.ui.exception.ValidationException;
+import com.provectus.kafka.ui.model.KafkaCluster;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Stream;
+import javax.annotation.Nullable;
+import lombok.RequiredArgsConstructor;
+import org.springframework.stereotype.Component;
+import prometheus.query.api.PrometheusClientApi;
+import prometheus.query.model.QueryResponse;
+import reactor.core.publisher.Mono;
+
+@Component
+@RequiredArgsConstructor
+public class GraphsService {
+
+  private static final int TARGET_MATRIX_DATA_POINTS = 200;
+
+  private final GraphDescriptions graphDescriptions;
+
+  public Mono<QueryResponse> getGraphData(KafkaCluster cluster,
+                                          String id,
+                                          @Nullable Instant from,
+                                          @Nullable Instant to,
+                                          @Nullable Map<String, String> params) {
+
+    var graph = graphDescriptions.getById(id)
+        .orElseThrow(() -> new NotFoundException("No graph found with id = " + id));
+
+    var promClient = cluster.getPrometheusStorageClient();
+    if (promClient == null) {
+      throw new ValidationException("Prometheus not configured for cluster");
+    }
+    String preparedQuery = prepareQuery(graph, cluster.getName(), params);
+    return promClient
+        .mono(client -> {
+          if (graph.isRange()) {
+            return queryRange(client, preparedQuery, graph.defaultInterval(), from, to);
+          }
+          return queryInstant(client, preparedQuery);
+        });
+  }
+
+  private Mono<QueryResponse> queryRange(PrometheusClientApi c,
+                                         String preparedQuery,
+                                         Duration defaultPeriod,
+                                         @Nullable Instant from,
+                                         @Nullable Instant to) {
+    if (from == null) {
+      from = Instant.now().minus(defaultPeriod);
+    }
+    if (to == null) {
+      to = Instant.now();
+    }
+    Preconditions.checkArgument(to.isAfter(from));
+    return c.queryRange(
+        preparedQuery,
+        String.valueOf(from.getEpochSecond()),
+        String.valueOf(to.getEpochSecond()),
+        calculateStepSize(from, to),
+        null
+    );
+  }
+
+  private String calculateStepSize(Instant from, Instant to) {
+    long intervalInSecs = to.getEpochSecond() - from.getEpochSecond();
+    if (intervalInSecs <= TARGET_MATRIX_DATA_POINTS) {
+      return intervalInSecs + "s";
+    }
+    int step = ((int) (((double) intervalInSecs) / TARGET_MATRIX_DATA_POINTS));
+    return step + "s";
+  }
+
+  private Mono<QueryResponse> queryInstant(PrometheusClientApi c, String preparedQuery) {
+    return c.query(preparedQuery, null, null);
+  }
+
+  private String prepareQuery(GraphDescription d, String clusterName, @Nullable Map<String, String> params) {
+    return new PromQueryTemplate(d).getQuery(clusterName, Optional.ofNullable(params).orElse(Map.of()));
+  }
+
+  public Stream<GraphDescription> getGraphs(KafkaCluster cluster) {
+    if (cluster.getPrometheusStorageClient() == null) {
+      return Stream.empty();
+    }
+    return graphDescriptions.all();
+  }
+
+}

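Worked example for the default range: 7 days is 604 800 seconds, and 604 800 / 200 = 3 024, so queryRange() is issued with step "3024s" and Prometheus returns roughly 200 samples per series; ranges of 200 seconds or less fall back to the 1-second floor.
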
+ 35 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/graphs/PromQueryLangGrammar.java

@@ -0,0 +1,35 @@
+package com.provectus.kafka.ui.service.graphs;
+
+import java.util.Optional;
+import org.antlr.v4.runtime.BailErrorStrategy;
+import org.antlr.v4.runtime.CharStreams;
+import org.antlr.v4.runtime.CommonTokenStream;
+import org.antlr.v4.runtime.misc.ParseCancellationException;
+import promql.PromQLLexer;
+import promql.PromQLParser;
+
+class PromQueryLangGrammar {
+
+  // returns error msg, or empty if query is valid
+  static Optional<String> validateExpression(String query) {
+    try {
+      parseExpression(query);
+      return Optional.empty();
+    } catch (ParseCancellationException e) {
+      //TODO: add more descriptive msg
+      return Optional.of("Syntax error");
+    }
+  }
+
+  static PromQLParser.ExpressionContext parseExpression(String query) {
+    return createParser(query).expression();
+  }
+
+  private static PromQLParser createParser(String str) {
+    var parser = new PromQLParser(new CommonTokenStream(new PromQLLexer(CharStreams.fromString(str))));
+    parser.removeErrorListeners();
+    parser.setErrorHandler(new BailErrorStrategy());
+    return parser;
+  }
+
+}

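A usage sketch of the intended contract (both methods are package-private, the metric name is made up, and this assumes the bundled grammar accepts standard PromQL):

    // well-formed query -> Optional.empty()
    PromQueryLangGrammar.validateExpression("sum(rate(some_metric_total[5m])) by (topic)");
    // unparsable input -> Optional.of("Syntax error")
    PromQueryLangGrammar.validateExpression("sum(rate(");
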
+ 51 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/graphs/PromQueryTemplate.java

@@ -0,0 +1,51 @@
+package com.provectus.kafka.ui.service.graphs;
+
+import com.google.common.collect.Sets;
+import com.provectus.kafka.ui.exception.ValidationException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import org.apache.commons.lang3.text.StrSubstitutor;
+
+class PromQueryTemplate {
+
+  private static final String CLUSTER_LABEL_NAME = "cluster";
+
+  private final String queryTemplate;
+  private final Set<String> paramsNames;
+
+  PromQueryTemplate(GraphDescription d) {
+    this(d.prometheusQuery(), d.params());
+  }
+
+  PromQueryTemplate(String templateQueryString, Set<String> paramsNames) {
+    this.queryTemplate = templateQueryString;
+    this.paramsNames = paramsNames;
+  }
+
+  String getQuery(String clusterName, Map<String, String> paramValues) {
+    var missingParams = Sets.difference(paramsNames, paramValues.keySet());
+    if (!missingParams.isEmpty()) {
+      throw new ValidationException("Not all params set for query, missing: " + missingParams);
+    }
+    Map<String, String> replacements = new HashMap<>(paramValues);
+    replacements.put(CLUSTER_LABEL_NAME, clusterName);
+    return replaceParams(replacements);
+  }
+
+  // returns error msg or empty if no errors found
+  Optional<String> validateSyntax() {
+    Map<String, String> fakeReplacements = new HashMap<>();
+    fakeReplacements.put(CLUSTER_LABEL_NAME, "1");
+    paramsNames.forEach(paramName -> fakeReplacements.put(paramName, "1"));
+
+    String prepared = replaceParams(fakeReplacements);
+    return PromQueryLangGrammar.validateExpression(prepared);
+  }
+
+  private String replaceParams(Map<String, String> replacements) {
+    return new StrSubstitutor(replacements).replace(queryTemplate);
+  }
+
+}

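A short example of the template lifecycle (illustrative values; the class is package-private):

    var template = new PromQueryTemplate(
        "kafka_topic_partition_current_offset{cluster=\"${cluster}\",topic=\"${topic}\"}",
        Set.of("topic"));

    template.getQuery("local", Map.of("topic", "orders"));
    // -> kafka_topic_partition_current_offset{cluster="local",topic="orders"}

    template.getQuery("local", Map.of());
    // -> ValidationException: Not all params set for query, missing: [topic]
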
+ 12 - 11
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/integration/odd/TopicsExporter.java

@@ -1,8 +1,9 @@
 package com.provectus.kafka.ui.service.integration.odd;
 
+import static com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState.TopicState;
+
 import com.google.common.collect.ImmutableMap;
 import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.Statistics;
 import com.provectus.kafka.ui.service.StatisticsCache;
 import com.provectus.kafka.ui.service.integration.odd.schema.DataSetFieldsExtractors;
 import com.provectus.kafka.ui.sr.model.SchemaSubject;
@@ -37,10 +38,10 @@ class TopicsExporter {
 
   Flux<DataEntityList> export(KafkaCluster cluster) {
     String clusterOddrn = Oddrn.clusterOddrn(cluster);
-    Statistics stats = statisticsCache.get(cluster);
-    return Flux.fromIterable(stats.getTopicDescriptions().keySet())
+    var clusterState = statisticsCache.get(cluster).getClusterState();
+    return Flux.fromIterable(clusterState.getTopicStates().keySet())
         .filter(topicFilter)
-        .flatMap(topic -> createTopicDataEntity(cluster, topic, stats))
+        .flatMap(topic -> createTopicDataEntity(cluster, topic, clusterState.getTopicStates().get(topic)))
         .onErrorContinue(
             (th, topic) -> log.warn("Error exporting data for topic {}, cluster {}", topic, cluster.getName(), th))
         .buffer(100)
@@ -50,7 +51,7 @@ class TopicsExporter {
                 .items(topicsEntities));
   }
 
-  private Mono<DataEntity> createTopicDataEntity(KafkaCluster cluster, String topic, Statistics stats) {
+  private Mono<DataEntity> createTopicDataEntity(KafkaCluster cluster, String topic, TopicState topicState) {
     KafkaPath topicOddrnPath = Oddrn.topicOddrnPath(cluster, topic);
     return
         Mono.zip(
@@ -70,13 +71,13 @@ class TopicsExporter {
                       .addMetadataItem(
                           new MetadataExtension()
                               .schemaUrl(URI.create("wontbeused.oops"))
-                              .metadata(getTopicMetadata(topic, stats)));
+                              .metadata(getTopicMetadata(topicState)));
                 }
             );
   }
 
-  private Map<String, Object> getNonDefaultConfigs(String topic, Statistics stats) {
-    List<ConfigEntry> config = stats.getTopicConfigs().get(topic);
+  private Map<String, Object> getNonDefaultConfigs(TopicState topicState) {
+    List<ConfigEntry> config = topicState.configs();
     if (config == null) {
       return Map.of();
     }
@@ -85,12 +86,12 @@ class TopicsExporter {
         .collect(Collectors.toMap(ConfigEntry::name, ConfigEntry::value));
   }
 
-  private Map<String, Object> getTopicMetadata(String topic, Statistics stats) {
-    TopicDescription topicDescription = stats.getTopicDescriptions().get(topic);
+  private Map<String, Object> getTopicMetadata(TopicState topicState) {
+    TopicDescription topicDescription = topicState.description();
     return ImmutableMap.<String, Object>builder()
         .put("partitions", topicDescription.partitions().size())
         .put("replication_factor", topicDescription.partitions().get(0).replicas().size())
-        .putAll(getNonDefaultConfigs(topic, stats))
+        .putAll(getNonDefaultConfigs(topicState))
         .build();
   }
 

+ 0 - 69
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/MetricsCollector.java

@@ -1,69 +0,0 @@
-package com.provectus.kafka.ui.service.metrics;
-
-import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.Metrics;
-import com.provectus.kafka.ui.model.MetricsConfig;
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-import lombok.RequiredArgsConstructor;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.common.Node;
-import org.springframework.stereotype.Component;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-import reactor.util.function.Tuple2;
-import reactor.util.function.Tuples;
-
-@Component
-@Slf4j
-@RequiredArgsConstructor
-public class MetricsCollector {
-
-  private final JmxMetricsRetriever jmxMetricsRetriever;
-  private final PrometheusMetricsRetriever prometheusMetricsRetriever;
-
-  public Mono<Metrics> getBrokerMetrics(KafkaCluster cluster, Collection<Node> nodes) {
-    return Flux.fromIterable(nodes)
-        .flatMap(n -> getMetrics(cluster, n).map(lst -> Tuples.of(n, lst)))
-        .collectMap(Tuple2::getT1, Tuple2::getT2)
-        .map(nodeMetrics -> collectMetrics(cluster, nodeMetrics))
-        .defaultIfEmpty(Metrics.empty());
-  }
-
-  private Mono<List<RawMetric>> getMetrics(KafkaCluster kafkaCluster, Node node) {
-    Flux<RawMetric> metricFlux = Flux.empty();
-    if (kafkaCluster.getMetricsConfig() != null) {
-      String type = kafkaCluster.getMetricsConfig().getType();
-      if (type == null || type.equalsIgnoreCase(MetricsConfig.JMX_METRICS_TYPE)) {
-        metricFlux = jmxMetricsRetriever.retrieve(kafkaCluster, node);
-      } else if (type.equalsIgnoreCase(MetricsConfig.PROMETHEUS_METRICS_TYPE)) {
-        metricFlux = prometheusMetricsRetriever.retrieve(kafkaCluster, node);
-      }
-    }
-    return metricFlux.collectList();
-  }
-
-  public Metrics collectMetrics(KafkaCluster cluster, Map<Node, List<RawMetric>> perBrokerMetrics) {
-    Metrics.MetricsBuilder builder = Metrics.builder()
-        .perBrokerMetrics(
-            perBrokerMetrics.entrySet()
-                .stream()
-                .collect(Collectors.toMap(e -> e.getKey().id(), Map.Entry::getValue)));
-
-    populateWellknowMetrics(cluster, perBrokerMetrics)
-        .apply(builder);
-
-    return builder.build();
-  }
-
-  private WellKnownMetrics populateWellknowMetrics(KafkaCluster cluster, Map<Node, List<RawMetric>> perBrokerMetrics) {
-    WellKnownMetrics wellKnownMetrics = new WellKnownMetrics();
-    perBrokerMetrics.forEach((node, metrics) ->
-        metrics.forEach(metric ->
-            wellKnownMetrics.populate(node, metric)));
-    return wellKnownMetrics;
-  }
-
-}

+ 0 - 9
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/MetricsRetriever.java

@@ -1,9 +0,0 @@
-package com.provectus.kafka.ui.service.metrics;
-
-import com.provectus.kafka.ui.model.KafkaCluster;
-import org.apache.kafka.common.Node;
-import reactor.core.publisher.Flux;
-
-interface MetricsRetriever {
-  Flux<RawMetric> retrieve(KafkaCluster c, Node node);
-}

+ 0 - 46
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/PrometheusEndpointMetricsParser.java

@@ -1,46 +0,0 @@
-package com.provectus.kafka.ui.service.metrics;
-
-import java.math.BigDecimal;
-import java.util.Arrays;
-import java.util.Optional;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import java.util.stream.Collectors;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.commons.lang3.math.NumberUtils;
-
-@Slf4j
-class PrometheusEndpointMetricsParser {
-
-  /**
-   * Matches openmetrics format. For example, string:
-   * kafka_server_BrokerTopicMetrics_FiveMinuteRate{name="BytesInPerSec",topic="__consumer_offsets",} 16.94886650744339
-   * will produce:
-   * name=kafka_server_BrokerTopicMetrics_FiveMinuteRate
-   * value=16.94886650744339
-   * labels={name="BytesInPerSec", topic="__consumer_offsets"}",
-   */
-  private static final Pattern PATTERN = Pattern.compile(
-      "(?<metricName>^\\w+)([ \t]*\\{*(?<properties>.*)}*)[ \\t]+(?<value>[\\d]+\\.?[\\d]+)?");
-
-  static Optional<RawMetric> parse(String s) {
-    Matcher matcher = PATTERN.matcher(s);
-    if (matcher.matches()) {
-      String value = matcher.group("value");
-      String metricName = matcher.group("metricName");
-      if (metricName == null || !NumberUtils.isCreatable(value)) {
-        return Optional.empty();
-      }
-      var labels = Arrays.stream(matcher.group("properties").split(","))
-          .filter(str -> !"".equals(str))
-          .map(str -> str.split("="))
-          .filter(spit -> spit.length == 2)
-          .collect(Collectors.toUnmodifiableMap(
-              str -> str[0].trim(),
-              str -> str[1].trim().replace("\"", "")));
-
-      return Optional.of(RawMetric.create(metricName, labels, new BigDecimal(value)));
-    }
-    return Optional.empty();
-  }
-}

+ 0 - 70
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/PrometheusMetricsRetriever.java

@@ -1,70 +0,0 @@
-package com.provectus.kafka.ui.service.metrics;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Strings;
-import com.provectus.kafka.ui.config.ClustersProperties;
-import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.MetricsConfig;
-import com.provectus.kafka.ui.util.WebClientConfigurator;
-import java.util.Arrays;
-import java.util.Optional;
-import lombok.extern.slf4j.Slf4j;
-import org.apache.kafka.common.Node;
-import org.springframework.stereotype.Service;
-import org.springframework.util.unit.DataSize;
-import org.springframework.web.reactive.function.client.WebClient;
-import org.springframework.web.util.UriComponentsBuilder;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-
-@Service
-@Slf4j
-class PrometheusMetricsRetriever implements MetricsRetriever {
-
-  private static final String METRICS_ENDPOINT_PATH = "/metrics";
-  private static final int DEFAULT_EXPORTER_PORT = 11001;
-
-  @Override
-  public Flux<RawMetric> retrieve(KafkaCluster c, Node node) {
-    log.debug("Retrieving metrics from prometheus exporter: {}:{}", node.host(), c.getMetricsConfig().getPort());
-
-    MetricsConfig metricsConfig = c.getMetricsConfig();
-    var webClient = new WebClientConfigurator()
-        .configureBufferSize(DataSize.ofMegabytes(20))
-        .configureBasicAuth(metricsConfig.getUsername(), metricsConfig.getPassword())
-        .configureSsl(
-            c.getOriginalProperties().getSsl(),
-            new ClustersProperties.KeystoreConfig(
-                metricsConfig.getKeystoreLocation(),
-                metricsConfig.getKeystorePassword()))
-        .build();
-
-    return retrieve(webClient, node.host(), c.getMetricsConfig());
-  }
-
-  @VisibleForTesting
-  Flux<RawMetric> retrieve(WebClient webClient, String host, MetricsConfig metricsConfig) {
-    int port = Optional.ofNullable(metricsConfig.getPort()).orElse(DEFAULT_EXPORTER_PORT);
-    boolean sslEnabled = metricsConfig.isSsl() || metricsConfig.getKeystoreLocation() != null;
-    var request = webClient.get()
-        .uri(UriComponentsBuilder.newInstance()
-            .scheme(sslEnabled ? "https" : "http")
-            .host(host)
-            .port(port)
-            .path(METRICS_ENDPOINT_PATH).build().toUri());
-
-    WebClient.ResponseSpec responseSpec = request.retrieve();
-    return responseSpec.bodyToMono(String.class)
-        .doOnError(e -> log.error("Error while getting metrics from {}", host, e))
-        .onErrorResume(th -> Mono.empty())
-        .flatMapMany(body ->
-            Flux.fromStream(
-                Arrays.stream(body.split("\\n"))
-                    .filter(str -> !Strings.isNullOrEmpty(str) && !str.startsWith("#")) // skipping comments strings
-                    .map(PrometheusEndpointMetricsParser::parse)
-                    .filter(Optional::isPresent)
-                    .map(Optional::get)
-            )
-        );
-  }
-}

+ 22 - 37
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/RawMetric.java

@@ -1,10 +1,15 @@
 package com.provectus.kafka.ui.service.metrics;
 
+import static io.prometheus.client.Collector.MetricFamilySamples;
+import static io.prometheus.client.Collector.Type;
+
 import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
-import lombok.AllArgsConstructor;
-import lombok.EqualsAndHashCode;
-import lombok.ToString;
+import java.util.stream.Stream;
 
 public interface RawMetric {
 
@@ -14,47 +19,27 @@ public interface RawMetric {
 
   BigDecimal value();
 
-  // Key, that can be used for metrics reductions
-  default Object identityKey() {
-    return name() + "_" + labels();
-  }
-
-  RawMetric copyWithValue(BigDecimal newValue);
-
   //--------------------------------------------------
 
   static RawMetric create(String name, Map<String, String> labels, BigDecimal value) {
     return new SimpleMetric(name, labels, value);
   }
 
-  @AllArgsConstructor
-  @EqualsAndHashCode
-  @ToString
-  class SimpleMetric implements RawMetric {
-
-    private final String name;
-    private final Map<String, String> labels;
-    private final BigDecimal value;
-
-    @Override
-    public String name() {
-      return name;
-    }
-
-    @Override
-    public Map<String, String> labels() {
-      return labels;
-    }
-
-    @Override
-    public BigDecimal value() {
-      return value;
-    }
-
-    @Override
-    public RawMetric copyWithValue(BigDecimal newValue) {
-      return new SimpleMetric(name, labels, newValue);
+  static Stream<MetricFamilySamples> groupIntoMfs(Collection<RawMetric> rawMetrics) {
+    Map<String, MetricFamilySamples> map = new LinkedHashMap<>();
+    for (RawMetric m : rawMetrics) {
+      var mfs = map.get(m.name());
+      if (mfs == null) {
+        mfs = new MetricFamilySamples(m.name(), Type.GAUGE, m.name(), new ArrayList<>());
+        map.put(m.name(), mfs);
+      }
+      List<String> lbls = m.labels().keySet().stream().toList();
+      List<String> lblVals = lbls.stream().map(l -> m.labels().get(l)).toList();
+      mfs.samples.add(new MetricFamilySamples.Sample(m.name(), lbls, lblVals, m.value().doubleValue()));
     }
+    return map.values().stream();
   }
 
+  record SimpleMetric(String name, Map<String, String> labels, BigDecimal value) implements RawMetric { }
+
 }

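The new grouping helper folds same-named raw metrics into a single GAUGE family. A sketch with made-up values:

    var raw = List.of(
        RawMetric.create("broker_requests_total", Map.of("broker", "1"), new BigDecimal("10")),
        RawMetric.create("broker_requests_total", Map.of("broker", "2"), new BigDecimal("15")));
    List<MetricFamilySamples> families = RawMetric.groupIntoMfs(raw).toList();
    // one family "broker_requests_total" (type GAUGE) holding two samples:
    // {broker="1"} 10.0 and {broker="2"} 15.0
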
+ 73 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/SummarizedMetrics.java

@@ -0,0 +1,73 @@
+package com.provectus.kafka.ui.service.metrics;
+
+import static java.util.stream.Collectors.toMap;
+
+import com.google.common.collect.Streams;
+import com.provectus.kafka.ui.model.Metrics;
+import groovy.lang.Tuple;
+import io.prometheus.client.Collector.MetricFamilySamples;
+import java.util.Collection;
+import java.util.LinkedHashMap;
+import java.util.Optional;
+import java.util.stream.Stream;
+import lombok.RequiredArgsConstructor;
+
+@RequiredArgsConstructor
+public class SummarizedMetrics {
+
+  private final Metrics metrics;
+
+  public Stream<MetricFamilySamples> asStream() {
+    return Streams.concat(
+        metrics.getInferredMetrics().asStream(),
+        metrics.getPerBrokerScrapedMetrics()
+            .values()
+            .stream()
+            .flatMap(Collection::stream)
+            .collect(toMap(mfs -> mfs.name, Optional::of, SummarizedMetrics::summarizeMfs, LinkedHashMap::new))
+            .values()
+            .stream()
+            .filter(Optional::isPresent)
+            .map(Optional::get)
+    );
+  }
+
+  //returns Optional.empty if merging not supported for metric type
+  private static Optional<MetricFamilySamples> summarizeMfs(Optional<MetricFamilySamples> mfs1opt,
+                                                            Optional<MetricFamilySamples> mfs2opt) {
+    if ((mfs1opt.isEmpty() || mfs2opt.isEmpty()) || (mfs1opt.get().type != mfs2opt.get().type)) {
+      return Optional.empty();
+    }
+    var mfs1 = mfs1opt.get();
+    return switch (mfs1.type) {
+      case GAUGE, COUNTER -> Optional.of(
+          new MetricFamilySamples(
+              mfs1.name,
+              mfs1.type,
+              mfs1.help,
+              Stream.concat(mfs1.samples.stream(), mfs2opt.get().samples.stream())
+                  .collect(
+                      toMap(
+                          // merging samples with same labels
+                          s -> Tuple.tuple(s.name, s.labelNames, s.labelValues),
+                          s -> s,
+                          (s1, s2) -> new MetricFamilySamples.Sample(
+                              s1.name,
+                              s1.labelNames,
+                              s1.labelValues,
+                              s1.value + s2.value
+                          ),
+                          LinkedHashMap::new
+                      )
+                  )
+                  .values()
+                  .stream()
+                  .toList()
+          )
+      );
+      default -> Optional.empty();
+    };
+  }
+
+
+}

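summarizeMfs only merges GAUGE and COUNTER families; other types yield Optional.empty() and are filtered out of the summary stream. For mergeable types, samples with identical label sets are summed: if broker 1 reports broker_bytes_disk{dir="/data"} 100 and broker 2 reports 200 for the same labels, the summarized stream carries a single sample with value 300, while samples whose labels differ are kept side by side.
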
+ 0 - 67
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/WellKnownMetrics.java

@@ -1,67 +0,0 @@
-package com.provectus.kafka.ui.service.metrics;
-
-import static org.apache.commons.lang3.StringUtils.containsIgnoreCase;
-import static org.apache.commons.lang3.StringUtils.endsWithIgnoreCase;
-
-import com.provectus.kafka.ui.model.Metrics;
-import java.math.BigDecimal;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.kafka.common.Node;
-
-class WellKnownMetrics {
-
-  // per broker
-  final Map<Integer, BigDecimal> brokerBytesInFifteenMinuteRate = new HashMap<>();
-  final Map<Integer, BigDecimal> brokerBytesOutFifteenMinuteRate = new HashMap<>();
-
-  // per topic
-  final Map<String, BigDecimal> bytesInFifteenMinuteRate = new HashMap<>();
-  final Map<String, BigDecimal> bytesOutFifteenMinuteRate = new HashMap<>();
-
-  void populate(Node node, RawMetric rawMetric) {
-    updateBrokerIOrates(node, rawMetric);
-    updateTopicsIOrates(rawMetric);
-  }
-
-  void apply(Metrics.MetricsBuilder metricsBuilder) {
-    metricsBuilder.topicBytesInPerSec(bytesInFifteenMinuteRate);
-    metricsBuilder.topicBytesOutPerSec(bytesOutFifteenMinuteRate);
-    metricsBuilder.brokerBytesInPerSec(brokerBytesInFifteenMinuteRate);
-    metricsBuilder.brokerBytesOutPerSec(brokerBytesOutFifteenMinuteRate);
-  }
-
-  private void updateBrokerIOrates(Node node, RawMetric rawMetric) {
-    String name = rawMetric.name();
-    if (!brokerBytesInFifteenMinuteRate.containsKey(node.id())
-        && rawMetric.labels().size() == 1
-        && "BytesInPerSec".equalsIgnoreCase(rawMetric.labels().get("name"))
-        && containsIgnoreCase(name, "BrokerTopicMetrics")
-        && endsWithIgnoreCase(name, "FifteenMinuteRate")) {
-      brokerBytesInFifteenMinuteRate.put(node.id(),  rawMetric.value());
-    }
-    if (!brokerBytesOutFifteenMinuteRate.containsKey(node.id())
-        && rawMetric.labels().size() == 1
-        && "BytesOutPerSec".equalsIgnoreCase(rawMetric.labels().get("name"))
-        && containsIgnoreCase(name, "BrokerTopicMetrics")
-        && endsWithIgnoreCase(name, "FifteenMinuteRate")) {
-      brokerBytesOutFifteenMinuteRate.put(node.id(), rawMetric.value());
-    }
-  }
-
-  private void updateTopicsIOrates(RawMetric rawMetric) {
-    String name = rawMetric.name();
-    String topic = rawMetric.labels().get("topic");
-    if (topic != null
-        && containsIgnoreCase(name, "BrokerTopicMetrics")
-        && endsWithIgnoreCase(name, "FifteenMinuteRate")) {
-      String nameProperty = rawMetric.labels().get("name");
-      if ("BytesInPerSec".equalsIgnoreCase(nameProperty)) {
-        bytesInFifteenMinuteRate.compute(topic, (k, v) -> v == null ? rawMetric.value() : v.add(rawMetric.value()));
-      } else if ("BytesOutPerSec".equalsIgnoreCase(nameProperty)) {
-        bytesOutFifteenMinuteRate.compute(topic, (k, v) -> v == null ? rawMetric.value() : v.add(rawMetric.value()));
-      }
-    }
-  }
-
-}

+ 123 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/prometheus/PrometheusExpose.java

@@ -0,0 +1,123 @@
+package com.provectus.kafka.ui.service.metrics.prometheus;
+
+import static io.prometheus.client.Collector.MetricFamilySamples;
+import static io.prometheus.client.exporter.common.TextFormat.CONTENT_TYPE_OPENMETRICS_100;
+import static org.springframework.http.HttpHeaders.CONTENT_TYPE;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Iterators;
+import com.provectus.kafka.ui.model.Metrics;
+import io.prometheus.client.exporter.common.TextFormat;
+import java.io.StringWriter;
+import java.util.ArrayList;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import lombok.SneakyThrows;
+import org.springframework.http.HttpHeaders;
+import org.springframework.http.ResponseEntity;
+
+public final class PrometheusExpose {
+
+  private static final String CLUSTER_EXPOSE_LBL_NAME = "cluster";
+  private static final String BROKER_EXPOSE_LBL_NAME = "broker_id";
+
+  private static final HttpHeaders PROMETHEUS_EXPOSE_ENDPOINT_HEADERS;
+
+  static {
+    PROMETHEUS_EXPOSE_ENDPOINT_HEADERS = new HttpHeaders();
+    PROMETHEUS_EXPOSE_ENDPOINT_HEADERS.set(CONTENT_TYPE, CONTENT_TYPE_OPENMETRICS_100);
+  }
+
+  private PrometheusExpose() {
+  }
+
+  public static ResponseEntity<String> exposeAllMetrics(Map<String, Metrics> clustersMetrics) {
+    return constructHttpsResponse(getMetricsForGlobalExpose(clustersMetrics));
+  }
+
+  private static Stream<MetricFamilySamples> getMetricsForGlobalExpose(Map<String, Metrics> clustersMetrics) {
+    return clustersMetrics.entrySet()
+        .stream()
+        .flatMap(e -> prepareMetricsForGlobalExpose(e.getKey(), e.getValue()))
+        // merge MFS with the same name, using LinkedHashMap to keep insertion order
+        .collect(Collectors.toMap(mfs -> mfs.name, mfs -> mfs,
+            PrometheusExpose::concatSamples, LinkedHashMap::new))
+        .values()
+        .stream();
+  }
+
+  public static Stream<MetricFamilySamples> prepareMetricsForGlobalExpose(String clusterName, Metrics metrics) {
+    return Stream.concat(
+            metrics.getInferredMetrics().asStream(),
+            extractBrokerMetricsWithLabel(metrics)
+        )
+        .map(mfs -> appendLabel(mfs, CLUSTER_EXPOSE_LBL_NAME, clusterName));
+  }
+
+  private static Stream<MetricFamilySamples> extractBrokerMetricsWithLabel(Metrics metrics) {
+    return metrics.getPerBrokerScrapedMetrics().entrySet().stream()
+        .flatMap(e -> {
+          String brokerId = String.valueOf(e.getKey());
+          return e.getValue().stream().map(mfs -> appendLabel(mfs, BROKER_EXPOSE_LBL_NAME, brokerId));
+        });
+  }
+
+  private static MetricFamilySamples concatSamples(MetricFamilySamples mfs1,
+                                                   MetricFamilySamples mfs2) {
+    return new MetricFamilySamples(
+        mfs1.name, mfs1.unit, mfs1.type, mfs1.help,
+        Stream.concat(mfs1.samples.stream(), mfs2.samples.stream()).toList()
+    );
+  }
+
+  private static MetricFamilySamples appendLabel(MetricFamilySamples mfs, String lblName, String lblVal) {
+    return new MetricFamilySamples(
+        mfs.name, mfs.unit, mfs.type, mfs.help,
+        mfs.samples.stream()
+            .map(sample ->
+                new MetricFamilySamples.Sample(
+                    sample.name,
+                    prependToList(sample.labelNames, lblName),
+                    prependToList(sample.labelValues, lblVal),
+                    sample.value
+                )).toList()
+    );
+  }
+
+  private static <T> List<T> prependToList(List<T> lst, T toPrepend) {
+    var result = new ArrayList<T>(lst.size() + 1);
+    result.add(toPrepend);
+    result.addAll(lst);
+    return result;
+  }
+
+  @VisibleForTesting
+  @SneakyThrows
+  public static ResponseEntity<String> constructHttpsResponse(Stream<MetricFamilySamples> metrics) {
+    StringWriter writer = new StringWriter();
+    TextFormat.writeOpenMetrics100(writer, Iterators.asEnumeration(metrics.iterator()));
+    return ResponseEntity
+        .ok()
+        .headers(PROMETHEUS_EXPOSE_ENDPOINT_HEADERS)
+        .body(writer.toString());
+  }
+
+  // copied from io.prometheus.client.exporter.common.TextFormat:writeEscapedLabelValue
+  public static String escapedLabelValue(String s) {
+    StringBuilder sb = new StringBuilder(s.length());
+    for (int i = 0; i < s.length(); i++) {
+      char c = s.charAt(i);
+      switch (c) {
+        case '\\' -> sb.append("\\\\");
+        case '\"' -> sb.append("\\\"");
+        case '\n' -> sb.append("\\n");
+        default -> sb.append(c);
+      }
+    }
+    return sb.toString();
+  }
+
+}
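To make the relabeling above concrete: a sample scraped from broker 1 of cluster "prod" (hypothetical family name)

  kafka_server_bytes_in{topic="orders"} 42.0

gets broker_id prepended by extractBrokerMetricsWithLabel, and then the cluster label prepended on top of it, so it is exposed globally as:

  kafka_server_bytes_in{cluster="prod",broker_id="1",topic="orders"} 42.0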

+ 83 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/IoRatesMetricsScanner.java

@@ -0,0 +1,83 @@
+package com.provectus.kafka.ui.service.metrics.scrape;
+
+import static io.prometheus.client.Collector.MetricFamilySamples;
+import static org.apache.commons.lang3.StringUtils.containsIgnoreCase;
+import static org.apache.commons.lang3.StringUtils.endsWithIgnoreCase;
+
+import com.provectus.kafka.ui.model.Metrics;
+import java.math.BigDecimal;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+// Scans external JMX/Prometheus metrics and tries to infer IO rates
+class IoRatesMetricsScanner {
+
+  // per broker
+  final Map<Integer, BigDecimal> brokerBytesInFifteenMinuteRate = new HashMap<>();
+  final Map<Integer, BigDecimal> brokerBytesOutFifteenMinuteRate = new HashMap<>();
+
+  // per topic
+  final Map<String, BigDecimal> bytesInFifteenMinuteRate = new HashMap<>();
+  final Map<String, BigDecimal> bytesOutFifteenMinuteRate = new HashMap<>();
+
+  IoRatesMetricsScanner(Map<Integer, List<MetricFamilySamples>> perBrokerMetrics) {
+    perBrokerMetrics.forEach((nodeId, metrics) -> {
+      metrics.forEach(m -> {
+        m.samples.forEach(metricSample -> {
+          updateBrokerIOrates(nodeId, metricSample);
+          updateTopicsIOrates(metricSample);
+        });
+      });
+    });
+  }
+
+  Metrics.IoRates get() {
+    return Metrics.IoRates.builder()
+        .topicBytesInPerSec(bytesInFifteenMinuteRate)
+        .topicBytesOutPerSec(bytesOutFifteenMinuteRate)
+        .brokerBytesInPerSec(brokerBytesInFifteenMinuteRate)
+        .brokerBytesOutPerSec(brokerBytesOutFifteenMinuteRate)
+        .build();
+  }
+
+  private void updateBrokerIOrates(int nodeId, MetricFamilySamples.Sample metric) {
+    String name = metric.name;
+    if (!brokerBytesInFifteenMinuteRate.containsKey(nodeId)
+        && metric.labelValues.size() == 1
+        && "BytesInPerSec".equalsIgnoreCase(metric.labelValues.get(0))
+        && containsIgnoreCase(name, "BrokerTopicMetrics")
+        && endsWithIgnoreCase(name, "FifteenMinuteRate")) {
+      brokerBytesInFifteenMinuteRate.put(nodeId, BigDecimal.valueOf(metric.value));
+    }
+    if (!brokerBytesOutFifteenMinuteRate.containsKey(nodeId)
+        && metric.labelValues.size() == 1
+        && "BytesOutPerSec".equalsIgnoreCase(metric.labelValues.get(0))
+        && containsIgnoreCase(name, "BrokerTopicMetrics")
+        && endsWithIgnoreCase(name, "FifteenMinuteRate")) {
+      brokerBytesOutFifteenMinuteRate.put(nodeId, BigDecimal.valueOf(metric.value));
+    }
+  }
+
+  private void updateTopicsIOrates(MetricFamilySamples.Sample metric) {
+    String name = metric.name;
+    int topicLblIdx = metric.labelNames.indexOf("topic");
+    if (topicLblIdx >= 0
+        && containsIgnoreCase(name, "BrokerTopicMetrics")
+        && endsWithIgnoreCase(name, "FifteenMinuteRate")) {
+      String topic = metric.labelValues.get(topicLblIdx);
+      int nameLblIdx = metric.labelNames.indexOf("name");
+      if (nameLblIdx >= 0) {
+        var nameLblVal = metric.labelValues.get(nameLblIdx);
+        if ("BytesInPerSec".equalsIgnoreCase(nameLblVal)) {
+          BigDecimal val = BigDecimal.valueOf(metric.value);
+          bytesInFifteenMinuteRate.merge(topic, val, BigDecimal::add);
+        } else if ("BytesOutPerSec".equalsIgnoreCase(nameLblVal)) {
+          BigDecimal val = BigDecimal.valueOf(metric.value);
+          bytesOutFifteenMinuteRate.merge(topic, val, BigDecimal::add);
+        }
+      }
+    }
+  }
+
+}
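For reference, the samples the scanner above matches look like the following (hypothetical JMX-derived names and values; all name checks are case-insensitive):

  // per-broker rate: exactly one label, whose value is BytesInPerSec / BytesOutPerSec
  kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name="BytesInPerSec"} 1048576.0
  // per-topic rate: a "name" label plus a "topic" label; values are summed per topic
  kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name="BytesOutPerSec",topic="orders"} 524288.0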

+ 94 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/MetricsScrapping.java

@@ -0,0 +1,94 @@
+package com.provectus.kafka.ui.service.metrics.scrape;
+
+import static com.provectus.kafka.ui.config.ClustersProperties.Cluster;
+import static com.provectus.kafka.ui.config.ClustersProperties.KeystoreConfig;
+import static com.provectus.kafka.ui.model.MetricsScrapeProperties.JMX_METRICS_TYPE;
+import static com.provectus.kafka.ui.model.MetricsScrapeProperties.PROMETHEUS_METRICS_TYPE;
+import static io.prometheus.client.Collector.MetricFamilySamples;
+
+import com.provectus.kafka.ui.model.Metrics;
+import com.provectus.kafka.ui.model.MetricsScrapeProperties;
+import com.provectus.kafka.ui.service.metrics.prometheus.PrometheusExpose;
+import com.provectus.kafka.ui.service.metrics.scrape.inferred.InferredMetrics;
+import com.provectus.kafka.ui.service.metrics.scrape.inferred.InferredMetricsScraper;
+import com.provectus.kafka.ui.service.metrics.scrape.jmx.JmxMetricsRetriever;
+import com.provectus.kafka.ui.service.metrics.scrape.jmx.JmxMetricsScraper;
+import com.provectus.kafka.ui.service.metrics.scrape.prometheus.PrometheusScraper;
+import com.provectus.kafka.ui.service.metrics.sink.MetricsSink;
+import jakarta.annotation.Nullable;
+import java.util.Collection;
+import java.util.Optional;
+import java.util.stream.Stream;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.common.Node;
+import reactor.core.publisher.Mono;
+
+@Slf4j
+@RequiredArgsConstructor
+public class MetricsScrapping {
+
+  private final String clusterName;
+  private final MetricsSink sink;
+  private final InferredMetricsScraper inferredMetricsScraper;
+  @Nullable
+  private final JmxMetricsScraper jmxMetricsScraper;
+  @Nullable
+  private final PrometheusScraper prometheusScraper;
+
+  public static MetricsScrapping create(Cluster cluster,
+                                        JmxMetricsRetriever jmxMetricsRetriever) {
+    JmxMetricsScraper jmxMetricsScraper = null;
+    PrometheusScraper prometheusScraper = null;
+    var metrics = cluster.getMetrics();
+    if (cluster.getMetrics() != null) {
+      var scrapeProperties = MetricsScrapeProperties.create(cluster);
+      if (metrics.getType().equalsIgnoreCase(JMX_METRICS_TYPE) && metrics.getPort() != null) {
+        jmxMetricsScraper = new JmxMetricsScraper(scrapeProperties, jmxMetricsRetriever);
+      } else if (metrics.getType().equalsIgnoreCase(PROMETHEUS_METRICS_TYPE)) {
+        prometheusScraper = new PrometheusScraper(scrapeProperties);
+      }
+    }
+    return new MetricsScrapping(
+        cluster.getName(),
+        MetricsSink.create(cluster),
+        new InferredMetricsScraper(),
+        jmxMetricsScraper,
+        prometheusScraper
+    );
+  }
+
+  public Mono<Metrics> scrape(ScrapedClusterState clusterState, Collection<Node> nodes) {
+    Mono<InferredMetrics> inferred = inferredMetricsScraper.scrape(clusterState);
+    Mono<PerBrokerScrapedMetrics> external = scrapeExternal(nodes);
+    return inferred.zipWith(
+        external,
+        (inf, ext) -> Metrics.builder()
+            .inferredMetrics(inf)
+            .ioRates(ext.ioRates())
+            .perBrokerScrapedMetrics(ext.perBrokerMetrics())
+            .build()
+    ).doOnNext(this::sendMetricsToSink);
+  }
+
+  private void sendMetricsToSink(Metrics metrics) {
+    sink.send(prepareMetricsForSending(metrics))
+        .doOnError(th -> log.warn("Error sending metrics to metrics sink", th))
+        .subscribe();
+  }
+
+  private Stream<MetricFamilySamples> prepareMetricsForSending(Metrics metrics) {
+    return PrometheusExpose.prepareMetricsForGlobalExpose(clusterName, metrics);
+  }
+
+  private Mono<PerBrokerScrapedMetrics> scrapeExternal(Collection<Node> nodes) {
+    if (jmxMetricsScraper != null) {
+      return jmxMetricsScraper.scrape(nodes);
+    }
+    if (prometheusScraper != null) {
+      return prometheusScraper.scrape(nodes);
+    }
+    return Mono.just(PerBrokerScrapedMetrics.empty());
+  }
+
+}
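A usage sketch for the facade above, assuming a cluster config and the shared JmxMetricsRetriever bean are already wired:

  MetricsScrapping scrapping = MetricsScrapping.create(clusterConfig, jmxMetricsRetriever);
  scrapping.scrape(currentClusterState, clusterDescription.getNodes())
      .subscribe(metrics -> log.info("scraped metrics for {} brokers",
          metrics.getPerBrokerScrapedMetrics().size()));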

+ 19 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/PerBrokerScrapedMetrics.java

@@ -0,0 +1,19 @@
+package com.provectus.kafka.ui.service.metrics.scrape;
+
+import static io.prometheus.client.Collector.MetricFamilySamples;
+
+import com.provectus.kafka.ui.model.Metrics;
+import java.util.List;
+import java.util.Map;
+
+public record PerBrokerScrapedMetrics(Map<Integer, List<MetricFamilySamples>> perBrokerMetrics) {
+
+  static PerBrokerScrapedMetrics empty() {
+    return new PerBrokerScrapedMetrics(Map.of());
+  }
+
+  Metrics.IoRates ioRates() {
+    return new IoRatesMetricsScanner(perBrokerMetrics).get();
+  }
+
+}

+ 198 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/ScrapedClusterState.java

@@ -0,0 +1,198 @@
+package com.provectus.kafka.ui.service.metrics.scrape;
+
+import static com.provectus.kafka.ui.model.InternalLogDirStats.LogDirSpaceStats;
+import static com.provectus.kafka.ui.model.InternalLogDirStats.SegmentStats;
+import static com.provectus.kafka.ui.service.ReactiveAdminClient.ClusterDescription;
+
+import com.google.common.collect.Table;
+import com.provectus.kafka.ui.model.InternalLogDirStats;
+import com.provectus.kafka.ui.model.InternalPartitionsOffsets;
+import com.provectus.kafka.ui.service.ReactiveAdminClient;
+import jakarta.annotation.Nullable;
+import java.time.Instant;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import lombok.Builder;
+import lombok.RequiredArgsConstructor;
+import lombok.Value;
+import org.apache.kafka.clients.admin.ConfigEntry;
+import org.apache.kafka.clients.admin.ConsumerGroupDescription;
+import org.apache.kafka.clients.admin.ConsumerGroupListing;
+import org.apache.kafka.clients.admin.OffsetSpec;
+import org.apache.kafka.clients.admin.TopicDescription;
+import org.apache.kafka.common.Node;
+import org.apache.kafka.common.TopicPartition;
+import reactor.core.publisher.Mono;
+
+@Builder(toBuilder = true)
+@RequiredArgsConstructor
+@Value
+public class ScrapedClusterState {
+
+  Instant scrapeFinishedAt;
+  Map<Integer, NodeState> nodesStates;
+  Map<String, TopicState> topicStates;
+  Map<String, ConsumerGroupState> consumerGroupsStates;
+
+  public record NodeState(int id,
+                          Node node,
+                          @Nullable SegmentStats segmentStats,
+                          @Nullable LogDirSpaceStats logDirSpaceStats) {
+  }
+
+  public record TopicState(
+      String name,
+      TopicDescription description,
+      List<ConfigEntry> configs,
+      Map<Integer, Long> startOffsets,
+      Map<Integer, Long> endOffsets,
+      @Nullable SegmentStats segmentStats,
+      @Nullable Map<Integer, SegmentStats> partitionsSegmentStats) {
+  }
+
+  public record ConsumerGroupState(
+      String group,
+      ConsumerGroupDescription description,
+      Map<TopicPartition, Long> committedOffsets) {
+  }
+
+  public static ScrapedClusterState empty() {
+    return ScrapedClusterState.builder()
+        .scrapeFinishedAt(Instant.now())
+        .nodesStates(Map.of())
+        .topicStates(Map.of())
+        .consumerGroupsStates(Map.of())
+        .build();
+  }
+
+  public ScrapedClusterState updateTopics(Map<String, TopicDescription> descriptions,
+                                          Map<String, List<ConfigEntry>> configs,
+                                          InternalPartitionsOffsets partitionsOffsets) {
+    var updatedTopicStates = new HashMap<>(topicStates);
+    descriptions.forEach((topic, description) -> {
+      SegmentStats segmentStats = null;
+      Map<Integer, SegmentStats> partitionsSegmentStats = null;
+      if (topicStates.containsKey(topic)) {
+        segmentStats = topicStates.get(topic).segmentStats();
+        partitionsSegmentStats = topicStates.get(topic).partitionsSegmentStats();
+      }
+      updatedTopicStates.put(
+          topic,
+          new TopicState(
+              topic,
+              description,
+              configs.getOrDefault(topic, List.of()),
+              partitionsOffsets.topicOffsets(topic, true),
+              partitionsOffsets.topicOffsets(topic, false),
+              segmentStats,
+              partitionsSegmentStats
+          )
+      );
+    });
+    return toBuilder()
+        .topicStates(updatedTopicStates)
+        .build();
+  }
+
+  public ScrapedClusterState topicDeleted(String topic) {
+    var newTopicStates = new HashMap<>(topicStates);
+    newTopicStates.remove(topic);
+    return toBuilder()
+        .topicStates(newTopicStates)
+        .build();
+  }
+
+  public static Mono<ScrapedClusterState> scrape(ClusterDescription clusterDescription,
+                                                 ReactiveAdminClient ac) {
+    return Mono.zip(
+        ac.describeLogDirs(clusterDescription.getNodes().stream().map(Node::id).toList())
+            .map(InternalLogDirStats::new),
+        ac.listConsumerGroups().map(l -> l.stream().map(ConsumerGroupListing::groupId).toList()),
+        ac.describeTopics(),
+        ac.getTopicsConfig()
+    ).flatMap(phase1 ->
+        Mono.zip(
+            ac.listOffsets(phase1.getT3().values(), OffsetSpec.latest()),
+            ac.listOffsets(phase1.getT3().values(), OffsetSpec.earliest()),
+            ac.describeConsumerGroups(phase1.getT2()),
+            ac.listConsumerGroupOffsets(phase1.getT2(), null)
+        ).map(phase2 ->
+            create(
+                clusterDescription,
+                phase1.getT1(),
+                phase1.getT3(),
+                phase1.getT4(),
+                phase2.getT1(),
+                phase2.getT2(),
+                phase2.getT3(),
+                phase2.getT4()
+            )));
+  }
+
+  private static ScrapedClusterState create(ClusterDescription clusterDescription,
+                                            InternalLogDirStats segmentStats,
+                                            Map<String, TopicDescription> topicDescriptions,
+                                            Map<String, List<ConfigEntry>> topicConfigs,
+                                            Map<TopicPartition, Long> latestOffsets,
+                                            Map<TopicPartition, Long> earliestOffsets,
+                                            Map<String, ConsumerGroupDescription> consumerDescriptions,
+                                            Table<String, TopicPartition, Long> consumerOffsets) {
+
+    Map<String, TopicState> topicStates = new HashMap<>();
+    topicDescriptions.forEach((name, desc) ->
+        topicStates.put(
+            name,
+            new TopicState(
+                name,
+                desc,
+                topicConfigs.getOrDefault(name, List.of()),
+                filterTopic(name, earliestOffsets),
+                filterTopic(name, latestOffsets),
+                segmentStats.getTopicStats().get(name),
+                Optional.ofNullable(segmentStats.getPartitionsStats())
+                    .map(partitionsStats -> filterTopic(name, partitionsStats))
+                    .orElse(null)
+            )));
+
+    Map<String, ConsumerGroupState> consumerGroupsStates = new HashMap<>();
+    consumerDescriptions.forEach((name, desc) ->
+        consumerGroupsStates.put(
+            name,
+            new ConsumerGroupState(
+                name,
+                desc,
+                consumerOffsets.row(name)
+            )));
+
+    Map<Integer, NodeState> nodesStates = new HashMap<>();
+    clusterDescription.getNodes().forEach(node ->
+        nodesStates.put(
+            node.id(),
+            new NodeState(
+                node.id(),
+                node,
+                segmentStats.getBrokerStats().get(node.id()),
+                segmentStats.getBrokerDirsStats().get(node.id())
+            )));
+
+    return new ScrapedClusterState(
+        Instant.now(),
+        nodesStates,
+        topicStates,
+        consumerGroupsStates
+    );
+  }
+
+  private static <T> Map<Integer, T> filterTopic(String topicForFilter, Map<TopicPartition, T> tpMap) {
+    return tpMap.entrySet()
+        .stream()
+        .filter(tp -> tp.getKey().topic().equals(topicForFilter))
+        .collect(Collectors.toMap(e -> e.getKey().partition(), Map.Entry::getValue));
+  }
+
+}
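For example, the two-phase scrape above can be driven directly from an admin client (a sketch; it assumes ReactiveAdminClient#describeCluster, and block() is used for illustration only):

  ClusterDescription description = adminClient.describeCluster().block();
  ScrapedClusterState state = ScrapedClusterState.scrape(description, adminClient).block();
  state.getTopicStates().forEach((topic, ts) ->
      log.debug("{}: partitions={}, endOffsets={}",
          topic, ts.description().partitions().size(), ts.endOffsets()));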

+ 24 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/inferred/InferredMetrics.java

@@ -0,0 +1,24 @@
+package com.provectus.kafka.ui.service.metrics.scrape.inferred;
+
+import static io.prometheus.client.Collector.MetricFamilySamples;
+
+import java.util.List;
+import java.util.stream.Stream;
+
+public class InferredMetrics {
+
+  private final List<MetricFamilySamples> metrics;
+
+  public static InferredMetrics empty() {
+    return new InferredMetrics(List.of());
+  }
+
+  public InferredMetrics(List<MetricFamilySamples> metrics) {
+    this.metrics = metrics;
+  }
+
+  public Stream<MetricFamilySamples> asStream() {
+    return metrics.stream();
+  }
+
+}

+ 226 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/inferred/InferredMetricsScraper.java

@@ -0,0 +1,226 @@
+package com.provectus.kafka.ui.service.metrics.scrape.inferred;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState;
+import io.prometheus.client.Collector.MetricFamilySamples;
+import io.prometheus.client.GaugeMetricFamily;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import javax.annotation.Nullable;
+import lombok.RequiredArgsConstructor;
+import lombok.extern.slf4j.Slf4j;
+import org.apache.kafka.clients.admin.MemberDescription;
+import org.apache.kafka.common.Node;
+import reactor.core.publisher.Mono;
+
+@Slf4j
+@RequiredArgsConstructor
+public class InferredMetricsScraper {
+
+  private ScrapedClusterState prevState = null;
+
+  public synchronized Mono<InferredMetrics> scrape(ScrapedClusterState newState) {
+    var inferred = infer(prevState, newState);
+    this.prevState = newState;
+    return Mono.just(inferred);
+  }
+
+  @VisibleForTesting
+  static InferredMetrics infer(@Nullable ScrapedClusterState prevState, ScrapedClusterState newState) {
+    var registry = new MetricsRegistry();
+    fillNodesMetrics(registry, newState);
+    fillTopicMetrics(registry, newState);
+    fillConsumerGroupsMetrics(registry, newState);
+    List<MetricFamilySamples> metrics = registry.metrics.values().stream().toList();
+    log.debug("{} metric families inferred from cluster state", metrics.size());
+    return new InferredMetrics(metrics);
+  }
+
+  private static class MetricsRegistry {
+
+    final Map<String, MetricFamilySamples> metrics = new LinkedHashMap<>();
+
+    void gauge(String name,
+               String help,
+               List<String> lbls,
+               List<String> lblVals,
+               Number value) {
+      var gauge = (GaugeMetricFamily) metrics.computeIfAbsent(
+          name, n -> new GaugeMetricFamily(n, help, lbls));
+      gauge.addMetric(lblVals, value.doubleValue());
+    }
+  }
+
+  private static void fillNodesMetrics(MetricsRegistry registry, ScrapedClusterState newState) {
+    registry.gauge(
+        "broker_count",
+        "Number of brokers in the Kafka cluster",
+        List.of(),
+        List.of(),
+        newState.getNodesStates().size()
+    );
+
+    newState.getNodesStates().forEach((nodeId, state) -> {
+      if (state.segmentStats() != null) {
+        registry.gauge(
+            "broker_bytes_disk",
+            "Written disk size in bytes of a broker",
+            List.of("node_id"),
+            List.of(nodeId.toString()),
+            state.segmentStats().getSegmentSize()
+        );
+      }
+      if (state.logDirSpaceStats() != null) {
+        if (state.logDirSpaceStats().usableBytes() != null) {
+          registry.gauge(
+              "broker_bytes_usable",
+              "Usable disk size in bytes of a broker",
+              List.of("node_id"),
+              List.of(nodeId.toString()),
+              state.logDirSpaceStats().usableBytes()
+          );
+        }
+        if (state.logDirSpaceStats().totalBytes() != null) {
+          registry.gauge(
+              "broker_bytes_total",
+              "Total disk size in bytes of a broker",
+              List.of("node_id"),
+              List.of(nodeId.toString()),
+              state.logDirSpaceStats().totalBytes()
+          );
+        }
+      }
+    });
+  }
+
+  private static void fillTopicMetrics(MetricsRegistry registry, ScrapedClusterState clusterState) {
+    registry.gauge(
+        "topic_count",
+        "Number of topics in the Kafka cluster",
+        List.of(),
+        List.of(),
+        clusterState.getTopicStates().size()
+    );
+
+    clusterState.getTopicStates().forEach((topicName, state) -> {
+      registry.gauge(
+          "kafka_topic_partitions",
+          "Number of partitions for this Topic",
+          List.of("topic"),
+          List.of(topicName),
+          state.description().partitions().size()
+      );
+      state.endOffsets().forEach((partition, endOffset) -> {
+        registry.gauge(
+            "kafka_topic_partition_current_offset",
+            "Current Offset of a Broker at Topic/Partition",
+            List.of("topic", "partition"),
+            List.of(topicName, String.valueOf(partition)),
+            endOffset
+        );
+      });
+      state.startOffsets().forEach((partition, startOffset) -> {
+        registry.gauge(
+            "kafka_topic_partition_oldest_offset",
+            "Oldest Offset of a Broker at Topic/Partition",
+            List.of("topic", "partition"),
+            List.of(topicName, String.valueOf(partition)),
+            startOffset
+        );
+      });
+      state.description().partitions().forEach(p -> {
+        registry.gauge(
+            "kafka_topic_partition_in_sync_replica",
+            "Number of In-Sync Replicas for this Topic/Partition",
+            List.of("topic", "partition"),
+            List.of(topicName, String.valueOf(p.partition())),
+            p.isr().size()
+        );
+        registry.gauge(
+            "kafka_topic_partition_replicas",
+            "Number of Replicas for this Topic/Partition",
+            List.of("topic", "partition"),
+            List.of(topicName, String.valueOf(p.partition())),
+            p.replicas().size()
+        );
+        registry.gauge(
+            "kafka_topic_partition_leader",
+            "Leader Broker ID of this Topic/Partition (-1, if no leader)",
+            List.of("topic", "partition"),
+            List.of(topicName, String.valueOf(p.partition())),
+            Optional.ofNullable(p.leader()).map(Node::id).orElse(-1)
+        );
+      });
+      if (state.segmentStats() != null) {
+        registry.gauge(
+            "topic_bytes_disk",
+            "Disk size in bytes of a topic",
+            List.of("topic"),
+            List.of(topicName),
+            state.segmentStats().getSegmentSize()
+        );
+      }
+    });
+  }
+
+  private static void fillConsumerGroupsMetrics(MetricsRegistry registry, ScrapedClusterState clusterState) {
+    registry.gauge(
+        "group_count",
+        "Number of consumer groups in the Kafka cluster",
+        List.of(),
+        List.of(),
+        clusterState.getConsumerGroupsStates().size()
+    );
+
+    clusterState.getConsumerGroupsStates().forEach((groupName, state) -> {
+      registry.gauge(
+          "group_state",
+          "State of the consumer group, value = ordinal of org.apache.kafka.common.ConsumerGroupState",
+          List.of("group"),
+          List.of(groupName),
+          state.description().state().ordinal()
+      );
+      registry.gauge(
+          "group_member_count",
+          "Number of member assignments in the consumer group.",
+          List.of("group"),
+          List.of(groupName),
+          state.description().members().size()
+      );
+      registry.gauge(
+          "group_host_count",
+          "Number of distinct hosts in the consumer group.",
+          List.of("group"),
+          List.of(groupName),
+          state.description().members().stream().map(MemberDescription::host).distinct().count()
+      );
+
+      state.committedOffsets().forEach((tp, committedOffset) -> {
+        registry.gauge(
+            "kafka_consumergroup_current_offset",
+            "Current Offset of a ConsumerGroup at Topic/Partition",
+            List.of("consumergroup", "topic", "partition"),
+            List.of(groupName, tp.topic(), String.valueOf(tp.partition())),
+            committedOffset
+        );
+
+        Optional.ofNullable(clusterState.getTopicStates().get(tp.topic()))
+            .flatMap(s -> Optional.ofNullable(s.endOffsets().get(tp.partition())))
+            .ifPresent(endOffset ->
+                registry.gauge(
+                    "kafka_consumergroup_lag",
+                    "Current Approximate Lag of a ConsumerGroup at Topic/Partition",
+                    List.of("consumergroup", "topic", "partition"),
+                    List.of(groupName, tp.topic(), String.valueOf(tp.partition())),
+                    endOffset - committedOffset //TODO: check +-1
+                ));
+
+      });
+    });
+  }
+}
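Since infer(..) is exposed for testing, the inferred families can be inspected directly from a test in the same package (a sketch; family names as registered above):

  InferredMetrics inferred = InferredMetricsScraper.infer(null, clusterState);
  // e.g. broker_count, topic_count, kafka_topic_partitions,
  //      kafka_consumergroup_current_offset, kafka_consumergroup_lag, ...
  inferred.asStream().forEach(mfs -> System.out.println(mfs.name));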

+ 3 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/JmxMetricsFormatter.java → kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/jmx/JmxMetricsFormatter.java

@@ -1,5 +1,7 @@
-package com.provectus.kafka.ui.service.metrics;
+package com.provectus.kafka.ui.service.metrics.scrape.jmx;
 
+import com.provectus.kafka.ui.service.metrics.RawMetric;
+import io.prometheus.client.Collector;
 import java.math.BigDecimal;
 import java.util.ArrayList;
 import java.util.LinkedHashMap;

+ 31 - 31
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/JmxMetricsRetriever.java → kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/jmx/JmxMetricsRetriever.java

@@ -1,6 +1,7 @@
-package com.provectus.kafka.ui.service.metrics;
+package com.provectus.kafka.ui.service.metrics.scrape.jmx;
 
-import com.provectus.kafka.ui.model.KafkaCluster;
+import com.provectus.kafka.ui.model.MetricsScrapeProperties;
+import com.provectus.kafka.ui.service.metrics.RawMetric;
 import java.io.Closeable;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -17,15 +18,15 @@ import lombok.SneakyThrows;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.kafka.common.Node;
+import org.springframework.stereotype.Component;
 import org.springframework.stereotype.Service;
-import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 import reactor.core.scheduler.Schedulers;
 
 
-@Service
+@Component // needs to be a component so that close() is invoked on application shutdown
 @Slf4j
-class JmxMetricsRetriever implements MetricsRetriever, Closeable {
+public class JmxMetricsRetriever implements Closeable {
 
   private static final boolean SSL_JMX_SUPPORTED;
 
@@ -43,35 +44,34 @@ class JmxMetricsRetriever implements MetricsRetriever, Closeable {
     JmxSslSocketFactory.clearFactoriesCache();
   }
 
-  @Override
-  public Flux<RawMetric> retrieve(KafkaCluster c, Node node) {
-    if (isSslJmxEndpoint(c) && !SSL_JMX_SUPPORTED) {
-      log.warn("Cluster {} has jmx ssl configured, but it is not supported", c.getName());
-      return Flux.empty();
+  public Mono<List<RawMetric>> retrieveFromNode(MetricsScrapeProperties scrapeProperties, Node node) {
+    if (isSslJmxEndpoint(scrapeProperties) && !SSL_JMX_SUPPORTED) {
+      log.warn("Cluster has jmx ssl configured, but it is not supported by app");
+      return Mono.just(List.of());
     }
-    return Mono.fromSupplier(() -> retrieveSync(c, node))
-        .subscribeOn(Schedulers.boundedElastic())
-        .flatMapMany(Flux::fromIterable);
+    return Mono.fromSupplier(() -> retrieveSync(scrapeProperties, node))
+        .subscribeOn(Schedulers.boundedElastic());
   }
 
-  private boolean isSslJmxEndpoint(KafkaCluster cluster) {
-    return cluster.getMetricsConfig().getKeystoreLocation() != null;
+  private boolean isSslJmxEndpoint(MetricsScrapeProperties scrapeProperties) {
+    return scrapeProperties.getKeystoreConfig() != null
+        && scrapeProperties.getKeystoreConfig().getKeystoreLocation() != null;
   }
 
   @SneakyThrows
-  private List<RawMetric> retrieveSync(KafkaCluster c, Node node) {
-    String jmxUrl = JMX_URL + node.host() + ":" + c.getMetricsConfig().getPort() + "/" + JMX_SERVICE_TYPE;
+  private List<RawMetric> retrieveSync(MetricsScrapeProperties scrapeProperties, Node node) {
+    String jmxUrl = JMX_URL + node.host() + ":" + scrapeProperties.getPort() + "/" + JMX_SERVICE_TYPE;
     log.debug("Collection JMX metrics for {}", jmxUrl);
     List<RawMetric> result = new ArrayList<>();
-    withJmxConnector(jmxUrl, c, jmxConnector -> getMetricsFromJmx(jmxConnector, result));
+    withJmxConnector(jmxUrl, scrapeProperties, jmxConnector -> getMetricsFromJmx(jmxConnector, result));
     log.debug("{} metrics collected for {}", result.size(), jmxUrl);
     return result;
   }
 
   private void withJmxConnector(String jmxUrl,
-                                KafkaCluster c,
+                                MetricsScrapeProperties scrapeProperties,
                                 Consumer<JMXConnector> consumer) {
-    var env = prepareJmxEnvAndSetThreadLocal(c);
+    var env = prepareJmxEnvAndSetThreadLocal(scrapeProperties);
     try (JMXConnector connector = JMXConnectorFactory.newJMXConnector(new JMXServiceURL(jmxUrl), env)) {
       try {
         connector.connect(env);
@@ -87,25 +87,25 @@ class JmxMetricsRetriever implements MetricsRetriever, Closeable {
     }
   }
 
-  private Map<String, Object> prepareJmxEnvAndSetThreadLocal(KafkaCluster cluster) {
-    var metricsConfig = cluster.getMetricsConfig();
+  private Map<String, Object> prepareJmxEnvAndSetThreadLocal(MetricsScrapeProperties scrapeProperties) {
     Map<String, Object> env = new HashMap<>();
-    if (isSslJmxEndpoint(cluster)) {
-      var clusterSsl = cluster.getOriginalProperties().getSsl();
+    if (isSslJmxEndpoint(scrapeProperties)) {
+      var truststoreConfig = scrapeProperties.getTruststoreConfig();
+      var keystoreConfig = scrapeProperties.getKeystoreConfig();
       JmxSslSocketFactory.setSslContextThreadLocal(
-          clusterSsl != null ? clusterSsl.getTruststoreLocation() : null,
-          clusterSsl != null ? clusterSsl.getTruststorePassword() : null,
-          metricsConfig.getKeystoreLocation(),
-          metricsConfig.getKeystorePassword()
+          truststoreConfig != null ? truststoreConfig.getTruststoreLocation() : null,
+          truststoreConfig != null ? truststoreConfig.getTruststorePassword() : null,
+          keystoreConfig != null ? keystoreConfig.getKeystoreLocation() : null,
+          keystoreConfig != null ? keystoreConfig.getKeystorePassword() : null
       );
       JmxSslSocketFactory.editJmxConnectorEnv(env);
     }
 
-    if (StringUtils.isNotEmpty(metricsConfig.getUsername())
-        && StringUtils.isNotEmpty(metricsConfig.getPassword())) {
+    if (StringUtils.isNotEmpty(scrapeProperties.getUsername())
+        && StringUtils.isNotEmpty(scrapeProperties.getPassword())) {
       env.put(
           JMXConnector.CREDENTIALS,
-          new String[] {metricsConfig.getUsername(), metricsConfig.getPassword()}
+          new String[] {scrapeProperties.getUsername(), scrapeProperties.getPassword()}
       );
     }
     return env;

+ 36 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/jmx/JmxMetricsScraper.java

@@ -0,0 +1,36 @@
+package com.provectus.kafka.ui.service.metrics.scrape.jmx;
+
+import static io.prometheus.client.Collector.MetricFamilySamples;
+
+import com.provectus.kafka.ui.model.MetricsScrapeProperties;
+import com.provectus.kafka.ui.service.metrics.RawMetric;
+import com.provectus.kafka.ui.service.metrics.scrape.PerBrokerScrapedMetrics;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import org.apache.kafka.common.Node;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+import reactor.util.function.Tuples;
+
+public class JmxMetricsScraper {
+
+  private final JmxMetricsRetriever jmxMetricsRetriever;
+  private final MetricsScrapeProperties scrapeProperties;
+
+  public JmxMetricsScraper(MetricsScrapeProperties scrapeProperties,
+                           JmxMetricsRetriever jmxMetricsRetriever) {
+    this.scrapeProperties = scrapeProperties;
+    this.jmxMetricsRetriever = jmxMetricsRetriever;
+  }
+
+  public Mono<PerBrokerScrapedMetrics> scrape(Collection<Node> nodes) {
+    Mono<Map<Integer, List<MetricFamilySamples>>> collected = Flux.fromIterable(nodes)
+        .flatMap(n -> jmxMetricsRetriever.retrieveFromNode(scrapeProperties, n).map(metrics -> Tuples.of(n, metrics)))
+        .collectMap(
+            t -> t.getT1().id(),
+            t -> RawMetric.groupIntoMfs(t.getT2()).toList()
+        );
+    return collected.map(PerBrokerScrapedMetrics::new);
+  }
+}

+ 2 - 3
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/JmxSslSocketFactory.java → kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/jmx/JmxSslSocketFactory.java

@@ -1,4 +1,4 @@
-package com.provectus.kafka.ui.service.metrics;
+package com.provectus.kafka.ui.service.metrics.scrape.jmx;
 
 import com.google.common.base.Preconditions;
 import java.io.FileInputStream;
@@ -61,9 +61,8 @@ class JmxSslSocketFactory extends javax.net.ssl.SSLSocketFactory {
     } catch (Exception e) {
       log.error("----------------------------------");
       log.error("SSL can't be enabled for JMX retrieval. "
-              + "Make sure your java app run with '--add-opens java.rmi/javax.rmi.ssl=ALL-UNNAMED' arg. Err: {}",
+              + "Make sure your java app is running with '--add-opens java.rmi/javax.rmi.ssl=ALL-UNNAMED' arg. Err: {}",
           e.getMessage());
-      log.trace("SSL can't be enabled for JMX retrieval", e);
       log.error("----------------------------------");
     }
     SSL_JMX_SUPPORTED = sslJmxSupported;

+ 317 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/prometheus/PrometheusEndpointParser.java

@@ -0,0 +1,317 @@
+package com.provectus.kafka.ui.service.metrics.scrape.prometheus;
+
+import static io.prometheus.client.Collector.MetricFamilySamples.Sample;
+
+import com.google.common.base.Enums;
+import io.prometheus.client.Collector.MetricFamilySamples;
+import io.prometheus.client.Collector.Type;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Stream;
+
+public class PrometheusEndpointParser {
+
+  // used when no TYPE is provided (or the provided type is unsupported)
+  private static final Type DEFAULT_TYPE = Type.GAUGE;
+
+  private PrometheusEndpointParser() {
+  }
+
+  private static class ParserContext {
+    final List<MetricFamilySamples> registered = new ArrayList<>();
+
+    String name;
+    String help;
+    Type type;
+    String unit;
+    Set<String> allowedNames = new HashSet<>();
+    List<Sample> samples = new ArrayList<>();
+
+    void registerAndReset() {
+      if (!samples.isEmpty()) {
+        registered.add(
+            new MetricFamilySamples(
+                name,
+                Optional.ofNullable(unit).orElse(""),
+                type,
+                Optional.ofNullable(help).orElse(name),
+                List.copyOf(samples))
+        );
+      }
+      //resetting state:
+      name = null;
+      help = null;
+      type = null;
+      unit = null;
+      allowedNames.clear();
+      samples.clear();
+    }
+
+    List<MetricFamilySamples> getRegistered() {
+      registerAndReset(); // last in progress metric should be registered
+      return registered;
+    }
+  }
+
+  // general logic taken from https://github.com/prometheus/client_python/blob/master/prometheus_client/parser.py
+  public static List<MetricFamilySamples> parse(Stream<String> lines) {
+    ParserContext context = new ParserContext();
+    lines.map(String::trim)
+        .filter(s -> !s.isBlank())
+        .forEach(line -> {
+          if (line.charAt(0) == '#') {
+            String[] parts = line.split("[ \t]+", 4);
+            if (parts.length >= 3) {
+              switch (parts[1]) {
+                case "HELP" -> processHelp(context, parts);
+                case "TYPE" -> processType(context, parts);
+                case "UNIT" -> processUnit(context, parts);
+                default -> { /* probably a comment */ }
+              }
+            }
+          } else {
+            processSample(context, line);
+          }
+        });
+    return context.getRegistered();
+  }
+
+  private static void processUnit(ParserContext context, String[] parts) {
+    if (!parts[2].equals(context.name)) {
+      // starting new metric family - need to register (if possible) prev one
+      context.registerAndReset();
+      context.name = parts[2];
+      context.type = DEFAULT_TYPE;
+      context.allowedNames.add(context.name);
+    }
+    if (parts.length == 4) {
+      context.unit = parts[3];
+    }
+  }
+
+  private static void processHelp(ParserContext context, String[] parts) {
+    if (!parts[2].equals(context.name)) {
+      // starting new metric family - need to register (if possible) prev one
+      context.registerAndReset();
+      context.name = parts[2];
+      context.type = DEFAULT_TYPE;
+      context.allowedNames.add(context.name);
+    }
+    if (parts.length == 4) {
+      context.help = unescapeHelp(parts[3]);
+    }
+  }
+
+  private static void processType(ParserContext context, String[] parts) {
+    if (!parts[2].equals(context.name)) {
+      // starting new metric family - need to register (if possible) prev one
+      context.registerAndReset();
+      context.name = parts[2];
+    }
+
+    context.type = Enums.getIfPresent(Type.class, parts[3].toUpperCase()).or(DEFAULT_TYPE);
+    switch (context.type) {
+      case SUMMARY -> {
+        context.allowedNames.add(context.name);
+        context.allowedNames.add(context.name + "_count");
+        context.allowedNames.add(context.name + "_sum");
+        context.allowedNames.add(context.name + "_created");
+      }
+      case HISTOGRAM -> {
+        context.allowedNames.add(context.name + "_count");
+        context.allowedNames.add(context.name + "_sum");
+        context.allowedNames.add(context.name + "_bucket");
+        context.allowedNames.add(context.name + "_created");
+      }
+      case COUNTER -> {
+        context.allowedNames.add(context.name);
+        context.allowedNames.add(context.name + "_total");
+        context.allowedNames.add(context.name + "_created");
+      }
+      case INFO -> {
+        context.allowedNames.add(context.name);
+        context.allowedNames.add(context.name + "_info");
+      }
+      default -> context.allowedNames.add(context.name);
+    }
+  }
+
+  private static void processSample(ParserContext context, String line) {
+    parseSampleLine(line).ifPresent(sample -> {
+      if (!context.allowedNames.contains(sample.name)) {
+        // starting new metric family - need to register (if possible) prev one
+        context.registerAndReset();
+        context.name = sample.name;
+        context.type = DEFAULT_TYPE;
+        context.allowedNames.add(sample.name);
+      }
+      context.samples.add(sample);
+    });
+  }
+
+  private static String unescapeHelp(String text) {
+    // algorithm from https://github.com/prometheus/client_python/blob/a2dae6caeaf3c300db416ba10a2a3271693addd4/prometheus_client/parser.py
+    if (text == null || !text.contains("\\")) {
+      return text;
+    }
+    StringBuilder result = new StringBuilder();
+    boolean slash = false;
+    for (int c = 0; c < text.length(); c++) {
+      char charAt = text.charAt(c);
+      if (slash) {
+        if (charAt == '\\') {
+          result.append('\\');
+        } else if (charAt == 'n') {
+          result.append('\n');
+        } else {
+          result.append('\\').append(charAt);
+        }
+        slash = false;
+      } else {
+        if (charAt == '\\') {
+          slash = true;
+        } else {
+          result.append(charAt);
+        }
+      }
+    }
+    if (slash) {
+      result.append("\\");
+    }
+    return result.toString();
+  }
+
+  // returns empty if the line is not a valid sample string
+  private static Optional<Sample> parseSampleLine(String line) {
+    // algorithm copied from https://github.com/prometheus/client_python/blob/a2dae6caeaf3c300db416ba10a2a3271693addd4/prometheus_client/parser.py
+    StringBuilder name = new StringBuilder();
+    StringBuilder labelname = new StringBuilder();
+    StringBuilder labelvalue = new StringBuilder();
+    StringBuilder value = new StringBuilder();
+    List<String> lblNames = new ArrayList<>();
+    List<String> lblVals = new ArrayList<>();
+
+    String state = "name";
+
+    for (int c = 0; c < line.length(); c++) {
+      char charAt = line.charAt(c);
+      if (state.equals("name")) {
+        if (charAt == '{') {
+          state = "startoflabelname";
+        } else if (charAt == ' ' || charAt == '\t') {
+          state = "endofname";
+        } else {
+          name.append(charAt);
+        }
+      } else if (state.equals("endofname")) {
+        if (charAt == ' ' || charAt == '\t') {
+          // do nothing
+        } else if (charAt == '{') {
+          state = "startoflabelname";
+        } else {
+          value.append(charAt);
+          state = "value";
+        }
+      } else if (state.equals("startoflabelname")) {
+        if (charAt == ' ' || charAt == '\t') {
+          // do nothing
+        } else if (charAt == '}') {
+          state = "endoflabels";
+        } else {
+          labelname.append(charAt);
+          state = "labelname";
+        }
+      } else if (state.equals("labelname")) {
+        if (charAt == '=') {
+          state = "labelvaluequote";
+        } else if (charAt == '}') {
+          state = "endoflabels";
+        } else if (charAt == ' ' || charAt == '\t') {
+          state = "labelvalueequals";
+        } else {
+          labelname.append(charAt);
+        }
+      } else if (state.equals("labelvalueequals")) {
+        if (charAt == '=') {
+          state = "labelvaluequote";
+        } else if (charAt == ' ' || charAt == '\t') {
+          // do nothing
+        } else {
+          return Optional.empty();
+        }
+      } else if (state.equals("labelvaluequote")) {
+        if (charAt == '"') {
+          state = "labelvalue";
+        } else if (charAt == ' ' || charAt == '\t') {
+          // do nothing
+        } else {
+          return Optional.empty();
+        }
+      } else if (state.equals("labelvalue")) {
+        if (charAt == '\\') {
+          state = "labelvalueslash";
+        } else if (charAt == '"') {
+          lblNames.add(labelname.toString());
+          lblVals.add(labelvalue.toString());
+          labelname.setLength(0);
+          labelvalue.setLength(0);
+          state = "nextlabel";
+        } else {
+          labelvalue.append(charAt);
+        }
+      } else if (state.equals("labelvalueslash")) {
+        state = "labelvalue";
+        if (charAt == '\\') {
+          labelvalue.append('\\');
+        } else if (charAt == 'n') {
+          labelvalue.append('\n');
+        } else if (charAt == '"') {
+          labelvalue.append('"');
+        } else {
+          labelvalue.append('\\').append(charAt);
+        }
+      } else if (state.equals("nextlabel")) {
+        if (charAt == ',') {
+          state = "labelname";
+        } else if (charAt == '}') {
+          state = "endoflabels";
+        } else if (charAt == ' ' || charAt == '\t') {
+          // do nothing
+        } else {
+          return Optional.empty();
+        }
+      } else if (state.equals("endoflabels")) {
+        if (charAt == ' ' || charAt == '\t') {
+          // do nothing
+        } else {
+          value.append(charAt);
+          state = "value";
+        }
+      } else if (state.equals("value")) {
+        if (charAt == ' ' || charAt == '\t') {
+          break; // timestamps are NOT supported - ignoring
+        } else {
+          value.append(charAt);
+        }
+      }
+    }
+    return Optional.of(new Sample(name.toString(), lblNames, lblVals, parseDouble(value.toString())));
+  }
+
+  private static double parseDouble(String valueString) {
+    if (valueString.equalsIgnoreCase("NaN")) {
+      return Double.NaN;
+    } else if (valueString.equalsIgnoreCase("+Inf")) {
+      return Double.POSITIVE_INFINITY;
+    } else if (valueString.equalsIgnoreCase("-Inf")) {
+      return Double.NEGATIVE_INFINITY;
+    }
+    return Double.parseDouble(valueString);
+  }
+
+}
+
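A quick sketch of the parser above on a small exposition snippet:

  List<MetricFamilySamples> families = PrometheusEndpointParser.parse(Stream.of(
      "# HELP requests_total Total requests.",
      "# TYPE requests_total counter",
      "requests_total{path=\"/metrics\"} 42.0"
  ));
  // -> a single COUNTER family "requests_total" holding one sample with value 42.0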

+ 54 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/prometheus/PrometheusMetricsRetriever.java

@@ -0,0 +1,54 @@
+package com.provectus.kafka.ui.service.metrics.scrape.prometheus;
+
+import static io.prometheus.client.Collector.MetricFamilySamples;
+
+import com.provectus.kafka.ui.model.MetricsScrapeProperties;
+import com.provectus.kafka.ui.util.WebClientConfigurator;
+import java.util.List;
+import java.util.Optional;
+import lombok.extern.slf4j.Slf4j;
+import org.springframework.util.unit.DataSize;
+import org.springframework.web.reactive.function.client.WebClient;
+import org.springframework.web.util.UriComponentsBuilder;
+import reactor.core.publisher.Mono;
+
+@Slf4j
+class PrometheusMetricsRetriever {
+
+  private static final String METRICS_ENDPOINT_PATH = "/metrics";
+  private static final int DEFAULT_EXPORTER_PORT = 11001;
+
+  private final int port;
+  private final boolean sslEnabled;
+  private final WebClient webClient;
+
+  PrometheusMetricsRetriever(MetricsScrapeProperties scrapeProperties) {
+    this.port = Optional.ofNullable(scrapeProperties.getPort()).orElse(DEFAULT_EXPORTER_PORT);
+    this.sslEnabled = scrapeProperties.isSsl() || scrapeProperties.getKeystoreConfig() != null;
+    this.webClient = new WebClientConfigurator()
+        .configureBufferSize(DataSize.ofMegabytes(20))
+        .configureBasicAuth(scrapeProperties.getUsername(), scrapeProperties.getPassword())
+        .configureSsl(scrapeProperties.getTruststoreConfig(), scrapeProperties.getKeystoreConfig())
+        .build();
+  }
+
+  Mono<List<MetricFamilySamples>> retrieve(String host) {
+    log.debug("Retrieving metrics from prometheus endpoint: {}:{}", host, port);
+
+    var uri = UriComponentsBuilder.newInstance()
+        .scheme(sslEnabled ? "https" : "http")
+        .host(host)
+        .port(port)
+        .path(METRICS_ENDPOINT_PATH)
+        .build()
+        .toUri();
+
+    return webClient.get()
+        .uri(uri)
+        .retrieve()
+        .bodyToMono(String.class)
+        .doOnError(e -> log.error("Error while getting metrics from {}", host, e))
+        .map(body -> PrometheusEndpointParser.parse(body.lines()))
+        .onErrorResume(th -> Mono.just(List.of()));
+  }
+}
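With the defaults above, the retriever polls http://&lt;broker-host&gt;:11001/metrics, switching to https when SSL or a keystore is configured for the scrape.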

+ 30 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/scrape/prometheus/PrometheusScraper.java

@@ -0,0 +1,30 @@
+package com.provectus.kafka.ui.service.metrics.scrape.prometheus;
+
+import static io.prometheus.client.Collector.MetricFamilySamples;
+
+import com.provectus.kafka.ui.model.MetricsScrapeProperties;
+import com.provectus.kafka.ui.service.metrics.scrape.PerBrokerScrapedMetrics;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import org.apache.kafka.common.Node;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+import reactor.util.function.Tuple2;
+import reactor.util.function.Tuples;
+
+public class PrometheusScraper {
+
+  private final PrometheusMetricsRetriever retriever;
+
+  public PrometheusScraper(MetricsScrapeProperties scrapeProperties) {
+    this.retriever = new PrometheusMetricsRetriever(scrapeProperties);
+  }
+
+  public Mono<PerBrokerScrapedMetrics> scrape(Collection<Node> clusterNodes) {
+    Mono<Map<Integer, List<MetricFamilySamples>>> collected = Flux.fromIterable(clusterNodes)
+        .flatMap(n -> retriever.retrieve(n.host()).map(metrics -> Tuples.of(n, metrics)))
+        .collectMap(t -> t.getT1().id(), Tuple2::getT2);
+    return collected.map(PerBrokerScrapedMetrics::new);
+  }
+}

+ 73 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/sink/KafkaSink.java

@@ -0,0 +1,73 @@
+package com.provectus.kafka.ui.service.metrics.sink;
+
+import static com.provectus.kafka.ui.service.MessagesService.createProducer;
+import static com.provectus.kafka.ui.service.metrics.prometheus.PrometheusExpose.escapedLabelValue;
+import static io.prometheus.client.Collector.MetricFamilySamples;
+import static io.prometheus.client.Collector.doubleToGoString;
+import static org.apache.kafka.clients.producer.ProducerConfig.COMPRESSION_TYPE_CONFIG;
+
+import com.fasterxml.jackson.databind.json.JsonMapper;
+import com.provectus.kafka.ui.config.ClustersProperties;
+import java.time.Instant;
+import java.time.ZoneOffset;
+import java.time.format.DateTimeFormatter;
+import java.time.temporal.ChronoUnit;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.stream.Stream;
+import lombok.RequiredArgsConstructor;
+import lombok.SneakyThrows;
+import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import reactor.core.publisher.Mono;
+
+/*
+ * Format of records copied from https://github.com/Telefonica/prometheus-kafka-adapter
+ */
+@RequiredArgsConstructor
+class KafkaSink implements MetricsSink {
+
+  record KafkaMetric(String timestamp, String value, String name, Map<String, String> labels) { }
+
+  private static final JsonMapper JSON_MAPPER = new JsonMapper();
+
+  private static final Map<String, Object> PRODUCER_ADDITIONAL_CONFIGS = Map.of(COMPRESSION_TYPE_CONFIG, "gzip");
+
+  private final String topic;
+  private final Producer<byte[], byte[]> producer;
+
+  static KafkaSink create(ClustersProperties.Cluster cluster, String targetTopic) {
+    return new KafkaSink(targetTopic, createProducer(cluster, PRODUCER_ADDITIONAL_CONFIGS));
+  }
+
+  @Override
+  public Mono<Void> send(Stream<MetricFamilySamples> metrics) {
+    return Mono.fromRunnable(() -> {
+      String ts = Instant.now()
+          .truncatedTo(ChronoUnit.SECONDS)
+          .atZone(ZoneOffset.UTC)
+          .format(DateTimeFormatter.ISO_DATE_TIME);
+
+      metrics.flatMap(m -> createRecord(ts, m)).forEach(producer::send);
+    });
+  }
+
+  private Stream<ProducerRecord<byte[], byte[]>> createRecord(String ts, MetricFamilySamples metrics) {
+    return metrics.samples.stream()
+        .map(sample -> {
+          var lbls = new LinkedHashMap<String, String>();
+          lbls.put("__name__", sample.name);
+          for (int i = 0; i < sample.labelNames.size(); i++) {
+            lbls.put(sample.labelNames.get(i), escapedLabelValue(sample.labelValues.get(i)));
+          }
+          var km = new KafkaMetric(ts, doubleToGoString(sample.value), sample.name, lbls);
+          return new ProducerRecord<>(topic, toJsonBytes(km));
+        });
+  }
+
+  @SneakyThrows
+  private static byte[] toJsonBytes(KafkaMetric m) {
+    return JSON_MAPPER.writeValueAsBytes(m);
+  }
+
+}

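For reference, one record the sink above would emit for a sample kafka_server_bytes_total{topic="t1"} 42.0 (metric name, label, and timestamp are assumed examples; the value string comes from doubleToGoString, the field order from the KafkaMetric record):

    {"timestamp":"2023-01-01T00:00:00Z","value":"42.0","name":"kafka_server_bytes_total","labels":{"__name__":"kafka_server_bytes_total","topic":"t1"}}
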
+ 56 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/sink/MetricsSink.java

@@ -0,0 +1,56 @@
+package com.provectus.kafka.ui.service.metrics.sink;
+
+import static io.prometheus.client.Collector.MetricFamilySamples;
+import static org.springframework.util.StringUtils.hasText;
+
+import com.provectus.kafka.ui.config.ClustersProperties;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import java.util.stream.Stream;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.Mono;
+
+public interface MetricsSink {
+
+  static MetricsSink create(ClustersProperties.Cluster cluster) {
+    List<MetricsSink> sinks = new ArrayList<>();
+    Optional.ofNullable(cluster.getMetrics())
+        .flatMap(metrics -> Optional.ofNullable(metrics.getStore()))
+        .flatMap(store -> Optional.ofNullable(store.getPrometheus()))
+        .ifPresent(prometheusConf -> {
+          if (hasText(prometheusConf.getUrl()) && Boolean.TRUE.equals(prometheusConf.getRemoteWrite())) {
+            sinks.add(new PrometheusRemoteWriteSink(prometheusConf.getUrl(), cluster.getSsl()));
+          }
+          if (hasText(prometheusConf.getPushGatewayUrl())) {
+            sinks.add(
+                PrometheusPushGatewaySink.create(
+                    prometheusConf.getPushGatewayUrl(),
+                    prometheusConf.getPushGatewayJobName(),
+                    prometheusConf.getPushGatewayUsername(),
+                    prometheusConf.getPushGatewayPassword()
+                ));
+          }
+        });
+
+    Optional.ofNullable(cluster.getMetrics())
+        .flatMap(metrics -> Optional.ofNullable(metrics.getStore()))
+        .flatMap(store -> Optional.ofNullable(store.getKafka()))
+        .flatMap(kafka -> Optional.ofNullable(kafka.getTopic()))
+        .ifPresent(topic -> sinks.add(KafkaSink.create(cluster, topic)));
+
+    return compoundSink(sinks);
+  }
+
+  private static MetricsSink compoundSink(List<MetricsSink> sinks) {
+    return metricsStream -> {
+      var materialized = metricsStream.toList();
+      return Flux.fromIterable(sinks)
+          .flatMap(sink -> sink.send(materialized.stream()))
+          .then();
+    };
+  }
+
+  Mono<Void> send(Stream<MetricFamilySamples> metrics);
+
+}

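To summarize the factory above: a Prometheus store with a url and remoteWrite=true adds the remote-write sink, a pushGatewayUrl adds the push-gateway sink, and a Kafka store with a topic adds the Kafka sink; the compound sink materializes the metrics once and replays them to every configured target. Usage sketch (illustrative):

    MetricsSink sink = MetricsSink.create(clusterProperties);
    // each configured sink receives its own stream over the same snapshot
    sink.send(scrapedMetrics.stream()).block();
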
+ 62 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/sink/PrometheusPushGatewaySink.java

@@ -0,0 +1,62 @@
+package com.provectus.kafka.ui.service.metrics.sink;
+
+import static io.prometheus.client.Collector.MetricFamilySamples;
+import static org.springframework.util.StringUtils.hasText;
+
+import io.prometheus.client.Collector;
+import io.prometheus.client.exporter.BasicAuthHttpConnectionFactory;
+import io.prometheus.client.exporter.PushGateway;
+import jakarta.annotation.Nullable;
+import java.net.URL;
+import java.util.List;
+import java.util.Optional;
+import java.util.stream.Stream;
+import lombok.RequiredArgsConstructor;
+import lombok.SneakyThrows;
+import reactor.core.publisher.Mono;
+import reactor.core.scheduler.Schedulers;
+
+@RequiredArgsConstructor
+class PrometheusPushGatewaySink implements MetricsSink {
+
+  private static final String DEFAULT_PGW_JOB_NAME = "kafkaui";
+
+  private final PushGateway pushGateway;
+  private final String job;
+
+  @SneakyThrows
+  static PrometheusPushGatewaySink create(String url,
+                                          @Nullable String jobName,
+                                          @Nullable String username,
+                                          @Nullable String password) {
+    var pushGateway = new PushGateway(new URL(url));
+    if (hasText(username) && hasText(password)) {
+      pushGateway.setConnectionFactory(new BasicAuthHttpConnectionFactory(username, password));
+    }
+    return new PrometheusPushGatewaySink(
+        pushGateway,
+        Optional.ofNullable(jobName).orElse(DEFAULT_PGW_JOB_NAME)
+    );
+  }
+
+  @Override
+  public Mono<Void> send(Stream<MetricFamilySamples> metrics) {
+    List<MetricFamilySamples> metricsToPush = metrics.toList();
+    if (metricsToPush.isEmpty()) {
+      return Mono.empty();
+    }
+    return Mono.<Void>fromRunnable(() -> pushSync(metricsToPush))
+        .subscribeOn(Schedulers.boundedElastic());
+  }
+
+  @SneakyThrows
+  private void pushSync(List<MetricFamilySamples> metricsToPush) {
+    Collector allMetrics = new Collector() {
+      @Override
+      public List<MetricFamilySamples> collect() {
+        return metricsToPush;
+      }
+    };
+    pushGateway.push(allMetrics, job);
+  }
+}

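Usage sketch for the push-gateway sink (illustrative; the URL is an assumed example). The anonymous Collector in pushSync exists because PushGateway.push() accepts a Collector rather than a raw sample list:

    var sink = PrometheusPushGatewaySink.create("http://pushgateway:9091", null, null, null);
    // pushes under the default job name "kafkaui"; an empty stream is a no-op
    sink.send(metrics.stream()).block();
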
+ 79 - 0
kafka-ui-api/src/main/java/com/provectus/kafka/ui/service/metrics/sink/PrometheusRemoteWriteSink.java

@@ -0,0 +1,79 @@
+package com.provectus.kafka.ui.service.metrics.sink;
+
+import static io.prometheus.client.Collector.MetricFamilySamples;
+import static prometheus.Types.Label;
+import static prometheus.Types.Sample;
+import static prometheus.Types.TimeSeries;
+
+import com.provectus.kafka.ui.config.ClustersProperties.TruststoreConfig;
+import com.provectus.kafka.ui.service.metrics.prometheus.PrometheusExpose;
+import com.provectus.kafka.ui.util.WebClientConfigurator;
+import jakarta.annotation.Nullable;
+import java.net.URI;
+import java.util.stream.Stream;
+import lombok.SneakyThrows;
+import org.springframework.util.unit.DataSize;
+import org.springframework.web.reactive.function.client.WebClient;
+import org.xerial.snappy.Snappy;
+import prometheus.Remote;
+import reactor.core.publisher.Mono;
+
+class PrometheusRemoteWriteSink implements MetricsSink {
+
+  private final URI writeEndpoint;
+  private final WebClient webClient;
+
+  PrometheusRemoteWriteSink(String prometheusUrl, @Nullable TruststoreConfig truststoreConfig) {
+    this.writeEndpoint = URI.create(prometheusUrl).resolve("/api/v1/write");
+    this.webClient = new WebClientConfigurator()
+        .configureSsl(truststoreConfig, null)
+        .configureBufferSize(DataSize.ofMegabytes(20))
+        .build();
+  }
+
+  @SneakyThrows
+  @Override
+  public Mono<Void> send(Stream<MetricFamilySamples> metrics) {
+    byte[] bytesToWrite = Snappy.compress(createWriteRequest(metrics).toByteArray());
+    return webClient.post()
+        .uri(writeEndpoint)
+        .header("Content-Type", "application/x-protobuf")
+        .header("User-Agent", "promremote-kui/0.1.0")
+        .header("Content-Encoding", "snappy")
+        .header("X-Prometheus-Remote-Write-Version", "0.1.0")
+        .bodyValue(bytesToWrite)
+        .retrieve()
+        .toBodilessEntity()
+        .then();
+  }
+
+  private static Remote.WriteRequest createWriteRequest(Stream<MetricFamilySamples> metrics) {
+    long currentTs = System.currentTimeMillis();
+    Remote.WriteRequest.Builder request = Remote.WriteRequest.newBuilder();
+    metrics.forEach(mfs -> {
+      for (MetricFamilySamples.Sample sample : mfs.samples) {
+        TimeSeries.Builder timeSeriesBuilder = TimeSeries.newBuilder();
+        timeSeriesBuilder.addLabels(
+            Label.newBuilder().setName("__name__").setValue(sample.name)
+        );
+        for (int i = 0; i < sample.labelNames.size(); i++) {
+          timeSeriesBuilder.addLabels(
+              Label.newBuilder()
+                  .setName(sample.labelNames.get(i))
+                  .setValue(PrometheusExpose.escapedLabelValue(sample.labelValues.get(i)))
+          );
+        }
+        timeSeriesBuilder.addSamples(
+            Sample.newBuilder()
+                .setValue(sample.value)
+                .setTimestamp(currentTs)
+        );
+        request.addTimeseries(timeSeriesBuilder);
+      }
+    });
+    //TODO: pass Metadata
+    return request.build();
+  }
+
+}

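Note on the remote-write sink above: the payload is a snappy-compressed protobuf WriteRequest POSTed to <prometheusUrl>/api/v1/write, so the receiving Prometheus must run with --web.enable-remote-write-receiver (as the test container added below does). Usage sketch (illustrative):

    var sink = new PrometheusRemoteWriteSink("http://prometheus:9090", null);
    sink.send(metrics.stream()).block();
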
+ 15 - 1
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/KafkaServicesValidation.java

@@ -19,6 +19,7 @@ import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.admin.AdminClient;
 import org.apache.kafka.clients.admin.AdminClientConfig;
 import org.springframework.util.ResourceUtils;
+import prometheus.query.api.PrometheusClientApi;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
@@ -46,7 +47,7 @@ public final class KafkaServicesValidation {
   public static Optional<String> validateTruststore(TruststoreConfig truststoreConfig) {
     if (truststoreConfig.getTruststoreLocation() != null && truststoreConfig.getTruststorePassword() != null) {
       try (FileInputStream fileInputStream = new FileInputStream(
-             (ResourceUtils.getFile(truststoreConfig.getTruststoreLocation())))) {
+          (ResourceUtils.getFile(truststoreConfig.getTruststoreLocation())))) {
         KeyStore trustStore = KeyStore.getInstance(KeyStore.getDefaultType());
         trustStore.load(fileInputStream, truststoreConfig.getTruststorePassword().toCharArray());
         TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance(
@@ -141,5 +142,18 @@ public final class KafkaServicesValidation {
         .onErrorResume(KafkaServicesValidation::invalid);
   }
 
+  public static Mono<ApplicationPropertyValidationDTO> validatePrometheusStore(
+      Supplier<ReactiveFailover<PrometheusClientApi>> clientSupplier) {
+    ReactiveFailover<PrometheusClientApi> client;
+    try {
+      client = clientSupplier.get();
+    } catch (Exception e) {
+      log.error("Error creating Prometheus client", e);
+      return invalid("Error creating Prometheus client: " + e.getMessage());
+    }
+    return client.mono(c -> c.query("1", null, null))
+        .then(valid())
+        .onErrorResume(KafkaServicesValidation::invalid);
+  }
 
 }

+ 0 - 6
kafka-ui-api/src/main/java/com/provectus/kafka/ui/util/ReactiveFailover.java

@@ -81,9 +81,6 @@ public class ReactiveFailover<T> {
         .flatMap(f)
         .onErrorResume(failoverExceptionsPredicate, th -> {
           publisher.markFailed();
-          if (candidates.size() == 1) {
-            return Mono.error(th);
-          }
           var newCandidates = candidates.stream().skip(1).filter(PublisherHolder::isActive).toList();
           if (newCandidates.isEmpty()) {
             return Mono.error(th);
@@ -106,9 +103,6 @@ public class ReactiveFailover<T> {
         .flatMapMany(f)
         .onErrorResume(failoverExceptionsPredicate, th -> {
           publisher.markFailed();
-          if (candidates.size() == 1) {
-            return Flux.error(th);
-          }
           var newCandidates = candidates.stream().skip(1).filter(PublisherHolder::isActive).toList();
           if (newCandidates.isEmpty()) {
             return Flux.error(th);

+ 19 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/container/PrometheusContainer.java

@@ -0,0 +1,19 @@
+package com.provectus.kafka.ui.container;
+
+import org.testcontainers.containers.GenericContainer;
+
+public class PrometheusContainer extends GenericContainer<PrometheusContainer> {
+
+  public PrometheusContainer() {
+    super("prom/prometheus:latest");
+    setCommandParts(new String[] {
+        "--web.enable-remote-write-receiver",
+        "--config.file=/etc/prometheus/prometheus.yml"
+    });
+    addExposedPort(9090);
+  }
+
+  public String url() {
+    return "http://" + getHost() + ":" + getMappedPort(9090);
+  }
+}

+ 15 - 20
kafka-ui-api/src/test/java/com/provectus/kafka/ui/model/PartitionDistributionStatsTest.java

@@ -23,28 +23,23 @@ class PartitionDistributionStatsTest {
     Node n4 = new Node(4, "n4", 9092);
 
     var stats = PartitionDistributionStats.create(
-        Statistics.builder()
-            .clusterDescription(
-                new ReactiveAdminClient.ClusterDescription(null, "test", Set.of(n1, n2, n3), null))
-            .topicDescriptions(
-                Map.of(
-                    "t1", new TopicDescription(
-                        "t1", false,
-                        List.of(
-                            new TopicPartitionInfo(0, n1, List.of(n1, n2), List.of(n1, n2)),
-                            new TopicPartitionInfo(1, n2, List.of(n2, n3), List.of(n2, n3))
-                        )
-                    ),
-                    "t2", new TopicDescription(
-                        "t2", false,
-                        List.of(
-                            new TopicPartitionInfo(0, n1, List.of(n1, n2), List.of(n1, n2)),
-                            new TopicPartitionInfo(1, null, List.of(n2, n1), List.of(n1))
-                        )
-                    )
+        List.of(
+            new TopicDescription(
+                "t1", false,
+                List.of(
+                    new TopicPartitionInfo(0, n1, List.of(n1, n2), List.of(n1, n2)),
+                    new TopicPartitionInfo(1, n2, List.of(n2, n3), List.of(n2, n3))
+                )
+            ),
+            new TopicDescription(
+                "t2", false,
+                List.of(
+                    new TopicPartitionInfo(0, n1, List.of(n1, n2), List.of(n1, n2)),
+                    new TopicPartitionInfo(1, null, List.of(n2, n1), List.of(n1))
                 )
             )
-            .build(), 4
+        ),
+        4
     );
 
     assertThat(stats.getPartitionLeaders())

+ 8 - 8
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/TopicsServicePaginationTest.java

@@ -71,7 +71,7 @@ class TopicsServicePaginationTest {
             .map(Objects::toString)
             .map(name -> new TopicDescription(name, false, List.of()))
             .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), null,
-                Metrics.empty(), InternalLogDirStats.empty(), "_"))
+                Metrics.empty(), null, null, "_"))
             .collect(Collectors.toMap(InternalTopic::getName, Function.identity()))
     );
 
@@ -97,7 +97,7 @@ class TopicsServicePaginationTest {
         .map(Objects::toString)
         .map(name -> new TopicDescription(name, false, List.of()))
         .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), null,
-            Metrics.empty(), InternalLogDirStats.empty(), "_"))
+            Metrics.empty(), null, null, "_"))
         .collect(Collectors.toMap(InternalTopic::getName, Function.identity()));
     init(internalTopics);
 
@@ -124,7 +124,7 @@ class TopicsServicePaginationTest {
             .map(Objects::toString)
             .map(name -> new TopicDescription(name, false, List.of()))
             .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), null,
-                Metrics.empty(), InternalLogDirStats.empty(), "_"))
+                Metrics.empty(), null, null, "_"))
             .collect(Collectors.toMap(InternalTopic::getName, Function.identity()))
     );
 
@@ -143,7 +143,7 @@ class TopicsServicePaginationTest {
             .map(Objects::toString)
             .map(name -> new TopicDescription(name, false, List.of()))
             .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), null,
-                Metrics.empty(), InternalLogDirStats.empty(), "_"))
+                Metrics.empty(), null, null, "_"))
             .collect(Collectors.toMap(InternalTopic::getName, Function.identity()))
     );
 
@@ -162,7 +162,7 @@ class TopicsServicePaginationTest {
             .map(Objects::toString)
             .map(name -> new TopicDescription(name, Integer.parseInt(name) % 10 == 0, List.of()))
             .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), null,
-                Metrics.empty(), InternalLogDirStats.empty(), "_"))
+                Metrics.empty(), null, null, "_"))
             .collect(Collectors.toMap(InternalTopic::getName, Function.identity()))
     );
 
@@ -183,7 +183,7 @@ class TopicsServicePaginationTest {
             .map(Objects::toString)
             .map(name -> new TopicDescription(name, Integer.parseInt(name) % 5 == 0, List.of()))
             .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), null,
-                Metrics.empty(), InternalLogDirStats.empty(), "_"))
+                Metrics.empty(), null, null, "_"))
             .collect(Collectors.toMap(InternalTopic::getName, Function.identity()))
     );
 
@@ -204,7 +204,7 @@ class TopicsServicePaginationTest {
             .map(Objects::toString)
             .map(name -> new TopicDescription(name, false, List.of()))
             .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), null,
-                Metrics.empty(), InternalLogDirStats.empty(), "_"))
+                Metrics.empty(), null, null, "_"))
             .collect(Collectors.toMap(InternalTopic::getName, Function.identity()))
     );
 
@@ -226,7 +226,7 @@ class TopicsServicePaginationTest {
                     new TopicPartitionInfo(p, null, List.of(), List.of()))
                 .collect(Collectors.toList())))
         .map(topicDescription -> InternalTopic.from(topicDescription, List.of(), InternalPartitionsOffsets.empty(),
-            Metrics.empty(), InternalLogDirStats.empty(), "_"))
+            Metrics.empty(), null, null, "_"))
         .collect(Collectors.toMap(InternalTopic::getName, Function.identity()));
 
     init(internalTopics);

+ 53 - 39
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/integration/odd/TopicsExporterTest.java

@@ -1,5 +1,7 @@
 package com.provectus.kafka.ui.service.integration.odd;
 
+import static com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState.TopicState;
+import static com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState.empty;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.mockito.ArgumentMatchers.anyBoolean;
 import static org.mockito.ArgumentMatchers.anyString;
@@ -9,6 +11,7 @@ import static org.mockito.Mockito.when;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.Statistics;
 import com.provectus.kafka.ui.service.StatisticsCache;
+import com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState;
 import com.provectus.kafka.ui.sr.api.KafkaSrClientApi;
 import com.provectus.kafka.ui.sr.model.SchemaSubject;
 import com.provectus.kafka.ui.sr.model.SchemaType;
@@ -59,15 +62,22 @@ class TopicsExporterTest {
         .thenReturn(Mono.error(WebClientResponseException.create(404, "NF", new HttpHeaders(), null, null, null)));
     stats = Statistics.empty()
         .toBuilder()
-        .topicDescriptions(
-            Map.of(
-                "_hidden", new TopicDescription("_hidden", false, List.of(
-                    new TopicPartitionInfo(0, null, List.of(), List.of())
-                )),
-                "visible", new TopicDescription("visible", false, List.of(
-                    new TopicPartitionInfo(0, null, List.of(), List.of())
-                ))
-            )
+        .clusterState(
+            empty().toBuilder().topicStates(
+                Map.of(
+                    "_hidden",
+                    new TopicState(
+                        "_hidden",
+                        new TopicDescription("_hidden", false, List.of(
+                            new TopicPartitionInfo(0, null, List.of(), List.of())
+                        )), null, null, null, null, null),
+                    "visible",
+                    new TopicState("visible",
+                        new TopicDescription("visible", false, List.of(
+                            new TopicPartitionInfo(0, null, List.of(), List.of())
+                        )), null, null, null, null, null)
+                )
+            ).build()
         )
         .build();
 
@@ -101,40 +111,44 @@ class TopicsExporterTest {
 
     stats = Statistics.empty()
         .toBuilder()
-        .topicDescriptions(
-            Map.of(
-                "testTopic",
-                new TopicDescription(
-                    "testTopic",
-                    false,
-                    List.of(
-                        new TopicPartitionInfo(
-                            0,
-                            null,
+        .clusterState(
+            ScrapedClusterState.empty().toBuilder()
+                .topicStates(
+                    Map.of(
+                        "testTopic",
+                        new TopicState(
+                            "testTopic",
+                            new TopicDescription(
+                                "testTopic",
+                                false,
+                                List.of(
+                                    new TopicPartitionInfo(
+                                        0,
+                                        null,
+                                        List.of(
+                                            new Node(1, "host1", 9092),
+                                            new Node(2, "host2", 9092)
+                                        ),
+                                        List.of())
+                                )
+                            ),
                             List.of(
-                                new Node(1, "host1", 9092),
-                                new Node(2, "host2", 9092)
+                                new ConfigEntry(
+                                    "custom.config",
+                                    "100500",
+                                    ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG,
+                                    false,
+                                    false,
+                                    List.of(),
+                                    ConfigEntry.ConfigType.INT,
+                                    null
+                                )
                             ),
-                            List.of())
-                    ))
-            )
-        )
-        .topicConfigs(
-            Map.of(
-                "testTopic", List.of(
-                    new ConfigEntry(
-                        "custom.config",
-                        "100500",
-                        ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG,
-                        false,
-                        false,
-                        List.of(),
-                        ConfigEntry.ConfigType.INT,
-                        null
+                            null, null, null, null
+                        )
                     )
                 )
-            )
-        )
+                .build())
         .build();
 
     StepVerifier.create(topicsExporter.export(cluster))

+ 2 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/ksql/KsqlApiClientTest.java

@@ -11,12 +11,14 @@ import com.provectus.kafka.ui.AbstractIntegrationTest;
 import java.math.BigDecimal;
 import java.time.Duration;
 import java.util.Map;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Disabled;
 import org.junit.jupiter.api.Test;
 import org.testcontainers.shaded.org.awaitility.Awaitility;
 import reactor.test.StepVerifier;
 
+@Disabled
 class KsqlApiClientTest extends AbstractIntegrationTest {
 
   @BeforeAll

+ 0 - 30
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/PrometheusEndpointMetricsParserTest.java

@@ -1,30 +0,0 @@
-package com.provectus.kafka.ui.service.metrics;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-import java.util.Map;
-import java.util.Optional;
-import org.junit.jupiter.api.Test;
-
-class PrometheusEndpointMetricsParserTest {
-
-  @Test
-  void test() {
-    String metricsString =
-        "kafka_server_BrokerTopicMetrics_FifteenMinuteRate"
-            + "{name=\"BytesOutPerSec\",topic=\"__confluent.support.metrics\",} 123.1234";
-
-    Optional<RawMetric> parsedOpt = PrometheusEndpointMetricsParser.parse(metricsString);
-
-    assertThat(parsedOpt).hasValueSatisfying(metric -> {
-      assertThat(metric.name()).isEqualTo("kafka_server_BrokerTopicMetrics_FifteenMinuteRate");
-      assertThat(metric.value()).isEqualTo("123.1234");
-      assertThat(metric.labels()).containsExactlyEntriesOf(
-          Map.of(
-              "name", "BytesOutPerSec",
-              "topic", "__confluent.support.metrics"
-          ));
-    });
-  }
-
-}

+ 0 - 97
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/PrometheusMetricsRetrieverTest.java

@@ -1,97 +0,0 @@
-package com.provectus.kafka.ui.service.metrics;
-
-import com.provectus.kafka.ui.model.MetricsConfig;
-import java.io.IOException;
-import java.math.BigDecimal;
-import java.util.List;
-import java.util.Map;
-import okhttp3.mockwebserver.MockResponse;
-import okhttp3.mockwebserver.MockWebServer;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.springframework.web.reactive.function.client.WebClient;
-import reactor.test.StepVerifier;
-
-class PrometheusMetricsRetrieverTest {
-
-  private final PrometheusMetricsRetriever retriever = new PrometheusMetricsRetriever();
-
-  private final MockWebServer mockWebServer = new MockWebServer();
-
-  @BeforeEach
-  void startMockServer() throws IOException {
-    mockWebServer.start();
-  }
-
-  @AfterEach
-  void stopMockServer() throws IOException {
-    mockWebServer.close();
-  }
-
-  @Test
-  void callsMetricsEndpointAndConvertsResponceToRawMetric() {
-    var url = mockWebServer.url("/metrics");
-    mockWebServer.enqueue(prepareResponse());
-
-    MetricsConfig metricsConfig = prepareMetricsConfig(url.port(), null, null);
-
-    StepVerifier.create(retriever.retrieve(WebClient.create(), url.host(), metricsConfig))
-        .expectNextSequence(expectedRawMetrics())
-        // third metric should not be present, since it has "NaN" value
-        .verifyComplete();
-  }
-
-  @Test
-  void callsSecureMetricsEndpointAndConvertsResponceToRawMetric() {
-    var url = mockWebServer.url("/metrics");
-    mockWebServer.enqueue(prepareResponse());
-
-
-    MetricsConfig metricsConfig = prepareMetricsConfig(url.port(), "username", "password");
-
-    StepVerifier.create(retriever.retrieve(WebClient.create(), url.host(), metricsConfig))
-        .expectNextSequence(expectedRawMetrics())
-        // third metric should not be present, since it has "NaN" value
-        .verifyComplete();
-  }
-
-  MockResponse prepareResponse() {
-    // body copied from real jmx exporter
-    return new MockResponse().setBody(
-        "# HELP kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate Attribute exposed for management \n"
-            + "# TYPE kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate untyped\n"
-            + "kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate{name=\"RequestHandlerAvgIdlePercent\",} 0.898\n"
-            + "# HELP kafka_server_socket_server_metrics_request_size_avg The average size of requests sent. \n"
-            + "# TYPE kafka_server_socket_server_metrics_request_size_avg untyped\n"
-            + "kafka_server_socket_server_metrics_request_size_avg{listener=\"PLAIN\",networkProcessor=\"1\",} 101.1\n"
-            + "kafka_server_socket_server_metrics_request_size_avg{listener=\"PLAIN2\",networkProcessor=\"5\",} NaN"
-    );
-  }
-
-  MetricsConfig prepareMetricsConfig(Integer port, String username, String password) {
-    return MetricsConfig.builder()
-        .ssl(false)
-        .port(port)
-        .type(MetricsConfig.PROMETHEUS_METRICS_TYPE)
-        .username(username)
-        .password(password)
-        .build();
-  }
-
-  List<RawMetric> expectedRawMetrics() {
-
-    var firstMetric = RawMetric.create(
-        "kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate",
-        Map.of("name", "RequestHandlerAvgIdlePercent"),
-        new BigDecimal("0.898")
-    );
-
-    var secondMetric = RawMetric.create(
-        "kafka_server_socket_server_metrics_request_size_avg",
-        Map.of("listener", "PLAIN", "networkProcessor", "1"),
-        new BigDecimal("101.1")
-    );
-    return List.of(firstMetric, secondMetric);
-  }
-}

+ 0 - 93
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/WellKnownMetricsTest.java

@@ -1,93 +0,0 @@
-package com.provectus.kafka.ui.service.metrics;
-
-import static org.assertj.core.api.Assertions.assertThat;
-
-import com.provectus.kafka.ui.model.Metrics;
-import java.math.BigDecimal;
-import java.util.Arrays;
-import java.util.Map;
-import java.util.Optional;
-import org.apache.kafka.common.Node;
-import org.junit.jupiter.api.Test;
-
-class WellKnownMetricsTest {
-
-  private final WellKnownMetrics wellKnownMetrics = new WellKnownMetrics();
-
-  @Test
-  void bytesIoTopicMetricsPopulated() {
-    populateWith(
-        new Node(0, "host", 123),
-        "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesInPerSec\",topic=\"test-topic\",} 1.0",
-        "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesOutPerSec\",topic=\"test-topic\",} 2.0",
-        "kafka_server_brokertopicmetrics_fifteenminuterate{name=\"bytesinpersec\",topic=\"test-topic\",} 1.0",
-        "kafka_server_brokertopicmetrics_fifteenminuterate{name=\"bytesoutpersec\",topic=\"test-topic\",} 2.0",
-        "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesinpersec\",topic=\"test-topic\",} 1.0",
-        "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesoutpersec\",topic=\"test-topic\",} 2.0"
-    );
-    assertThat(wellKnownMetrics.bytesInFifteenMinuteRate)
-        .containsEntry("test-topic", new BigDecimal("3.0"));
-    assertThat(wellKnownMetrics.bytesOutFifteenMinuteRate)
-        .containsEntry("test-topic", new BigDecimal("6.0"));
-  }
-
-  @Test
-  void bytesIoBrokerMetricsPopulated() {
-    populateWith(
-        new Node(1, "host1", 123),
-        "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesInPerSec\",} 1.0",
-        "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesOutPerSec\",} 2.0"
-    );
-    populateWith(
-        new Node(2, "host2", 345),
-        "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesinpersec\",} 10.0",
-        "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesoutpersec\",} 20.0"
-    );
-
-    assertThat(wellKnownMetrics.brokerBytesInFifteenMinuteRate)
-        .hasSize(2)
-        .containsEntry(1, new BigDecimal("1.0"))
-        .containsEntry(2, new BigDecimal("10.0"));
-
-    assertThat(wellKnownMetrics.brokerBytesOutFifteenMinuteRate)
-        .hasSize(2)
-        .containsEntry(1, new BigDecimal("2.0"))
-        .containsEntry(2, new BigDecimal("20.0"));
-  }
-
-  @Test
-  void appliesInnerStateToMetricsBuilder() {
-    //filling per topic io rates
-    wellKnownMetrics.bytesInFifteenMinuteRate.put("topic", new BigDecimal(1));
-    wellKnownMetrics.bytesOutFifteenMinuteRate.put("topic", new BigDecimal(2));
-
-    //filling per broker io rates
-    wellKnownMetrics.brokerBytesInFifteenMinuteRate.put(1, new BigDecimal(1));
-    wellKnownMetrics.brokerBytesOutFifteenMinuteRate.put(1, new BigDecimal(2));
-    wellKnownMetrics.brokerBytesInFifteenMinuteRate.put(2, new BigDecimal(10));
-    wellKnownMetrics.brokerBytesOutFifteenMinuteRate.put(2, new BigDecimal(20));
-
-    Metrics.MetricsBuilder builder = Metrics.builder();
-    wellKnownMetrics.apply(builder);
-    var metrics = builder.build();
-
-    // checking per topic io rates
-    assertThat(metrics.getTopicBytesInPerSec()).containsExactlyEntriesOf(wellKnownMetrics.bytesInFifteenMinuteRate);
-    assertThat(metrics.getTopicBytesOutPerSec()).containsExactlyEntriesOf(wellKnownMetrics.bytesOutFifteenMinuteRate);
-
-    // checking per broker io rates
-    assertThat(metrics.getBrokerBytesInPerSec()).containsExactlyInAnyOrderEntriesOf(
-        Map.of(1, new BigDecimal(1), 2, new BigDecimal(10)));
-    assertThat(metrics.getBrokerBytesOutPerSec()).containsExactlyInAnyOrderEntriesOf(
-        Map.of(1, new BigDecimal(2), 2, new BigDecimal(20)));
-  }
-
-  private void populateWith(Node n, String... prometheusMetric) {
-    Arrays.stream(prometheusMetric)
-        .map(PrometheusEndpointMetricsParser::parse)
-        .filter(Optional::isPresent)
-        .map(Optional::get)
-        .forEach(m -> wellKnownMetrics.populate(n, m));
-  }
-
-}

+ 53 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/prometheus/PrometheusExposeTest.java

@@ -0,0 +1,53 @@
+package com.provectus.kafka.ui.service.metrics.prometheus;
+
+import static com.provectus.kafka.ui.service.metrics.prometheus.PrometheusExpose.prepareMetricsForGlobalExpose;
+import static io.prometheus.client.Collector.Type.GAUGE;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.provectus.kafka.ui.model.Metrics;
+import com.provectus.kafka.ui.service.metrics.scrape.inferred.InferredMetrics;
+import io.prometheus.client.Collector.MetricFamilySamples;
+import io.prometheus.client.Collector.MetricFamilySamples.Sample;
+import java.util.List;
+import java.util.Map;
+import org.junit.jupiter.api.Test;
+
+class PrometheusExposeTest {
+
+  @Test
+  void prepareMetricsForGlobalExposeAppendsClusterAndBrokerIdLabelsToMetrics() {
+
+    var inferredMfs = new MetricFamilySamples("infer", GAUGE, "help", List.of(
+        new Sample("infer1", List.of("lbl1"), List.of("lblVal1"), 100)));
+
+    var broker1Mfs = new MetricFamilySamples("brok", GAUGE, "help", List.of(
+        new Sample("brok", List.of("broklbl1"), List.of("broklblVal1"), 101)));
+
+    var broker2Mfs = new MetricFamilySamples("brok", GAUGE, "help", List.of(
+        new Sample("brok", List.of("broklbl1"), List.of("broklblVal1"), 102)));
+
+    List<MetricFamilySamples> prepared = prepareMetricsForGlobalExpose(
+        "testCluster",
+        Metrics.builder()
+            .inferredMetrics(new InferredMetrics(List.of(inferredMfs)))
+            .perBrokerScrapedMetrics(Map.of(1, List.of(broker1Mfs), 2, List.of(broker2Mfs)))
+            .build()
+    ).toList();
+
+    assertThat(prepared)
+        .hasSize(3)
+        .contains(new MetricFamilySamples("infer", GAUGE, "help", List.of(
+            new Sample("infer1", List.of("cluster", "lbl1"), List.of("testCluster", "lblVal1"), 100))))
+        .contains(
+            new MetricFamilySamples("brok", GAUGE, "help", List.of(
+                new Sample("brok", List.of("cluster", "broker_id", "broklbl1"),
+                    List.of("testCluster", "1", "broklblVal1"), 101)))
+        )
+        .contains(
+            new MetricFamilySamples("brok", GAUGE, "help", List.of(
+                new Sample("brok", List.of("cluster", "broker_id", "broklbl1"),
+                    List.of("testCluster", "2", "broklblVal1"), 102)))
+        );
+  }
+
+}

+ 75 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/scrape/IoRatesMetricsScannerTest.java

@@ -0,0 +1,75 @@
+package com.provectus.kafka.ui.service.metrics.scrape;
+
+import static io.prometheus.client.Collector.MetricFamilySamples;
+import static java.util.Arrays.stream;
+import static java.util.stream.Collectors.toMap;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.provectus.kafka.ui.service.metrics.scrape.prometheus.PrometheusEndpointParser;
+import java.math.BigDecimal;
+import java.util.List;
+import java.util.Map;
+import org.apache.kafka.common.Node;
+import org.junit.jupiter.api.Test;
+
+class IoRatesMetricsScannerTest {
+
+  private IoRatesMetricsScanner ioRatesMetricsScanner;
+
+  @Test
+  void bytesIoTopicMetricsPopulated() {
+    populateWith(
+        nodeMetrics(
+            new Node(0, "host", 123),
+            "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesInPerSec\",topic=\"test\",} 1.0",
+            "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesOutPerSec\",topic=\"test\",} 2.0",
+            "kafka_server_brokertopicmetrics_fifteenminuterate{name=\"bytesinpersec\",topic=\"test\",} 1.0",
+            "kafka_server_brokertopicmetrics_fifteenminuterate{name=\"bytesoutpersec\",topic=\"test\",} 2.0",
+            "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesinpersec\",topic=\"test\",} 1.0",
+            "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesoutpersec\",topic=\"test\",} 2.0"
+        )
+    );
+    assertThat(ioRatesMetricsScanner.bytesInFifteenMinuteRate)
+        .containsEntry("test", new BigDecimal("3.0"));
+    assertThat(ioRatesMetricsScanner.bytesOutFifteenMinuteRate)
+        .containsEntry("test", new BigDecimal("6.0"));
+  }
+
+  @Test
+  void bytesIoBrokerMetricsPopulated() {
+    populateWith(
+        nodeMetrics(
+            new Node(1, "host1", 123),
+            "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesInPerSec\",} 1.0",
+            "kafka_server_BrokerTopicMetrics_FifteenMinuteRate{name=\"BytesOutPerSec\",} 2.0"
+        ),
+        nodeMetrics(
+            new Node(2, "host2", 345),
+            "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesinpersec\",} 10.0",
+            "some_unknown_prefix_brokertopicmetrics_fifteenminuterate{name=\"bytesoutpersec\",} 20.0"
+        )
+    );
+
+    assertThat(ioRatesMetricsScanner.brokerBytesInFifteenMinuteRate)
+        .hasSize(2)
+        .containsEntry(1, new BigDecimal("1.0"))
+        .containsEntry(2, new BigDecimal("10.0"));
+
+    assertThat(ioRatesMetricsScanner.brokerBytesOutFifteenMinuteRate)
+        .hasSize(2)
+        .containsEntry(1, new BigDecimal("2.0"))
+        .containsEntry(2, new BigDecimal("20.0"));
+  }
+
+  @SafeVarargs
+  private void populateWith(Map.Entry<Integer, List<MetricFamilySamples>>... entries) {
+    ioRatesMetricsScanner = new IoRatesMetricsScanner(
+        stream(entries).collect(toMap(Map.Entry::getKey, Map.Entry::getValue))
+    );
+  }
+
+  private Map.Entry<Integer, List<MetricFamilySamples>> nodeMetrics(Node n, String... prometheusMetrics) {
+    return Map.entry(n.id(), PrometheusEndpointParser.parse(stream(prometheusMetrics)));
+  }
+
+}

+ 121 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/scrape/inferred/InferredMetricsScraperTest.java

@@ -0,0 +1,121 @@
+package com.provectus.kafka.ui.service.metrics.scrape.inferred;
+
+import static com.provectus.kafka.ui.model.InternalLogDirStats.LogDirSpaceStats;
+import static com.provectus.kafka.ui.model.InternalLogDirStats.SegmentStats;
+import static com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState.ConsumerGroupState;
+import static com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState.NodeState;
+import static com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState.TopicState;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.provectus.kafka.ui.model.InternalLogDirStats;
+import com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState;
+import java.time.Instant;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import org.apache.kafka.clients.admin.ConsumerGroupDescription;
+import org.apache.kafka.clients.admin.MemberAssignment;
+import org.apache.kafka.clients.admin.MemberDescription;
+import org.apache.kafka.clients.admin.TopicDescription;
+import org.apache.kafka.common.Node;
+import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.TopicPartitionInfo;
+import org.junit.jupiter.api.Test;
+import reactor.core.publisher.Mono;
+import reactor.test.StepVerifier;
+
+class InferredMetricsScraperTest {
+
+  final InferredMetricsScraper scraper = new InferredMetricsScraper();
+
+  @Test
+  void allExpectedMetricsScraped() {
+    var segmentStats = new SegmentStats(1234L, 3);
+    var logDirStats = new LogDirSpaceStats(234L, 345L, Map.of(), Map.of());
+
+    Node node1 = new Node(1, "node1", 9092);
+    Node node2 = new Node(2, "node2", 9092);
+
+    Mono<InferredMetrics> scraped = scraper.scrape(
+        ScrapedClusterState.builder()
+            .scrapeFinishedAt(Instant.now())
+            .nodesStates(
+                Map.of(
+                    1, new NodeState(1, node1, segmentStats, logDirStats),
+                    2, new NodeState(2, node2, segmentStats, logDirStats)
+                )
+            )
+            .topicStates(
+                Map.of(
+                    "t1",
+                    new TopicState(
+                        "t1",
+                        new TopicDescription(
+                            "t1",
+                            false,
+                            List.of(
+                                new TopicPartitionInfo(0, node1, List.of(node1, node2), List.of(node1, node2)),
+                                new TopicPartitionInfo(1, node1, List.of(node1, node2), List.of(node1))
+                            )
+                        ),
+                        List.of(),
+                        Map.of(0, 100L, 1, 101L),
+                        Map.of(0, 200L, 1, 201L),
+                        segmentStats,
+                        Map.of(0, segmentStats, 1, segmentStats)
+                    )
+                )
+            )
+            .consumerGroupsStates(
+                Map.of(
+                    "cg1",
+                    new ConsumerGroupState(
+                        "cg1",
+                        new ConsumerGroupDescription(
+                            "cg1",
+                            true,
+                            List.of(
+                                new MemberDescription(
+                                    "memb1", Optional.empty(), "client1", "hst1",
+                                    new MemberAssignment(Set.of(new TopicPartition("t1", 0)))
+                                )
+                            ),
+                            null,
+                            org.apache.kafka.common.ConsumerGroupState.STABLE,
+                            node1
+                        ),
+                        Map.of(new TopicPartition("t1", 0), 150L)
+                    )
+                )
+            )
+            .build()
+    );
+
+    StepVerifier.create(scraped)
+        .assertNext(inferredMetrics ->
+            assertThat(inferredMetrics.asStream().map(m -> m.name)).containsExactlyInAnyOrder(
+                "broker_count",
+                "broker_bytes_disk",
+                "broker_bytes_usable",
+                "broker_bytes_total",
+                "topic_count",
+                "kafka_topic_partitions",
+                "kafka_topic_partition_current_offset",
+                "kafka_topic_partition_oldest_offset",
+                "kafka_topic_partition_in_sync_replica",
+                "kafka_topic_partition_replicas",
+                "kafka_topic_partition_leader",
+                "topic_bytes_disk",
+                "group_count",
+                "group_state",
+                "group_member_count",
+                "group_host_count",
+                "kafka_consumergroup_current_offset",
+                "kafka_consumergroup_lag"
+            )
+        )
+        .verifyComplete();
+  }
+
+}

+ 3 - 2
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/JmxMetricsFormatterTest.java → kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/scrape/jmx/JmxMetricsFormatterTest.java

@@ -1,7 +1,8 @@
-package com.provectus.kafka.ui.service.metrics;
+package com.provectus.kafka.ui.service.metrics.scrape.jmx;
 
 import static org.assertj.core.api.Assertions.assertThat;
 
+import com.provectus.kafka.ui.service.metrics.RawMetric;
 import java.math.BigDecimal;
 import java.util.List;
 import java.util.Map;
@@ -74,4 +75,4 @@ class JmxMetricsFormatterTest {
     assertThat(actual.value()).isCloseTo(expected.value(), Offset.offset(new BigDecimal("0.001")));
   }
 
-}
+}

+ 186 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/scrape/prometheus/PrometheusEndpointParserTest.java

@@ -0,0 +1,186 @@
+package com.provectus.kafka.ui.service.metrics.scrape.prometheus;
+
+import static com.provectus.kafka.ui.service.metrics.scrape.prometheus.PrometheusEndpointParser.parse;
+import static io.prometheus.client.Collector.MetricFamilySamples;
+import static io.prometheus.client.Collector.MetricFamilySamples.Sample;
+import static io.prometheus.client.Collector.Type;
+import static java.lang.Double.POSITIVE_INFINITY;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+import com.provectus.kafka.ui.service.metrics.prometheus.PrometheusExpose;
+import io.prometheus.client.Collector;
+import io.prometheus.client.CollectorRegistry;
+import io.prometheus.client.Counter;
+import io.prometheus.client.Gauge;
+import io.prometheus.client.Histogram;
+import io.prometheus.client.Info;
+import io.prometheus.client.Summary;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+import org.junit.jupiter.api.Test;
+
+class PrometheusEndpointParserTest {
+
+  @Test
+  void parsesAllGeneratedMetricTypes() {
+    List<MetricFamilySamples> original = generateMfs();
+    String exposed = PrometheusExpose.constructHttpsResponse(original.stream()).getBody();
+    List<MetricFamilySamples> parsed = parse(exposed.lines());
+    assertThat(parsed).containsExactlyElementsOf(original);
+  }
+
+  @Test
+  void parsesMetricsFromPrometheusEndpointOutput() {
+    String expose = """
+            # HELP http_requests_total The total number of HTTP requests.
+            # TYPE http_requests_total counter
+            http_requests_total{method="post",code="200",} 1027 1395066363000
+            http_requests_total{method="post",code="400",}    3 1395066363000
+            # Minimalistic line:
+            metric_without_timestamp_and_labels 12.47
+
+            # A weird metric from before the epoch:
+            something_weird{problem="division by zero"} +Inf -3982045
+
+            # TYPE something_untyped untyped
+            something_untyped{} -123123
+
+            # TYPE unit_test_seconds counter
+            # UNIT unit_test_seconds seconds
+            # HELP unit_test_seconds Testing that unit parsed properly
+            unit_test_seconds_total 4.20072246e+06
+
+            # HELP http_request_duration_seconds A histogram of the request duration.
+            # TYPE http_request_duration_seconds histogram
+            http_request_duration_seconds_bucket{le="0.05"} 24054
+            http_request_duration_seconds_bucket{le="0.1"} 33444
+            http_request_duration_seconds_bucket{le="0.2"} 100392
+            http_request_duration_seconds_bucket{le="0.5"} 129389
+            http_request_duration_seconds_bucket{le="1"} 133988
+            http_request_duration_seconds_bucket{le="+Inf"} 144320
+            http_request_duration_seconds_sum 53423
+            http_request_duration_seconds_count 144320
+        """;
+    List<MetricFamilySamples> parsed = parse(expose.lines());
+    assertThat(parsed).contains(
+        new MetricFamilySamples(
+            "http_requests_total",
+            Type.COUNTER,
+            "The total number of HTTP requests.",
+            List.of(
+                new Sample("http_requests_total", List.of("method", "code"), List.of("post", "200"), 1027),
+                new Sample("http_requests_total", List.of("method", "code"), List.of("post", "400"), 3)
+            )
+        ),
+        new MetricFamilySamples(
+            "metric_without_timestamp_and_labels",
+            Type.GAUGE,
+            "metric_without_timestamp_and_labels",
+            List.of(new Sample("metric_without_timestamp_and_labels", List.of(), List.of(), 12.47))
+        ),
+        new MetricFamilySamples(
+            "something_weird",
+            Type.GAUGE,
+            "something_weird",
+            List.of(new Sample("something_weird", List.of("problem"), List.of("division by zero"), POSITIVE_INFINITY))
+        ),
+        new MetricFamilySamples(
+            "something_untyped",
+            Type.GAUGE,
+            "something_untyped",
+            List.of(new Sample("something_untyped", List.of(), List.of(), -123123))
+        ),
+        new MetricFamilySamples(
+            "unit_test_seconds",
+            "seconds",
+            Type.COUNTER,
+            "Testing that unit parsed properly",
+            List.of(new Sample("unit_test_seconds_total", List.of(), List.of(), 4.20072246e+06))
+        ),
+        new MetricFamilySamples(
+            "http_request_duration_seconds",
+            Type.HISTOGRAM,
+            "A histogram of the request duration.",
+            List.of(
+                new Sample("http_request_duration_seconds_bucket", List.of("le"), List.of("0.05"), 24054),
+                new Sample("http_request_duration_seconds_bucket", List.of("le"), List.of("0.1"), 33444),
+                new Sample("http_request_duration_seconds_bucket", List.of("le"), List.of("0.2"), 100392),
+                new Sample("http_request_duration_seconds_bucket", List.of("le"), List.of("0.5"), 129389),
+                new Sample("http_request_duration_seconds_bucket", List.of("le"), List.of("1"), 133988),
+                new Sample("http_request_duration_seconds_bucket", List.of("le"), List.of("+Inf"), 144320),
+                new Sample("http_request_duration_seconds_sum", List.of(), List.of(), 53423),
+                new Sample("http_request_duration_seconds_count", List.of(), List.of(), 144320)
+            )
+        )
+    );
+  }
+
+  private List<MetricFamilySamples> generateMfs() {
+    CollectorRegistry collectorRegistry = new CollectorRegistry();
+
+    Gauge.build()
+        .name("test_gauge")
+        .help("help for gauge")
+        .register(collectorRegistry)
+        .set(42);
+
+    Info.build()
+        .name("test_info")
+        .help("help for info")
+        .register(collectorRegistry)
+        .info("branch", "HEAD", "version", "1.2.3", "revision", "e0704b");
+
+    Counter.build()
+        .name("counter_no_labels")
+        .help("help for counter no lbls")
+        .register(collectorRegistry)
+        .inc(111);
+
+    var counterWithLbls = Counter.build()
+        .name("counter_with_labels")
+        .help("help for counter with lbls")
+        .labelNames("lbl1", "lbl2")
+        .register(collectorRegistry);
+
+    counterWithLbls.labels("v1", "v2").inc(234);
+    counterWithLbls.labels("v11", "v22").inc(345);
+
+    var histogram = Histogram.build()
+        .name("test_hist")
+        .help("help for hist")
+        .linearBuckets(0.0, 1.0, 10)
+        .labelNames("lbl1", "lbl2")
+        .register(collectorRegistry);
+
+    var summary = Summary.build()
+        .name("test_summary")
+        .help("help for summary")
+        .labelNames("lbl1", "lbl2")
+        .register(collectorRegistry);
+
+    for (int i = 0; i < 30; i++) {
+      var val = ThreadLocalRandom.current().nextDouble(10.0);
+      histogram.labels("v1", "v2").observe(val);
+      summary.labels("v1", "v2").observe(val);
+    }
+
+    //emulating unknown type
+    collectorRegistry.register(new Collector() {
+      @Override
+      public List<MetricFamilySamples> collect() {
+        return List.of(
+            new MetricFamilySamples(
+                "test_unknown",
+                Type.UNKNOWN,
+                "help for unknown",
+                List.of(new Sample("test_unknown", List.of("l1"), List.of("v1"), 23432.0))
+            )
+        );
+      }
+    });
+    return Lists.newArrayList(Iterators.forEnumeration(collectorRegistry.metricFamilySamples()));
+  }
+
+}

+ 118 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/scrape/prometheus/PrometheusMetricsRetrieverTest.java

@@ -0,0 +1,118 @@
+package com.provectus.kafka.ui.service.metrics.scrape.prometheus;
+
+import static io.prometheus.client.Collector.MetricFamilySamples;
+import static io.prometheus.client.Collector.Type;
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.provectus.kafka.ui.model.MetricsScrapeProperties;
+import io.prometheus.client.Collector.MetricFamilySamples.Sample;
+import java.io.IOException;
+import java.util.List;
+import okhttp3.mockwebserver.MockResponse;
+import okhttp3.mockwebserver.MockWebServer;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import reactor.test.StepVerifier;
+
+class PrometheusMetricsRetrieverTest {
+
+  private final MockWebServer mockWebServer = new MockWebServer();
+
+  @BeforeEach
+  void startMockServer() throws IOException {
+    mockWebServer.start();
+  }
+
+  @AfterEach
+  void stopMockServer() throws IOException {
+    mockWebServer.close();
+  }
+
+  @Test
+  void callsMetricsEndpointAndConvertsResponseToMetricFamilySamples() {
+    var url = mockWebServer.url("/metrics");
+    mockWebServer.enqueue(prepareResponse());
+
+    MetricsScrapeProperties scrapeProperties = prepareMetricsConfig(url.port(), null, null);
+    var retriever = new PrometheusMetricsRetriever(scrapeProperties);
+
+    StepVerifier.create(retriever.retrieve(url.host()))
+        .assertNext(metrics -> assertThat(metrics).containsExactlyElementsOf(expectedMetrics()))
+        .verifyComplete();
+  }
+
+  @Test
+  void callsSecureMetricsEndpointAndConvertsResponseToMetricFamilySamples() {
+    var url = mockWebServer.url("/metrics");
+    mockWebServer.enqueue(prepareResponse());
+
+    MetricsScrapeProperties scrapeProperties = prepareMetricsConfig(url.port(), "username", "password");
+    var retriever = new PrometheusMetricsRetriever(scrapeProperties);
+
+    StepVerifier.create(retriever.retrieve(url.host()))
+        .assertNext(metrics -> assertThat(metrics).containsExactlyElementsOf(expectedMetrics()))
+        .verifyComplete();
+  }
+
+  private MockResponse prepareResponse() {
+    // body copied from jmx exporter output
+    return new MockResponse().setBody(
+        """
+            # HELP kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate Attribute exposed for management
+            # TYPE kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate untyped
+            kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate{name="RequestHandlerAvgIdlePercent",} 0.898
+            # HELP kafka_server_socket_server_metrics_request_size_avg The average size of requests sent.
+            # TYPE kafka_server_socket_server_metrics_request_size_avg untyped
+            kafka_server_socket_server_metrics_request_size_avg{listener="PLAIN",networkProcessor="1",} 101.1
+            kafka_server_socket_server_metrics_request_size_avg{listener="PLAIN2",networkProcessor="5",} 202.2
+            """
+    );
+  }
+
+  private MetricsScrapeProperties prepareMetricsConfig(Integer port, String username, String password) {
+    return MetricsScrapeProperties.builder()
+        .ssl(false)
+        .port(port)
+        .username(username)
+        .password(password)
+        .build();
+  }
+
+  private List<MetricFamilySamples> expectedMetrics() {
+    return List.of(
+        new MetricFamilySamples(
+            "kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate",
+            Type.GAUGE,
+            "Attribute exposed for management",
+            List.of(
+                new Sample(
+                    "kafka_server_KafkaRequestHandlerPool_FifteenMinuteRate",
+                    List.of("name"),
+                    List.of("RequestHandlerAvgIdlePercent"),
+                    0.898
+                )
+            )
+        ),
+        new MetricFamilySamples(
+            "kafka_server_socket_server_metrics_request_size_avg",
+            Type.GAUGE,
+            "The average size of requests sent.",
+            List.of(
+                new Sample(
+                    "kafka_server_socket_server_metrics_request_size_avg",
+                    List.of("listener", "networkProcessor"),
+                    List.of("PLAIN", "1"),
+                    101.1
+                ),
+                new Sample(
+                    "kafka_server_socket_server_metrics_request_size_avg",
+                    List.of("listener", "networkProcessor"),
+                    List.of("PLAIN2", "5"),
+                    202.2
+                )
+            )
+        )
+    );
+  }
+}

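The test above pins down the retriever's observable contract: build MetricsScrapeProperties, construct PrometheusMetricsRetriever, and call retrieve(host) to get the scraped MetricFamilySamples. A minimal usage sketch following only those calls; the port and host are illustrative, and treating the result as a single-element Mono<List<MetricFamilySamples>> is an assumption drawn from the StepVerifier expectations:

import com.provectus.kafka.ui.model.MetricsScrapeProperties;
import io.prometheus.client.Collector.MetricFamilySamples;
import java.util.List;

// Assumes this runs in the same package as PrometheusMetricsRetriever.
class ScrapeOnceSketch {
  public static void main(String[] args) {
    MetricsScrapeProperties props = MetricsScrapeProperties.builder()
        .ssl(false)
        .port(9404) // hypothetical jmx-exporter port
        .build();

    var retriever = new PrometheusMetricsRetriever(props);
    // retrieve() is assumed to emit one List<MetricFamilySamples>, as verified above.
    List<MetricFamilySamples> scraped = retriever.retrieve("broker-1").block();
    scraped.forEach(mfs -> System.out.println(mfs.name + ": " + mfs.samples.size() + " samples"));
  }
}
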
+ 62 - 0
kafka-ui-api/src/test/java/com/provectus/kafka/ui/service/metrics/sink/PrometheusRemoteWriteSinkTest.java

@@ -0,0 +1,62 @@
+package com.provectus.kafka.ui.service.metrics.sink;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import com.provectus.kafka.ui.container.PrometheusContainer;
+import io.prometheus.client.Collector;
+import io.prometheus.client.Collector.MetricFamilySamples;
+import io.prometheus.client.Collector.MetricFamilySamples.Sample;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Stream;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import prometheus.query.ApiClient;
+import prometheus.query.api.PrometheusClientApi;
+import prometheus.query.model.QueryResponse;
+
+class PrometheusRemoteWriteSinkTest {
+
+  private final PrometheusContainer prometheusContainer = new PrometheusContainer();
+
+  @BeforeEach
+  void startPromContainer() {
+    prometheusContainer.start();
+  }
+
+  @AfterEach
+  void stopPromContainer() {
+    prometheusContainer.stop();
+  }
+
+  @Test
+  void metricsPushedToPrometheus() {
+    var sink = new PrometheusRemoteWriteSink(prometheusContainer.url(), null);
+    sink.send(
+        Stream.of(
+            new MetricFamilySamples(
+                "test_metric1", Collector.Type.GAUGE, "help here",
+                List.of(new Sample("test_metric1", List.of(), List.of(), 111.111))
+            ),
+            new MetricFamilySamples(
+                "test_metric2", Collector.Type.GAUGE, "help here",
+                List.of(new Sample("test_metric2", List.of(), List.of(), 222.222))
+            )
+        )
+    ).block();
+
+    assertThat(queryMetricValue("test_metric1"))
+        .isEqualTo("111.111");
+
+    assertThat(queryMetricValue("test_metric2"))
+        .isEqualTo("222.222");
+  }
+
+  private String queryMetricValue(String metricName) {
+    PrometheusClientApi promClient = new PrometheusClientApi(new ApiClient().setBasePath(prometheusContainer.url()));
+    QueryResponse resp = promClient.query(metricName, null, null).block();
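+    // each vector result item has the shape {"metric": {...}, "value": [<unix_time>, "<sample_value>"]},
+    // so the sample value is the second element of the "value" array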
+    return (String) ((List<?>) ((Map<?, ?>) resp.getData().getResult().get(0)).get("value")).get(1);
+  }
+
+}

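The sink's public surface exercised here is small: a constructor taking the Prometheus base URL plus a second argument (passed as null in the test, so its meaning is not shown), and send(Stream<MetricFamilySamples>) returning a publisher. A hedged sketch of pushing one gauge outside the test harness, with the URL and metric illustrative:

import io.prometheus.client.Collector;
import io.prometheus.client.Collector.MetricFamilySamples;
import io.prometheus.client.Collector.MetricFamilySamples.Sample;
import java.util.List;
import java.util.stream.Stream;

// Assumes this runs in the same package as PrometheusRemoteWriteSink.
class PushOneGaugeSketch {
  public static void main(String[] args) {
    var sink = new PrometheusRemoteWriteSink("http://localhost:9090", null);
    sink.send(Stream.of(
        new MetricFamilySamples(
            "kafka_ui_liveness", Collector.Type.GAUGE, "1 while the app is up",
            List.of(new Sample("kafka_ui_liveness", List.of(), List.of(), 1.0))
        )
    )).block(); // send() is assumed to complete once the write is acknowledged, as in the test
  }
}
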
+ 63 - 1
kafka-ui-contract/pom.xml

@@ -46,6 +46,11 @@
                     <artifactId>javax.annotation-api</artifactId>
                     <version>1.3.2</version>
                 </dependency>
+                <dependency>
+                    <groupId>com.google.protobuf</groupId>
+                    <artifactId>protobuf-java</artifactId>
+                    <version>3.22.4</version>
+                </dependency>
             </dependencies>
 
             <build>
@@ -151,6 +156,30 @@
                                     </configOptions>
                                 </configuration>
                             </execution>
+                            <execution>
+                                <id>generate-prometheus-query-api</id>
+                                <goals>
+                                    <goal>generate</goal>
+                                </goals>
+                                <configuration>
+                                    <inputSpec>${project.basedir}/src/main/resources/swagger/prometheus-query-api.yaml
+                                    </inputSpec>
+                                    <output>${project.build.directory}/generated-sources/prometheus-query-api</output>
+                                    <generatorName>java</generatorName>
+                                    <generateApiTests>false</generateApiTests>
+                                    <generateModelTests>false</generateModelTests>
+                                    <configOptions>
+                                        <modelPackage>prometheus.query.model</modelPackage>
+                                        <apiPackage>prometheus.query.api</apiPackage>
+                                        <sourceFolder>prometheus-query</sourceFolder>
+                                        <asyncNative>true</asyncNative>
+                                        <library>webclient</library>
+                                        <useJakartaEe>true</useJakartaEe>
+                                        <useBeanValidation>true</useBeanValidation>
+                                        <dateLibrary>java8</dateLibrary>
+                                    </configOptions>
+                                </configuration>
+                            </execution>
                         </executions>
                     </plugin>
                     <plugin>
@@ -229,7 +258,40 @@
                                 </configuration>
                             </execution>
                         </executions>
-
+                    </plugin>
+                    <plugin>
+                        <groupId>kr.motd.maven</groupId>
+                        <artifactId>os-maven-plugin</artifactId>
+                        <version>1.7.0</version>
+                        <executions>
+                            <execution>
+                                <phase>initialize</phase>
+                                <goals>
+                                    <goal>detect</goal>
+                                </goals>
+                            </execution>
+                        </executions>
+                    </plugin>
+                    <plugin>
+                        <groupId>org.xolstice.maven.plugins</groupId>
+                        <artifactId>protobuf-maven-plugin</artifactId>
+                        <version>0.6.1</version>
+                        <executions>
+                            <execution>
+                                <phase>generate-sources</phase>
+                                <goals>
+                                    <goal>compile</goal>
+                                </goals>
+                            </execution>
+                        </executions>
+                        <configuration>
+                            <attachProtoSources>false</attachProtoSources>
+                            <protoSourceRoot>${project.basedir}/src/main/resources/proto/prometheus-remote-write-api</protoSourceRoot>
+                            <includes>
+                                <include>**/*.proto</include>
+                            </includes>
+                            <protocArtifact>com.google.protobuf:protoc:3.21.12:exe:${os.detected.classifier}</protocArtifact>
+                        </configuration>
                     </plugin>
                 </plugins>
             </build>

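The new generate-prometheus-query-api execution above produces the reactive prometheus.query client that the sink test uses. Under the configured model/api packages, a standalone instant query looks roughly like this (base path and metric name illustrative):

import prometheus.query.ApiClient;
import prometheus.query.api.PrometheusClientApi;
import prometheus.query.model.QueryResponse;

class InstantQuerySketch {
  public static void main(String[] args) {
    var api = new PrometheusClientApi(new ApiClient().setBasePath("http://localhost:9090"));
    // query(query, time, timeout): the three-argument form matches the call in
    // PrometheusRemoteWriteSinkTest; time and timeout are optional, hence null.
    QueryResponse resp = api.query("up", null, null).block();
    System.out.println(resp.getStatus());
  }
}
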
+ 133 - 0
kafka-ui-contract/src/main/resources/proto/prometheus-remote-write-api/gogoproto/gogo.proto

@@ -0,0 +1,133 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+package gogoproto;
+
+import "google/protobuf/descriptor.proto";
+
+option java_package = "com.google.protobuf";
+option java_outer_classname = "GoGoProtos";
+option go_package = "github.com/gogo/protobuf/gogoproto";
+
+extend google.protobuf.EnumOptions {
+    optional bool goproto_enum_prefix = 62001;
+    optional bool goproto_enum_stringer = 62021;
+    optional bool enum_stringer = 62022;
+    optional string enum_customname = 62023;
+    optional bool enumdecl = 62024;
+}
+
+extend google.protobuf.EnumValueOptions {
+    optional string enumvalue_customname = 66001;
+}
+
+extend google.protobuf.FileOptions {
+    optional bool goproto_getters_all = 63001;
+    optional bool goproto_enum_prefix_all = 63002;
+    optional bool goproto_stringer_all = 63003;
+    optional bool verbose_equal_all = 63004;
+    optional bool face_all = 63005;
+    optional bool gostring_all = 63006;
+    optional bool populate_all = 63007;
+    optional bool stringer_all = 63008;
+    optional bool onlyone_all = 63009;
+
+    optional bool equal_all = 63013;
+    optional bool description_all = 63014;
+    optional bool testgen_all = 63015;
+    optional bool benchgen_all = 63016;
+    optional bool marshaler_all = 63017;
+    optional bool unmarshaler_all = 63018;
+    optional bool stable_marshaler_all = 63019;
+
+    optional bool sizer_all = 63020;
+
+    optional bool goproto_enum_stringer_all = 63021;
+    optional bool enum_stringer_all = 63022;
+
+    optional bool unsafe_marshaler_all = 63023;
+    optional bool unsafe_unmarshaler_all = 63024;
+
+    optional bool goproto_extensions_map_all = 63025;
+    optional bool goproto_unrecognized_all = 63026;
+    optional bool gogoproto_import = 63027;
+    optional bool protosizer_all = 63028;
+    optional bool compare_all = 63029;
+    optional bool typedecl_all = 63030;
+    optional bool enumdecl_all = 63031;
+
+    optional bool goproto_registration = 63032;
+}
+
+extend google.protobuf.MessageOptions {
+    optional bool goproto_getters = 64001;
+    optional bool goproto_stringer = 64003;
+    optional bool verbose_equal = 64004;
+    optional bool face = 64005;
+    optional bool gostring = 64006;
+    optional bool populate = 64007;
+    optional bool stringer = 67008;
+    optional bool onlyone = 64009;
+
+    optional bool equal = 64013;
+    optional bool description = 64014;
+    optional bool testgen = 64015;
+    optional bool benchgen = 64016;
+    optional bool marshaler = 64017;
+    optional bool unmarshaler = 64018;
+    optional bool stable_marshaler = 64019;
+
+    optional bool sizer = 64020;
+
+    optional bool unsafe_marshaler = 64023;
+    optional bool unsafe_unmarshaler = 64024;
+
+    optional bool goproto_extensions_map = 64025;
+    optional bool goproto_unrecognized = 64026;
+
+    optional bool protosizer = 64028;
+    optional bool compare = 64029;
+
+    optional bool typedecl = 64030;
+}
+
+extend google.protobuf.FieldOptions {
+    optional bool nullable = 65001;
+    optional bool embed = 65002;
+    optional string customtype = 65003;
+    optional string customname = 65004;
+    optional string jsontag = 65005;
+    optional string moretags = 65006;
+    optional string casttype = 65007;
+    optional string castkey = 65008;
+    optional string castvalue = 65009;
+
+    optional bool stdtime = 65010;
+    optional bool stdduration = 65011;
+}

+ 88 - 0
kafka-ui-contract/src/main/resources/proto/prometheus-remote-write-api/remote.proto

@@ -0,0 +1,88 @@
+// Copyright 2016 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package prometheus;
+
+option go_package = "prompb";
+
+import "types.proto";
+import "gogoproto/gogo.proto";
+
+message WriteRequest {
+    repeated prometheus.TimeSeries timeseries = 1 [(gogoproto.nullable) = false];
+    // Cortex uses this field to determine the source of the write request.
+    // We reserve it to avoid any compatibility issues.
+    reserved  2;
+    repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false];
+}
+
+// ReadRequest represents a remote read request.
+message ReadRequest {
+    repeated Query queries = 1;
+
+    enum ResponseType {
+        // Server will return a single ReadResponse message with matched series that includes a list of raw samples.
+        // It's recommended to use streamed response types instead.
+        //
+        // Response headers:
+        // Content-Type: "application/x-protobuf"
+        // Content-Encoding: "snappy"
+        SAMPLES = 0;
+        // Server will stream a delimited ChunkedReadResponse message that
+        // contains XOR or HISTOGRAM(!) encoded chunks for a single series.
+        // Each message is following varint size and fixed size bigendian
+        // uint32 for CRC32 Castagnoli checksum.
+        //
+        // Response headers:
+        // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
+        // Content-Encoding: ""
+        STREAMED_XOR_CHUNKS = 1;
+    }
+
+    // accepted_response_types allows negotiating the content type of the response.
+    //
+    // Response types are taken from the list in FIFO order. If no response type in `accepted_response_types` is
+    // implemented by the server, an error is returned.
+    // For requests that do not contain the `accepted_response_types` field, the SAMPLES response type will be used.
+    repeated ResponseType accepted_response_types = 2;
+}
+
+// ReadResponse is a response when response_type equals SAMPLES.
+message ReadResponse {
+    // In same order as the request's queries.
+    repeated QueryResult results = 1;
+}
+
+message Query {
+    int64 start_timestamp_ms = 1;
+    int64 end_timestamp_ms = 2;
+    repeated prometheus.LabelMatcher matchers = 3;
+    prometheus.ReadHints hints = 4;
+}
+
+message QueryResult {
+    // Samples within a time series must be ordered by time.
+    repeated prometheus.TimeSeries timeseries = 1;
+}
+
+// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
+// We strictly stream full series one after another, optionally split by time. This means that a single frame can contain
+// a partition of a single series, but once a new series starts streaming, no more chunks will
+// be sent for the previous one. Series are returned sorted in the same way TSDB blocks are internally.
+message ChunkedReadResponse {
+    repeated prometheus.ChunkedSeries chunked_series = 1;
+
+    // query_index represents an index of the query from ReadRequest.queries these chunks relates to.
+    int64 query_index = 2;
+}

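WriteRequest is the payload of Prometheus remote write: a snappy-compressed protobuf POSTed with Content-Type application/x-protobuf and Content-Encoding snappy. A sketch of building and sending one sample by hand; since remote.proto sets no java_package or java_outer_classname, the generated classes are assumed to land in the prometheus package under the Remote and Types outer classes, and snappy-java is assumed to be on the classpath:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import org.xerial.snappy.Snappy;
import prometheus.Remote.WriteRequest;
import prometheus.Types;

class RemoteWriteByHandSketch {
  public static void main(String[] args) throws Exception {
    // The metric name travels as the reserved __name__ label.
    WriteRequest request = WriteRequest.newBuilder()
        .addTimeseries(Types.TimeSeries.newBuilder()
            .addLabels(Types.Label.newBuilder().setName("__name__").setValue("test_metric1"))
            .addSamples(Types.Sample.newBuilder()
                .setValue(111.111)
                .setTimestamp(System.currentTimeMillis())))
        .build();

    HttpRequest post = HttpRequest.newBuilder(URI.create("http://localhost:9090/api/v1/write"))
        .header("Content-Type", "application/x-protobuf")
        .header("Content-Encoding", "snappy")
        .header("X-Prometheus-Remote-Write-Version", "0.1.0")
        .POST(HttpRequest.BodyPublishers.ofByteArray(Snappy.compress(request.toByteArray())))
        .build();
    HttpClient.newHttpClient().send(post, HttpResponse.BodyHandlers.discarding());
  }
}

Note that the /api/v1/write receiver has to be enabled on the Prometheus side for this to be accepted.
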
+ 187 - 0
kafka-ui-contract/src/main/resources/proto/prometheus-remote-write-api/types.proto

@@ -0,0 +1,187 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package prometheus;
+
+option go_package = "prompb";
+
+import "gogoproto/gogo.proto";
+
+message MetricMetadata {
+    enum MetricType {
+        UNKNOWN        = 0;
+        COUNTER        = 1;
+        GAUGE          = 2;
+        HISTOGRAM      = 3;
+        GAUGEHISTOGRAM = 4;
+        SUMMARY        = 5;
+        INFO           = 6;
+        STATESET       = 7;
+    }
+
+    // Represents the metric type, these match the set from Prometheus.
+    // Refer to model/textparse/interface.go for details.
+    MetricType type = 1;
+    string metric_family_name = 2;
+    string help = 4;
+    string unit = 5;
+}
+
+message Sample {
+    double value    = 1;
+    // timestamp is in ms format, see model/timestamp/timestamp.go for
+    // conversion from time.Time to Prometheus timestamp.
+    int64 timestamp = 2;
+}
+
+message Exemplar {
+    // Optional, can be empty.
+    repeated Label labels = 1 [(gogoproto.nullable) = false];
+    double value = 2;
+    // timestamp is in ms format, see model/timestamp/timestamp.go for
+    // conversion from time.Time to Prometheus timestamp.
+    int64 timestamp = 3;
+}
+
+// A native histogram, also known as a sparse histogram.
+// Original design doc:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+// The appendix of this design doc also explains the concept of float
+// histograms. This Histogram message can represent both, the usual
+// integer histogram as well as a float histogram.
+message Histogram {
+    enum ResetHint {
+        UNKNOWN = 0; // Need to test for a counter reset explicitly.
+        YES     = 1; // This is the 1st histogram after a counter reset.
+        NO      = 2; // There was no counter reset between this and the previous Histogram.
+        GAUGE   = 3; // This is a gauge histogram where counter resets don't happen.
+    }
+
+    oneof count { // Count of observations in the histogram.
+        uint64 count_int   = 1;
+        double count_float = 2;
+    }
+    double sum = 3; // Sum of observations in the histogram.
+    // The schema defines the bucket schema. Currently, valid numbers
+    // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
+    // is a bucket boundary in each case, and then each power of two is
+    // divided into 2^n logarithmic buckets. Or in other words, each
+    // bucket boundary is the previous boundary times 2^(2^-n). In the
+    // future, more bucket schemas may be added using numbers < -4 or >
+    // 8.
+    sint32 schema             = 4;
+    double zero_threshold     = 5; // Breadth of the zero bucket.
+    oneof zero_count { // Count in zero bucket.
+        uint64 zero_count_int     = 6;
+        double zero_count_float   = 7;
+    }
+
+    // Negative Buckets.
+    repeated BucketSpan negative_spans =  8 [(gogoproto.nullable) = false];
+    // Use either "negative_deltas" or "negative_counts", the former for
+    // regular histograms with integer counts, the latter for float
+    // histograms.
+    repeated sint64 negative_deltas    =  9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+    repeated double negative_counts    = 10; // Absolute count of each bucket.
+
+    // Positive Buckets.
+    repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false];
+    // Use either "positive_deltas" or "positive_counts", the former for
+    // regular histograms with integer counts, the latter for float
+    // histograms.
+    repeated sint64 positive_deltas    = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+    repeated double positive_counts    = 13; // Absolute count of each bucket.
+
+    ResetHint reset_hint               = 14;
+    // timestamp is in ms format, see model/timestamp/timestamp.go for
+    // conversion from time.Time to Prometheus timestamp.
+    int64 timestamp = 15;
+}
+
+// A BucketSpan defines a number of consecutive buckets with their
+// offset. Logically, it would be more straightforward to include the
+// bucket counts in the Span. However, the protobuf representation is
+// more compact in the way the data is structured here (with all the
+// buckets in a single array separate from the Spans).
+message BucketSpan {
+    sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
+    uint32 length = 2; // Length of consecutive buckets.
+}
+
+// TimeSeries represents samples and labels for a single time series.
+message TimeSeries {
+    // For a timeseries to be valid, and for the samples and exemplars
+    // to be ingested by the remote system properly, the labels field is required.
+    repeated Label labels         = 1 [(gogoproto.nullable) = false];
+    repeated Sample samples       = 2 [(gogoproto.nullable) = false];
+    repeated Exemplar exemplars   = 3 [(gogoproto.nullable) = false];
+    repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
+}
+
+message Label {
+    string name  = 1;
+    string value = 2;
+}
+
+message Labels {
+    repeated Label labels = 1 [(gogoproto.nullable) = false];
+}
+
+// Matcher specifies a rule, which can match a set of labels or not.
+message LabelMatcher {
+    enum Type {
+        EQ  = 0;
+        NEQ = 1;
+        RE  = 2;
+        NRE = 3;
+    }
+    Type type    = 1;
+    string name  = 2;
+    string value = 3;
+}
+
+message ReadHints {
+    int64 step_ms = 1;  // Query step size in milliseconds.
+    string func = 2;    // String representation of surrounding function or aggregation.
+    int64 start_ms = 3; // Start time in milliseconds.
+    int64 end_ms = 4;   // End time in milliseconds.
+    repeated string grouping = 5; // List of label names used in aggregation.
+    bool by = 6; // Indicate whether it is without or by.
+    int64 range_ms = 7; // Range vector selector range in milliseconds.
+}
+
+// Chunk represents a TSDB chunk.
+// Time range [min, max] is inclusive.
+message Chunk {
+    int64 min_time_ms = 1;
+    int64 max_time_ms = 2;
+
+    // We require this to match chunkenc.Encoding.
+    enum Encoding {
+        UNKNOWN         = 0;
+        XOR             = 1;
+        HISTOGRAM       = 2;
+        FLOAT_HISTOGRAM = 3;
+    }
+    Encoding type  = 3;
+    bytes data     = 4;
+}
+
+// ChunkedSeries represents single, encoded time series.
+message ChunkedSeries {
+    // Labels should be sorted.
+    repeated Label labels = 1 [(gogoproto.nullable) = false];
+    // Chunks will be in start time order and may overlap.
+    repeated Chunk chunks = 2 [(gogoproto.nullable) = false];
+}

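The schema comment in Histogram above is dense: for schema n, each bucket boundary is the previous boundary times 2^(2^-n), with 1 always on a boundary, so bucket i has upper bound 2^(i * 2^-n). A small self-contained sketch (the schema value is illustrative) that prints the first few boundaries:

class BucketSchemaSketch {
  public static void main(String[] args) {
    int schema = 3; // valid range per the comment: -4 <= n <= 8
    double growth = Math.pow(2, Math.pow(2, -schema)); // 2^(1/8) ≈ 1.090508 for schema 3
    double bound = 1.0; // 1 is a bucket boundary for every schema
    for (int i = 1; i <= 4; i++) {
      bound *= growth;
      System.out.printf("bucket %d upper bound: %.6f%n", i, bound);
    }
    // prints ≈ 1.090508, 1.189207, 1.296840, 1.414214 (the last being sqrt(2) = 2^(4/8))
  }
}
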
+ 194 - 1
kafka-ui-contract/src/main/resources/swagger/kafka-ui-api.yaml

@@ -1,4 +1,4 @@
-openapi: 3.0.0
+openapi: 3.0.1
 info:
   description: Api Documentation
   version: 0.1.0
@@ -32,6 +32,52 @@ paths:
                   $ref: '#/components/schemas/Cluster'
 
 
+  /api/clusters/{clusterName}/graphs/descriptions:
+    get:
+      tags:
+        - Graphs
+      summary: getGraphsList
+      operationId: getGraphsList
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+      responses:
+        200:
+          description: |
+            Success
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/GraphDescriptions'
+
+  /api/clusters/{clusterName}/graphs/prometheus:
+    post:
+      tags:
+        - Graphs
+      summary: getGraphData
+      operationId: getGraphData
+      parameters:
+        - name: clusterName
+          in: path
+          required: true
+          schema:
+            type: string
+      requestBody:
+        content:
+          application/json:
+            schema:
+              $ref: '#/components/schemas/GraphDataRequest'
+      responses:
+        200:
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/PrometheusApiQueryResponse'
+
   /api/clusters/{clusterName}/cache:
     post:
       tags:
@@ -157,6 +203,20 @@ paths:
               schema:
                 $ref: '#/components/schemas/ClusterMetrics'
 
+  /metrics:
+    get:
+      tags:
+        - PrometheusExpose
+      summary: getAllMetrics
+      operationId: getAllMetrics
+      responses:
+        200:
+          description: OK
+          content:
+            text/plain:
+              schema:
+                type: string
+
   /api/clusters/{clusterName}/stats:
     get:
       tags:
@@ -3740,6 +3800,112 @@ components:
           additionalProperties:
             $ref: '#/components/schemas/ClusterConfigValidation'
 
+    GraphDataRequest:
+      type: object
+      properties:
+        id:
+          type: string
+        parameters:
+          type: object
+          additionalProperties:
+            type: string
+        from:
+          type: string
+          format: date-time
+        to:
+          type: string
+          format: date-time
+
+    GraphDescriptions:
+      type: object
+      properties:
+        graphs:
+          type: array
+          items:
+            $ref: '#/components/schemas/GraphDescription'
+
+    GraphDescription:
+      type: object
+      required: ["id"]
+      properties:
+        id:
+          type: string
+          description: Id that should be used to query data on API level
+        type:
+          type: string
+          enum: ["range", "instant"]
+        defaultPeriod:
+          type: string
+          description: ISO_8601 duration string (for "range" graphs only)
+        parameters:
+          type: array
+          items:
+            $ref: '#/components/schemas/GraphParameter'
+
+    GraphParameter:
+      type: object
+      required: ["name"]
+      properties:
+        name:
+          type: string
+
+    PrometheusApiBaseResponse:
+      type: object
+      required: [ status ]
+      properties:
+        status:
+          type: string
+          enum: [ "success", "error" ]
+        error:
+          type: string
+        errorType:
+          type: string
+        warnings:
+          type: array
+          items:
+            type: string
+
+    PrometheusApiQueryResponse:
+      type: object
+      allOf:
+        - $ref: "#/components/schemas/PrometheusApiBaseResponse"
+      properties:
+        data:
+          $ref: '#/components/schemas/PrometheusApiQueryResponseData'
+
+    PrometheusApiQueryResponseData:
+      type: object
+      required: [ "resultType" ]
+      properties:
+        resultType:
+          type: string
+          enum: [ "matrix", "vector", "scalar", "string"]
+        result:
+          type: array
+          items: { }
+          description: |
+            Depending on resultType format can vary:
+            "vector":
+              [
+                {
+                  "metric": { "<label_name>": "<label_value>", ... },
+                  "value": [ <unix_time>, "<sample_value>" ],
+                  "histogram": [ <unix_time>, <histogram> ]
+               }, ...
+              ]
+            "matrix":
+              [
+                {
+                  "metric": { "<label_name>": "<label_value>", ... },
+                  "values": [ [ <unix_time>, "<sample_value>" ], ... ],
+                  "histograms": [ [ <unix_time>, <histogram> ], ... ]
+                }, ...
+              ]
+            "scalar":
+              [ <unix_time>, "<scalar_value>" ]
+            "string":
+              [ <unix_time>, "<string_value>" ]
+
     ApplicationPropertyValidation:
       type: object
       required: [error]
@@ -3764,6 +3930,8 @@ components:
             $ref: '#/components/schemas/ApplicationPropertyValidation'
         ksqldb:
           $ref: '#/components/schemas/ApplicationPropertyValidation'
+        prometheusStorage:
+          $ref: '#/components/schemas/ApplicationPropertyValidation'
 
     ApplicationConfig:
       type: object
@@ -3960,6 +4128,31 @@ components:
                             type: string
                           keystorePassword:
                             type: string
+                          prometheusExpose:
+                            type: boolean
+                          store:
+                            type: object
+                            properties:
+                              prometheus:
+                                type: object
+                                properties:
+                                  url:
+                                    type: string
+                                  remoteWrite:
+                                    type: boolean
+                                  pushGatewayUrl:
+                                    type: string
+                                  pushGatewayUsername:
+                                    type: string
+                                  pushGatewayPassword:
+                                    type: string
+                                  pushGatewayJobName:
+                                    type: string
+                              kafka:
+                                type: object
+                                properties:
+                                  topic:
+                                    type: string
                       properties:
                         type: object
                         additionalProperties: true

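The GraphDataRequest/GraphDescription pair defines the request flow: list graph ids via GET .../graphs/descriptions, then post an id with parameters and a time window to .../graphs/prometheus and receive a PrometheusApiQueryResponse. A sketch of the POST with plain java.net.http; the host, cluster name, graph id, and parameter name are all illustrative:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse.BodyHandlers;

class GraphDataRequestSketch {
  public static void main(String[] args) throws Exception {
    // Graph id and parameter names are hypothetical; real values come from
    // GET /api/clusters/{clusterName}/graphs/descriptions.
    String body = """
        {
          "id": "broker_bytes_in",
          "parameters": { "topic": "orders" },
          "from": "2024-01-01T00:00:00Z",
          "to": "2024-01-01T01:00:00Z"
        }""";
    HttpRequest request = HttpRequest.newBuilder(
            URI.create("http://localhost:8080/api/clusters/local/graphs/prometheus"))
        .header("Content-Type", "application/json")
        .POST(HttpRequest.BodyPublishers.ofString(body))
        .build();
    var response = HttpClient.newHttpClient().send(request, BodyHandlers.ofString());
    System.out.println(response.body()); // PrometheusApiQueryResponse JSON
  }
}
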
+ 365 - 0
kafka-ui-contract/src/main/resources/swagger/prometheus-query-api.yaml

@@ -0,0 +1,365 @@
+openapi: 3.0.1
+info:
+  title: |
+    Prometheus query HTTP API
+  version: 0.1.0
+  contact: { }
+
+tags:
+  - name: /promclient
+servers:
+  - url: /localhost
+
+
+paths:
+  /api/v1/label/{label_name}/values:
+    get:
+      tags:
+        - PrometheusClient
+      summary: Returns label values
+      description: "returns a list of label values for a provided label name"
+      operationId: getLabelValues
+      parameters:
+        - name: label_name
+          in: path
+          required: true
+          schema:
+            type: string
+        - name: start
+          in: query
+          description: Start timestamp.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: end
+          in: query
+          description: End timestamp.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: match[]
+          in: query
+          description: Repeated series selector argument that selects the series from which to read the label values.
+          schema:
+            type: string
+            format: series_selector
+      responses:
+        200:
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/LabelValuesResponse'
+
+  /api/v1/labels:
+    get:
+      tags:
+        - PrometheusClient
+      summary: Returns label names
+      description: returns a list of label names
+      operationId: getLabelNames
+      parameters:
+        - name: start
+          in: query
+          description: |
+            Start timestamp.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: end
+          in: query
+          description: |
+            End timestamp.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: match[]
+          in: query
+          description: Repeated series selector argument that selects the series from which to read the label values. Optional.
+          schema:
+            type: string
+            format: series_selector
+      responses:
+        200:
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/LabelNamesResponse'
+
+  /api/v1/metadata:
+    get:
+      tags:
+        - PrometheusClient
+      summary: Returns metric metadata
+      description: returns metadata about metrics currently scraped from targets
+      operationId: getMetricMetadata
+      parameters:
+        - name: limit
+          in: query
+          description: Maximum number of metrics to return.
+          required: true
+          schema:
+            type: integer
+        - name: metric
+          in: query
+          description: A metric name to filter metadata for. All metric metadata is retrieved if left empty.
+          schema:
+            type: string
+      responses:
+        200:
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/MetadataResponse'
+
+  /api/v1/query:
+    get:
+      tags:
+        - PrometheusClient
+      summary: Evaluates instant query
+      description: |
+        Evaluates an instant query at a single point in time
+      operationId: query
+      parameters:
+        - name: query
+          in: query
+          description: |
+            Prometheus expression query string.
+          required: true
+          schema:
+            type: string
+        - name: time
+          in: query
+          description: |
+            Evaluation timestamp. Optional.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: timeout
+          in: query
+          description: |
+            Evaluation timeout. Optional.
+          schema:
+            type: string
+            format: duration
+      responses:
+        200:
+          description: |
+            Success
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/QueryResponse'
+
+
+  /api/v1/query_range:
+    get:
+      tags:
+        - PrometheusClient
+      summary: Evaluates query over a range of time
+      description: Evaluates an expression query over a range of time
+      operationId: queryRange
+      parameters:
+        - name: query
+          in: query
+          description: Prometheus expression query string.
+          required: true
+          schema:
+            type: string
+        - name: start
+          in: query
+          description: Start timestamp.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: end
+          in: query
+          description: End timestamp.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: step
+          in: query
+          description: |
+            Query resolution step width in ```duration``` format or float number of seconds.
+          schema:
+            type: string
+            format: duration | float
+        - name: timeout
+          in: query
+          description: |
+            Evaluation timeout. Optional. Defaults to and is capped by the value of the ```-query.timeout``` flag.
+          schema:
+            type: string
+            format: duration
+      responses:
+        200:
+          description: |
+            Success
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/QueryResponse"
+
+
+  /api/v1/series:
+    get:
+      tags:
+        - PrometheusClient
+      summary: Returns time series
+      operationId: getSeries
+      parameters:
+        - name: start
+          in: query
+          description: |
+            Start timestamp. Optional.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: end
+          in: query
+          description: |
+            End timestamp. Optional.
+          schema:
+            type: string
+            format: rfc3339 | unix_timestamp
+        - name: match[]
+          in: query
+          description: |
+            Repeated series selector argument that selects the series to return. At least one ```match[]``` argument must be provided.
+          required: true
+          schema:
+            type: string
+            format: series_selector
+      responses:
+        200:
+          description: |
+            Success
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/SeriesResponse'
+
+components:
+  schemas:
+    BaseResponse:
+      type: object
+      required: [ status ]
+      properties:
+        status:
+          type: string
+          enum: [ "success", "error" ]
+        error:
+          type: string
+        errorType:
+          type: string
+        warnings:
+          type: array
+          items:
+            type: string
+
+    QueryResponse:
+      type: object
+      allOf:
+        - $ref: "#/components/schemas/BaseResponse"
+      properties:
+        data:
+          $ref: '#/components/schemas/QueryResponseData'
+
+    QueryResponseData:
+      type: object
+      required: [ "resultType" ]
+      properties:
+        resultType:
+          type: string
+          enum: [ "matrix", "vector", "scalar", "string"]
+        result:
+          type: array
+          items: { }
+          description: |
+            Depending on resultType format can vary:
+            "vector":
+              [
+                {
+                  "metric": { "<label_name>": "<label_value>", ... },
+                  "value": [ <unix_time>, "<sample_value>" ],
+                  "histogram": [ <unix_time>, <histogram> ]
+               }, ...
+              ]
+            "matrix":
+              [
+                {
+                  "metric": { "<label_name>": "<label_value>", ... },
+                  "values": [ [ <unix_time>, "<sample_value>" ], ... ],
+                  "histograms": [ [ <unix_time>, <histogram> ], ... ]
+                }, ...
+              ]
+            "scalar":
+              [ <unix_time>, "<scalar_value>" ]
+            "string":
+              [ <unix_time>, "<string_value>" ]
+
+    SeriesResponse:
+      type: object
+      allOf:
+        - $ref: "#/components/schemas/BaseResponse"
+      properties:
+        data:
+          type: array
+          description: a list of objects that contain the label name/value pairs which
+            identify each series
+          items:
+            type: object
+            properties:
+              __name__:
+                type: string
+              job:
+                type: string
+              instance:
+                type: string
+
+    MetadataResponse:
+      type: object
+      allOf:
+        - $ref: "#/components/schemas/BaseResponse"
+      properties:
+        data:
+          type: object
+          additionalProperties:
+            type: array
+            items:
+              type: object
+              additionalProperties: true
+
+    LabelValuesResponse:
+      type: object
+      allOf:
+        - $ref: "#/components/schemas/BaseResponse"
+      properties:
+        data:
+          type: array
+          description: a list of string label values
+          items:
+            type: string
+
+    LabelNamesResponse:
+      type: object
+      allOf:
+        - $ref: "#/components/schemas/BaseResponse"
+      properties:
+        data:
+          type: array
+          description: a list of string label names
+          items:
+            type: string
+
+
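For range graphs the generated client hits /api/v1/query_range. By analogy with the query(query, time, timeout) call shown in PrometheusRemoteWriteSinkTest, the generator is assumed to map parameters positionally in declaration order (query, start, end, step, timeout); the PromQL expression and time window below are illustrative:

import prometheus.query.ApiClient;
import prometheus.query.api.PrometheusClientApi;
import prometheus.query.model.QueryResponse;

class RangeQuerySketch {
  public static void main(String[] args) {
    var api = new PrometheusClientApi(new ApiClient().setBasePath("http://localhost:9090"));
    QueryResponse resp = api.queryRange(
        "rate(kafka_server_brokertopicmetrics_bytesin_total[5m])", // illustrative PromQL
        "2024-01-01T00:00:00Z",  // start (rfc3339)
        "2024-01-01T01:00:00Z",  // end (rfc3339)
        "60s",                   // step
        null                     // timeout, optional
    ).block();
    System.out.println(resp.getData().getResultType()); // "matrix" for range queries
  }
}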