Compare commits: master...metrics_ph (42 commits)

Commits (SHA1): 7d14a3301d, 0730bab773, 9dcb4daf1a, 060fbbb3fa, 17680e6cd1, 1bb53bc976, f9ba54317b, 4f27a39b68, 79ba8313f7, 3a4233f424, 725c823947, e5539f21b5, 7eeb5538d2, 4bd70b7952, 3da32c9540, 066157577d, d6c16e2905, 28c9871ff2, e9793ecf67, 35910fc2a2, a9a22b4025, 7e3831b715, 3789693777, 0ec59e8cab, 12c6552433, 7c3fb18b79, 13a55821f4, 3facb821a8, 8e6b47ad85, 30510781c6, 9d2ecf533e, 9f2bca162c, 8062492e2b, 98b1aa5ec0, 5474a34936, cb159e1af5, dc95269234, 70b08498d9, dbab4e367d, daedc4d7c7, 54a5398413, 38eb68dcc5
160 changed files with 6861 additions and 3861 deletions
.github/workflows/aws_publisher.yaml (2 changes, vendored)
@@ -31,7 +31,7 @@ jobs:
        echo "Packer will be triggered in this dir $WORK_DIR"

      - name: Configure AWS credentials for Kafka-UI account
-       uses: aws-actions/configure-aws-credentials@v3
+       uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.AWS_AMI_PUBLISH_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_AMI_PUBLISH_KEY_SECRET }}
.github/workflows/branch-deploy.yml (2 changes, vendored)
@@ -45,7 +45,7 @@ jobs:
          restore-keys: |
            ${{ runner.os }}-buildx-
      - name: Configure AWS credentials for Kafka-UI account
-       uses: aws-actions/configure-aws-credentials@v3
+       uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
.github/workflows/build-public-image.yml (2 changes, vendored)
@@ -42,7 +42,7 @@ jobs:
          restore-keys: |
            ${{ runner.os }}-buildx-
      - name: Configure AWS credentials for Kafka-UI account
-       uses: aws-actions/configure-aws-credentials@v3
+       uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
.github/workflows/cve.yaml (2 changes, vendored)
@@ -55,7 +55,7 @@ jobs:
          cache-to: type=local,dest=/tmp/.buildx-cache

      - name: Run CVE checks
-       uses: aquasecurity/trivy-action@0.12.0
+       uses: aquasecurity/trivy-action@0.11.2
        with:
          image-ref: "provectuslabs/kafka-ui:${{ steps.build.outputs.version }}"
          format: "table"
.github/workflows/delete-public-image.yml (2 changes, vendored)
@@ -15,7 +15,7 @@ jobs:
          tag='${{ github.event.pull_request.number }}'
          echo "tag=${tag}" >> $GITHUB_OUTPUT
      - name: Configure AWS credentials for Kafka-UI account
-       uses: aws-actions/configure-aws-credentials@v3
+       uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
.github/workflows/e2e-automation.yml (2 changes, vendored)
@@ -24,7 +24,7 @@ jobs:
        with:
          ref: ${{ github.sha }}
      - name: Configure AWS credentials
-       uses: aws-actions/configure-aws-credentials@v3
+       uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
.github/workflows/e2e-checks.yaml (2 changes, vendored)
@@ -18,7 +18,7 @@ jobs:
        with:
          ref: ${{ github.event.pull_request.head.sha }}
      - name: Configure AWS credentials
-       uses: aws-actions/configure-aws-credentials@v3
+       uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.S3_AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.S3_AWS_SECRET_ACCESS_KEY }}
.github/workflows/e2e-weekly.yml (2 changes, vendored)
@@ -11,7 +11,7 @@ jobs:
        with:
          ref: ${{ github.sha }}
      - name: Configure AWS credentials
-       uses: aws-actions/configure-aws-credentials@v3
+       uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
.github/workflows/frontend.yaml (6 changes, vendored)
@@ -25,11 +25,11 @@ jobs:
          ref: ${{ github.event.pull_request.head.sha }}
      - uses: pnpm/action-setup@v2.4.0
        with:
-         version: 8.6.12
+         version: 7.4.0
      - name: Install node
-       uses: actions/setup-node@v3.8.1
+       uses: actions/setup-node@v3.7.0
        with:
-         node-version: "18.17.1"
+         node-version: "16.15.0"
          cache: "pnpm"
          cache-dependency-path: "./kafka-ui-react-app/pnpm-lock.yaml"
      - name: Install Node dependencies
@@ -47,7 +47,7 @@ jobs:
          restore-keys: |
            ${{ runner.os }}-buildx-
      - name: Configure AWS credentials for Kafka-UI account
-       uses: aws-actions/configure-aws-credentials@v3
+       uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
.github/workflows/terraform-deploy.yml (2 changes, vendored)
@@ -26,7 +26,7 @@ jobs:
          echo "Terraform will be triggered in this dir $TF_DIR"

      - name: Configure AWS credentials for Kafka-UI account
-       uses: aws-actions/configure-aws-credentials@v3
+       uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@@ -91,7 +91,7 @@ docker run -it -p 8080:8080 -e DYNAMIC_CONFIG_ENABLED=true provectuslabs/kafka-ui

  Then access the web UI at [http://localhost:8080](http://localhost:8080)

- The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/quick-start/persistent-start)
+ The command is sufficient to try things out. When you're done trying things out, you can proceed with a [persistent installation](https://docs.kafka-ui.provectus.io/configuration/quick-start#persistent-start)

  ## Persistent installation

@@ -13,16 +13,29 @@ services:
      - schema-registry0
      - kafka-connect0
    environment:
-     DYNAMIC_CONFIG_ENABLED: 'true' # not necessary, added for tests
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
      KAFKA_CLUSTERS_0_METRICS_PORT: 9997
+     KAFKA_CLUSTERS_0_METRICS_STORE_PROMETHEUS_URL: "http://prometheus:9090"
+     KAFKA_CLUSTERS_0_METRICS_STORE_PROMETHEUS_REMOTEWRITE: 'true'
+     KAFKA_CLUSTERS_0_METRICS_STORE_KAFKA_TOPIC: "kafka_metrics"
      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schema-registry0:8085
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
+     DYNAMIC_CONFIG_ENABLED: 'true' # not necessary, added for tests
      KAFKA_CLUSTERS_0_AUDIT_TOPICAUDITENABLED: 'true'
      KAFKA_CLUSTERS_0_AUDIT_CONSOLEAUDITENABLED: 'true'

+ prometheus:
+   image: prom/prometheus:latest
+   hostname: prometheus
+   container_name: prometheus
+   ports:
+     - 9090:9090
+   volumes:
+     - ./scripts:/etc/prometheus
+   command: --web.enable-remote-write-receiver --config.file=/etc/prometheus/prometheus.yaml

  kafka0:
    image: confluentinc/cp-kafka:7.2.1.arm64
    hostname: kafka0
documentation/compose/scripts/prometheus.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
global:
  scrape_interval: 30s
  scrape_timeout: 10s

rule_files:
  - alert.yml

scrape_configs:
  - job_name: services
    metrics_path: /metrics
    static_configs:
      - targets:
          - 'prometheus:9090'
          # - 'kafka-ui:8080'
@@ -81,12 +81,6 @@
      <groupId>io.confluent</groupId>
      <artifactId>kafka-json-schema-serializer</artifactId>
      <version>${confluent.version}</version>
-     <exclusions>
-       <exclusion>
-         <groupId>commons-collections</groupId>
-         <artifactId>commons-collections</artifactId>
-       </exclusion>
-     </exclusions>
    </dependency>
    <dependency>
      <groupId>io.confluent</groupId>
@@ -141,11 +135,6 @@
      <artifactId>commons-pool2</artifactId>
      <version>${apache.commons.version}</version>
    </dependency>
-   <dependency>
-     <groupId>org.apache.commons</groupId>
-     <artifactId>commons-collections4</artifactId>
-     <version>4.4</version>
-   </dependency>
    <dependency>
      <groupId>org.testcontainers</groupId>
      <artifactId>testcontainers</artifactId>
@@ -249,6 +238,25 @@
      <groupId>org.springframework.security</groupId>
      <artifactId>spring-security-ldap</artifactId>
    </dependency>

+   <dependency>
+     <groupId>io.prometheus</groupId>
+     <artifactId>simpleclient</artifactId>
+   </dependency>
+   <dependency>
+     <groupId>io.prometheus</groupId>
+     <artifactId>simpleclient_common</artifactId>
+   </dependency>
+   <dependency>
+     <groupId>io.prometheus</groupId>
+     <artifactId>simpleclient_pushgateway</artifactId>
+   </dependency>
+   <dependency>
+     <groupId>org.xerial.snappy</groupId>
+     <artifactId>snappy-java</artifactId>
+     <version>1.1.8.4</version>
+   </dependency>
+
    <dependency>
      <groupId>org.codehaus.groovy</groupId>
      <artifactId>groovy-jsr223</artifactId>
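The three io.prometheus artifacts added above are the Prometheus Java simpleclient stack: simpleclient provides the collector/registry model, simpleclient_common the text exposition format, and simpleclient_pushgateway the push client. A minimal sketch of how the first two fit together (metric names and values here are illustrative, not taken from this PR):

import io.prometheus.client.CollectorRegistry;
import io.prometheus.client.Gauge;
import io.prometheus.client.exporter.common.TextFormat;
import java.io.StringWriter;

public class SimpleclientSketch {
  public static void main(String[] args) throws Exception {
    CollectorRegistry registry = new CollectorRegistry();   // simpleclient: registry of collectors
    Gauge bytesIn = Gauge.build()
        .name("broker_bytes_in_per_sec").help("Illustrative gauge")
        .labelNames("cluster")
        .register(registry);
    bytesIn.labels("local").set(42.0);

    StringWriter out = new StringWriter();
    // simpleclient_common: render the registry in the Prometheus text exposition format (what a /metrics endpoint serves)
    TextFormat.write004(out, registry.metricFamilySamples());
    System.out.println(out);
  }
}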
@@ -403,7 +411,7 @@
    <plugin>
      <groupId>pl.project13.maven</groupId>
      <artifactId>git-commit-id-plugin</artifactId>
-     <version>4.9.10</version>
+     <version>4.0.0</version>
      <executions>
        <execution>
          <id>get-the-git-infos</id>
kafka-ui-api/src/main/antlr4/promql/PromQLLexer.g4 (new file, 176 lines)
@@ -0,0 +1,176 @@
lexer grammar PromQLLexer;

channels { WHITESPACE, COMMENTS }

// All keywords in PromQL are case insensitive, it is just function,
// label and metric names that are not.
options { caseInsensitive=true; }

fragment NUMERAL: [0-9]+ ('.' [0-9]+)?;

fragment SCIENTIFIC_NUMBER
    : NUMERAL ('e' [-+]? NUMERAL)?
    ;

NUMBER
    : NUMERAL
    | SCIENTIFIC_NUMBER;

STRING
    : '\'' (~('\'' | '\\') | '\\' .)* '\''
    | '"' (~('"' | '\\') | '\\' .)* '"'
    ;

// Binary operators

ADD: '+';
SUB: '-';
MULT: '*';
DIV: '/';
MOD: '%';
POW: '^';

AND: 'and';
OR: 'or';
UNLESS: 'unless';

// Comparison operators

EQ: '=';
DEQ: '==';
NE: '!=';
GT: '>';
LT: '<';
GE: '>=';
LE: '<=';
RE: '=~';
NRE: '!~';

// Aggregation modifiers

BY: 'by';
WITHOUT: 'without';

// Join modifiers

ON: 'on';
IGNORING: 'ignoring';
GROUP_LEFT: 'group_left';
GROUP_RIGHT: 'group_right';

OFFSET: 'offset';

BOOL: 'bool';

AGGREGATION_OPERATOR
    : 'sum'
    | 'min'
    | 'max'
    | 'avg'
    | 'group'
    | 'stddev'
    | 'stdvar'
    | 'count'
    | 'count_values'
    | 'bottomk'
    | 'topk'
    | 'quantile'
    ;

FUNCTION options { caseInsensitive=false; }
    : 'abs'
    | 'absent'
    | 'absent_over_time'
    | 'ceil'
    | 'changes'
    | 'clamp_max'
    | 'clamp_min'
    | 'day_of_month'
    | 'day_of_week'
    | 'days_in_month'
    | 'delta'
    | 'deriv'
    | 'exp'
    | 'floor'
    | 'histogram_quantile'
    | 'holt_winters'
    | 'hour'
    | 'idelta'
    | 'increase'
    | 'irate'
    | 'label_join'
    | 'label_replace'
    | 'ln'
    | 'log2'
    | 'log10'
    | 'minute'
    | 'month'
    | 'predict_linear'
    | 'rate'
    | 'resets'
    | 'round'
    | 'scalar'
    | 'sort'
    | 'sort_desc'
    | 'sqrt'
    | 'time'
    | 'timestamp'
    | 'vector'
    | 'year'
    | 'avg_over_time'
    | 'min_over_time'
    | 'max_over_time'
    | 'sum_over_time'
    | 'count_over_time'
    | 'quantile_over_time'
    | 'stddev_over_time'
    | 'stdvar_over_time'
    | 'last_over_time'
    | 'acos'
    | 'acosh'
    | 'asin'
    | 'asinh'
    | 'atan'
    | 'atanh'
    | 'cos'
    | 'cosh'
    | 'sin'
    | 'sinh'
    | 'tan'
    | 'tanh'
    | 'deg'
    | 'pi'
    | 'rad'
    ;

LEFT_BRACE: '{';
RIGHT_BRACE: '}';

LEFT_PAREN: '(';
RIGHT_PAREN: ')';

LEFT_BRACKET: '[';
RIGHT_BRACKET: ']';

COMMA: ',';

AT: '@';

SUBQUERY_RANGE
    : LEFT_BRACKET DURATION ':' DURATION? RIGHT_BRACKET;

TIME_RANGE
    : LEFT_BRACKET DURATION RIGHT_BRACKET;

// The proper order (longest to the shortest) must be validated after parsing
DURATION: ([0-9]+ ('ms' | [smhdwy]))+;

METRIC_NAME: [a-z_:] [a-z0-9_:]*;
LABEL_NAME: [a-z_] [a-z0-9_]*;

WS: [\r\t\n ]+ -> channel(WHITESPACE);
SL_COMMENT
    : '#' .*? '\n' -> channel(COMMENTS)
    ;
kafka-ui-api/src/main/antlr4/promql/PromQLParser.g4 (new file, 114 lines)
@@ -0,0 +1,114 @@
parser grammar PromQLParser;

options { tokenVocab = PromQLLexer; }

expression: vectorOperation EOF;

// Binary operations are ordered by precedence

// Unary operations have the same precedence as multiplications

vectorOperation
    : <assoc=right> vectorOperation powOp vectorOperation
    | <assoc=right> vectorOperation subqueryOp
    | unaryOp vectorOperation
    | vectorOperation multOp vectorOperation
    | vectorOperation addOp vectorOperation
    | vectorOperation compareOp vectorOperation
    | vectorOperation andUnlessOp vectorOperation
    | vectorOperation orOp vectorOperation
    | vectorOperation vectorMatchOp vectorOperation
    | vectorOperation AT vectorOperation
    | vector
    ;

// Operators

unaryOp: (ADD | SUB);
powOp: POW grouping?;
multOp: (MULT | DIV | MOD) grouping?;
addOp: (ADD | SUB) grouping?;
compareOp: (DEQ | NE | GT | LT | GE | LE) BOOL? grouping?;
andUnlessOp: (AND | UNLESS) grouping?;
orOp: OR grouping?;
vectorMatchOp: (ON | UNLESS) grouping?;
subqueryOp: SUBQUERY_RANGE offsetOp?;
offsetOp: OFFSET DURATION;

vector
    : function_
    | aggregation
    | instantSelector
    | matrixSelector
    | offset
    | literal
    | parens
    ;

parens: LEFT_PAREN vectorOperation RIGHT_PAREN;

// Selectors

instantSelector
    : METRIC_NAME (LEFT_BRACE labelMatcherList? RIGHT_BRACE)?
    | LEFT_BRACE labelMatcherList RIGHT_BRACE
    ;

labelMatcher: labelName labelMatcherOperator STRING;
labelMatcherOperator: EQ | NE | RE | NRE;
labelMatcherList: labelMatcher (COMMA labelMatcher)* COMMA?;

matrixSelector: instantSelector TIME_RANGE;

offset
    : instantSelector OFFSET DURATION
    | matrixSelector OFFSET DURATION
    ;

// Functions

function_: FUNCTION LEFT_PAREN (parameter (COMMA parameter)*)? RIGHT_PAREN;

parameter: literal | vectorOperation;
parameterList: LEFT_PAREN (parameter (COMMA parameter)*)? RIGHT_PAREN;

// Aggregations

aggregation
    : AGGREGATION_OPERATOR parameterList
    | AGGREGATION_OPERATOR (by | without) parameterList
    | AGGREGATION_OPERATOR parameterList ( by | without)
    ;
by: BY labelNameList;
without: WITHOUT labelNameList;

// Vector one-to-one/one-to-many joins

grouping: (on_ | ignoring) (groupLeft | groupRight)?;
on_: ON labelNameList;
ignoring: IGNORING labelNameList;
groupLeft: GROUP_LEFT labelNameList?;
groupRight: GROUP_RIGHT labelNameList?;

// Label names

labelName: keyword | METRIC_NAME | LABEL_NAME;
labelNameList: LEFT_PAREN (labelName (COMMA labelName)*)? RIGHT_PAREN;

keyword
    : AND
    | OR
    | UNLESS
    | BY
    | WITHOUT
    | ON
    | IGNORING
    | GROUP_LEFT
    | GROUP_RIGHT
    | OFFSET
    | BOOL
    | AGGREGATION_OPERATOR
    | FUNCTION
    ;

literal: NUMBER | STRING;
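ANTLR generates PromQLLexer and PromQLParser classes from the two grammars above. A minimal sketch of driving them against a query, assuming the default Maven ANTLR plugin layout (which derives the promql package from the grammar directory); the query string itself is only an example:

import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import promql.PromQLLexer;
import promql.PromQLParser;

class PromQlParseSketch {
  public static void main(String[] args) {
    String query = "sum(rate(kafka_server_brokertopicmetrics_bytesin_total[5m])) by (topic)";
    PromQLLexer lexer = new PromQLLexer(CharStreams.fromString(query));
    PromQLParser parser = new PromQLParser(new CommonTokenStream(lexer));
    // expression is the entry rule defined in PromQLParser.g4;
    // a malformed query is reported through ANTLR's default error listener
    PromQLParser.ExpressionContext tree = parser.expression();
    System.out.println(tree.toStringTree(parser));
  }
}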
@@ -1,6 +1,7 @@
  package com.provectus.kafka.ui.config;

- import com.provectus.kafka.ui.model.MetricsConfig;
+ import static com.provectus.kafka.ui.model.MetricsScrapeProperties.JMX_METRICS_TYPE;

  import jakarta.annotation.PostConstruct;
  import java.util.ArrayList;
  import java.util.HashMap;
@@ -42,7 +43,7 @@ public class ClustersProperties {
    KsqldbServerAuth ksqldbServerAuth;
    KeystoreConfig ksqldbServerSsl;
    List<ConnectCluster> kafkaConnect;
-   MetricsConfigData metrics;
+   MetricsConfig metrics;
    Map<String, Object> properties;
    boolean readOnly = false;
    List<SerdeConfig> serde;
@@ -62,8 +63,8 @@ public class ClustersProperties {
  }

  @Data
- @ToString(exclude = "password")
- public static class MetricsConfigData {
+ @ToString(exclude = {"password", "keystorePassword"})
+ public static class MetricsConfig {
    String type;
    Integer port;
    Boolean ssl;
@@ -71,6 +72,31 @@ public class ClustersProperties {
    String password;
    String keystoreLocation;
    String keystorePassword;
+
+   Boolean prometheusExpose;
+   MetricsStorage store;
  }

+ @Data
+ public static class MetricsStorage {
+   PrometheusStorage prometheus;
+   KafkaMetricsStorage kafka;
+ }
+
+ @Data
+ public static class KafkaMetricsStorage {
+   String topic;
+ }
+
+ @Data
+ @ToString(exclude = {"pushGatewayPassword"})
+ public static class PrometheusStorage {
+   String url;
+   String pushGatewayUrl;
+   String pushGatewayUsername;
+   String pushGatewayPassword;
+   String pushGatewayJobName;
+   Boolean remoteWrite;
+ }
+
  @Data
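The new PrometheusStorage settings (pushGatewayUrl, pushGatewayUsername, pushGatewayPassword, pushGatewayJobName) line up with the simpleclient_pushgateway dependency added to the POM. A hedged sketch of how such settings are typically consumed; the class below is illustrative and is not this PR's actual wiring:

import io.prometheus.client.CollectorRegistry;
import io.prometheus.client.exporter.BasicAuthHttpConnectionFactory;
import io.prometheus.client.exporter.PushGateway;

class PushGatewaySketch {
  static void push(CollectorRegistry registry,
                   String url, String user, String password, String jobName) throws Exception {
    PushGateway gateway = new PushGateway(new java.net.URL(url));   // pushGatewayUrl
    if (user != null) {
      // Maps to pushGatewayUsername / pushGatewayPassword
      gateway.setConnectionFactory(new BasicAuthHttpConnectionFactory(user, password));
    }
    // Push all collected samples under the configured job name (pushGatewayJobName)
    gateway.pushAdd(registry, jobName != null ? jobName : "kafka-ui");
  }
}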
@@ -171,7 +197,7 @@ public class ClustersProperties {
    private void setMetricsDefaults() {
      for (Cluster cluster : clusters) {
        if (cluster.getMetrics() != null && !StringUtils.hasText(cluster.getMetrics().getType())) {
-         cluster.getMetrics().setType(MetricsConfig.JMX_METRICS_TYPE);
+         cluster.getMetrics().setType(JMX_METRICS_TYPE);
        }
      }
    }
@@ -7,6 +7,8 @@ import org.springframework.http.HttpMethod;
  import org.springframework.http.HttpStatus;
  import org.springframework.http.server.reactive.ServerHttpRequest;
  import org.springframework.http.server.reactive.ServerHttpResponse;
  import org.springframework.web.reactive.config.CorsRegistry;
  import org.springframework.web.reactive.config.WebFluxConfigurer;
  import org.springframework.web.server.ServerWebExchange;
+ import org.springframework.web.server.WebFilter;
+ import org.springframework.web.server.WebFilterChain;
@@ -1,6 +1,7 @@
  package com.provectus.kafka.ui.config;

  import com.provectus.kafka.ui.exception.ValidationException;
  import java.beans.Transient;
  import javax.annotation.PostConstruct;
  import lombok.Data;
  import org.springframework.boot.context.properties.ConfigurationProperties;
@@ -18,7 +18,8 @@ abstract class AbstractAuthSecurityConfig {
      "/login",
      "/logout",
      "/oauth2/**",
-     "/static/**"
+     "/static/**",
+     "/metrics"
  };

  }
@@ -1,6 +1,7 @@
  package com.provectus.kafka.ui.config.auth;

  import java.util.Collection;
  import lombok.Value;

  public record AuthenticatedUser(String principal, Collection<String> groups) {

@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.config.auth;

  import java.util.Collection;
  import java.util.Map;
  import lombok.Value;
  import org.springframework.security.core.GrantedAuthority;
  import org.springframework.security.oauth2.core.user.OAuth2User;

@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.config.auth;

  import java.util.Collection;
  import java.util.Map;
  import lombok.Value;
  import org.springframework.security.core.GrantedAuthority;
  import org.springframework.security.oauth2.core.oidc.OidcIdToken;
  import org.springframework.security.oauth2.core.oidc.OidcUserInfo;
@@ -13,6 +13,7 @@ import java.util.Collection;
  import java.util.Collections;
  import java.util.List;
  import java.util.Objects;
+ import java.util.stream.Collectors;
  import javax.annotation.Nullable;
  import lombok.RequiredArgsConstructor;
  import lombok.extern.slf4j.Slf4j;
@@ -37,7 +38,7 @@ public class AccessController implements AuthorizationApi {
            .filter(role -> user.groups().contains(role.getName()))
            .map(role -> mapPermissions(role.getPermissions(), role.getClusters()))
            .flatMap(Collection::stream)
-           .toList()
+           .collect(Collectors.toList())
        )
        .switchIfEmpty(Mono.just(Collections.emptyList()));

@@ -69,10 +70,10 @@ public class AccessController implements AuthorizationApi {
            .map(String::toUpperCase)
            .map(this::mapAction)
            .filter(Objects::nonNull)
-           .toList());
+           .collect(Collectors.toList()));
        return dto;
      })
-     .toList();
+     .collect(Collectors.toList());
  }

  @Nullable
@@ -82,13 +82,12 @@ public class ApplicationConfigController extends AbstractController implements A
        .build();
    return validateAccess(context)
        .then(restartRequestDto)
-       .doOnNext(restartDto -> {
-         var newConfig = MAPPER.fromDto(restartDto.getConfig().getProperties());
-         dynamicConfigOperations.persist(newConfig);
+       .<ResponseEntity<Void>>map(dto -> {
+         dynamicConfigOperations.persist(MAPPER.fromDto(dto.getConfig().getProperties()));
+         restarter.requestRestart();
+         return ResponseEntity.ok().build();
        })
-       .doOnEach(sig -> audit(context, sig))
-       .doOnSuccess(dto -> restarter.requestRestart())
-       .map(dto -> ResponseEntity.ok().build());
+       .doOnEach(sig -> audit(context, sig));
  }

  @Override
@@ -117,8 +116,8 @@ public class ApplicationConfigController extends AbstractController implements A
    return validateAccess(context)
        .then(configDto)
        .flatMap(config -> {
-         PropertiesStructure newConfig = MAPPER.fromDto(config.getProperties());
-         ClustersProperties clustersProperties = newConfig.getKafka();
+         PropertiesStructure propertiesStructure = MAPPER.fromDto(config.getProperties());
+         ClustersProperties clustersProperties = propertiesStructure.getKafka();
          return validateClustersConfig(clustersProperties)
              .map(validations -> new ApplicationConfigValidationDTO().clusters(validations));
        })
@@ -26,8 +26,6 @@ import reactor.core.publisher.Mono;
  @RequiredArgsConstructor
  @Slf4j
  public class BrokersController extends AbstractController implements BrokersApi {
-   private static final String BROKER_ID = "brokerId";
-
    private final BrokerService brokerService;
    private final ClusterMapper clusterMapper;

@@ -91,7 +89,7 @@ public class BrokersController extends AbstractController implements BrokersApi
        .cluster(clusterName)
        .clusterConfigActions(ClusterConfigAction.VIEW)
        .operationName("getBrokerConfig")
-       .operationParams(Map.of(BROKER_ID, id))
+       .operationParams(Map.of("brokerId", id))
        .build();

    return validateAccess(context).thenReturn(
@@ -110,7 +108,7 @@ public class BrokersController extends AbstractController implements BrokersApi
        .cluster(clusterName)
        .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
        .operationName("updateBrokerTopicPartitionLogDir")
-       .operationParams(Map.of(BROKER_ID, id))
+       .operationParams(Map.of("brokerId", id))
        .build();

    return validateAccess(context).then(
@@ -130,7 +128,7 @@ public class BrokersController extends AbstractController implements BrokersApi
        .cluster(clusterName)
        .clusterConfigActions(ClusterConfigAction.VIEW, ClusterConfigAction.EDIT)
        .operationName("updateBrokerConfigByName")
-       .operationParams(Map.of(BROKER_ID, id))
+       .operationParams(Map.of("brokerId", id))
        .build();

    return validateAccess(context).then(
@@ -22,6 +22,7 @@ import com.provectus.kafka.ui.service.OffsetsResetService;
  import java.util.Map;
  import java.util.Optional;
  import java.util.function.Supplier;
+ import java.util.stream.Collectors;
  import lombok.RequiredArgsConstructor;
  import lombok.extern.slf4j.Slf4j;
  import org.springframework.beans.factory.annotation.Value;
@@ -199,7 +200,7 @@ public class ConsumerGroupsController extends AbstractController implements Cons
            .consumerGroups(consumerGroupConsumerGroupsPage.consumerGroups()
                .stream()
                .map(ConsumerGroupMapper::toDto)
-               .toList());
+               .collect(Collectors.toList()));
  }

}
@@ -0,0 +1,79 @@ (new file: GraphsController)
package com.provectus.kafka.ui.controller;

import com.provectus.kafka.ui.api.GraphsApi;
import com.provectus.kafka.ui.model.GraphDataRequestDTO;
import com.provectus.kafka.ui.model.GraphDescriptionDTO;
import com.provectus.kafka.ui.model.GraphDescriptionsDTO;
import com.provectus.kafka.ui.model.GraphParameterDTO;
import com.provectus.kafka.ui.model.PrometheusApiQueryResponseDTO;
import com.provectus.kafka.ui.model.rbac.AccessContext;
import com.provectus.kafka.ui.service.graphs.GraphDescription;
import com.provectus.kafka.ui.service.graphs.GraphsService;
import java.time.Duration;
import java.time.OffsetDateTime;
import java.util.Optional;
import lombok.RequiredArgsConstructor;
import org.mapstruct.Mapper;
import org.mapstruct.factory.Mappers;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.server.ServerWebExchange;
import prometheus.query.model.QueryResponse;
import reactor.core.publisher.Mono;

@RestController
@RequiredArgsConstructor
public class GraphsController extends AbstractController implements GraphsApi {

  private static final PrometheusApiMapper MAPPER = Mappers.getMapper(PrometheusApiMapper.class);

  @Mapper
  interface PrometheusApiMapper {
    PrometheusApiQueryResponseDTO fromClientResponse(QueryResponse resp);
  }

  private final GraphsService graphsService;

  @Override
  public Mono<ResponseEntity<PrometheusApiQueryResponseDTO>> getGraphData(String clusterName,
                                                                          Mono<GraphDataRequestDTO> graphDataRequestDto,
                                                                          ServerWebExchange exchange) {
    var context = AccessContext.builder()
        .cluster(clusterName)
        .operationName("getGraphData")
        .build();

    return accessControlService.validateAccess(context)
        .then(
            graphDataRequestDto.flatMap(req ->
                    graphsService.getGraphData(
                        getCluster(clusterName),
                        req.getId(),
                        Optional.ofNullable(req.getFrom()).map(OffsetDateTime::toInstant).orElse(null),
                        Optional.ofNullable(req.getTo()).map(OffsetDateTime::toInstant).orElse(null),
                        req.getParameters()
                    ).map(MAPPER::fromClientResponse))
                .map(ResponseEntity::ok)
        ).doOnEach(sig -> auditService.audit(context, sig));
  }

  @Override
  public Mono<ResponseEntity<GraphDescriptionsDTO>> getGraphsList(String clusterName,
                                                                  ServerWebExchange exchange) {
    var context = AccessContext.builder()
        .cluster(clusterName)
        .operationName("getGraphsList")
        .build();

    var graphs = graphsService.getGraphs(getCluster(clusterName));
    return accessControlService.validateAccess(context).then(
        Mono.just(ResponseEntity.ok(new GraphDescriptionsDTO().graphs(graphs.map(this::map).toList()))));
  }

  private GraphDescriptionDTO map(GraphDescription graph) {
    return new GraphDescriptionDTO(graph.id())
        .defaultPeriod(Optional.ofNullable(graph.defaultInterval()).map(Duration::toString).orElse(null))
        .type(graph.isRange() ? GraphDescriptionDTO.TypeEnum.RANGE : GraphDescriptionDTO.TypeEnum.INSTANT)
        .parameters(graph.params().stream().map(GraphParameterDTO::new).toList());
  }
}
@@ -36,7 +36,6 @@ import reactor.core.publisher.Mono;
  public class KafkaConnectController extends AbstractController implements KafkaConnectApi {
    private static final Set<ConnectorActionDTO> RESTART_ACTIONS
        = Set.of(RESTART, RESTART_FAILED_TASKS, RESTART_ALL_TASKS);
-   private static final String CONNECTOR_NAME = "connectorName";

    private final KafkaConnectService kafkaConnectService;

@@ -113,7 +112,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
        .connect(connectName)
        .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
        .operationName("deleteConnector")
-       .operationParams(Map.of(CONNECTOR_NAME, connectName))
+       .operationParams(Map.of("connectorName", connectName))
        .build();

    return validateAccess(context).then(
@@ -181,7 +180,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
        .connect(connectName)
        .connectActions(ConnectAction.VIEW, ConnectAction.EDIT)
        .operationName("setConnectorConfig")
-       .operationParams(Map.of(CONNECTOR_NAME, connectorName))
+       .operationParams(Map.of("connectorName", connectorName))
        .build();

    return validateAccess(context).then(
@@ -208,7 +207,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
        .connect(connectName)
        .connectActions(connectActions)
        .operationName("updateConnectorState")
-       .operationParams(Map.of(CONNECTOR_NAME, connectorName))
+       .operationParams(Map.of("connectorName", connectorName))
        .build();

    return validateAccess(context).then(
@@ -228,7 +227,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
        .connect(connectName)
        .connectActions(ConnectAction.VIEW)
        .operationName("getConnectorTasks")
-       .operationParams(Map.of(CONNECTOR_NAME, connectorName))
+       .operationParams(Map.of("connectorName", connectorName))
        .build();

    return validateAccess(context).thenReturn(
@@ -248,7 +247,7 @@ public class KafkaConnectController extends AbstractController implements KafkaC
        .connect(connectName)
        .connectActions(ConnectAction.VIEW, ConnectAction.RESTART)
        .operationName("restartConnectorTask")
-       .operationParams(Map.of(CONNECTOR_NAME, connectorName))
+       .operationParams(Map.of("connectorName", connectorName))
        .build();

    return validateAccess(context).then(
@@ -24,7 +24,6 @@ import com.provectus.kafka.ui.model.rbac.permission.AuditAction;
  import com.provectus.kafka.ui.model.rbac.permission.TopicAction;
  import com.provectus.kafka.ui.service.DeserializationService;
  import com.provectus.kafka.ui.service.MessagesService;
- import com.provectus.kafka.ui.util.DynamicConfigOperations;
  import java.util.List;
  import java.util.Map;
  import java.util.Optional;
@@ -32,7 +31,6 @@ import javax.annotation.Nullable;
  import javax.validation.Valid;
  import lombok.RequiredArgsConstructor;
  import lombok.extern.slf4j.Slf4j;
- import org.apache.commons.lang3.StringUtils;
  import org.apache.commons.lang3.tuple.Pair;
  import org.apache.kafka.common.TopicPartition;
  import org.springframework.http.ResponseEntity;
@@ -49,7 +47,6 @@ public class MessagesController extends AbstractController implements MessagesAp

    private final MessagesService messagesService;
    private final DeserializationService deserializationService;
-   private final DynamicConfigOperations dynamicConfigOperations;

    @Override
    public Mono<ResponseEntity<Void>> deleteTopicMessages(
@@ -97,10 +94,6 @@ public class MessagesController extends AbstractController implements MessagesAp
        .topicActions(MESSAGES_READ)
        .operationName("getTopicMessages");

-   if (StringUtils.isNoneEmpty(q) && MessageFilterTypeDTO.GROOVY_SCRIPT == filterQueryType) {
-     dynamicConfigOperations.checkIfFilteringGroovyEnabled();
-   }
-
    if (auditService.isAuditTopic(getCluster(clusterName), topicName)) {
      contextBuilder.auditActions(AuditAction.VIEW);
    }
@@ -0,0 +1,32 @@ (new file: PrometheusExposeController)
package com.provectus.kafka.ui.controller;

import com.provectus.kafka.ui.api.PrometheusExposeApi;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.service.StatisticsCache;
import com.provectus.kafka.ui.service.metrics.prometheus.PrometheusExpose;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.server.ServerWebExchange;
import reactor.core.publisher.Mono;

@RestController
@RequiredArgsConstructor
public class PrometheusExposeController extends AbstractController implements PrometheusExposeApi {

  private final StatisticsCache statisticsCache;

  @Override
  public Mono<ResponseEntity<String>> getAllMetrics(ServerWebExchange exchange) {
    return Mono.just(
        PrometheusExpose.exposeAllMetrics(
            clustersStorage.getKafkaClusters()
                .stream()
                .filter(KafkaCluster::isExposeMetricsViaPrometheusEndpoint)
                .collect(Collectors.toMap(KafkaCluster::getName, c -> statisticsCache.get(c).getMetrics()))
        )
    );
  }

}
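The controller hands each cluster's cached metrics to PrometheusExpose.exposeAllMetrics, which it wraps directly in Mono.just, so that helper is expected to return a ready ResponseEntity<String> in Prometheus text format. The real helper lives elsewhere in this PR; a rough, assumption-laden sketch of the idea using only the simpleclient text format (the map signature and method name are hypothetical):

import io.prometheus.client.Collector.MetricFamilySamples;
import io.prometheus.client.exporter.common.TextFormat;
import java.io.StringWriter;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.springframework.http.ResponseEntity;

class PrometheusExposeSketch {
  // Hypothetical stand-in: render every cluster's metric families as one text/plain payload
  static ResponseEntity<String> exposeAll(Map<String, List<MetricFamilySamples>> metricsPerCluster) throws Exception {
    StringWriter out = new StringWriter();
    for (List<MetricFamilySamples> families : metricsPerCluster.values()) {
      TextFormat.write004(out, Collections.enumeration(families));
    }
    return ResponseEntity.ok()
        .header("Content-Type", TextFormat.CONTENT_TYPE_004)
        .body(out.toString());
  }
}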
@@ -15,6 +15,7 @@ import com.provectus.kafka.ui.model.rbac.permission.SchemaAction;
  import com.provectus.kafka.ui.service.SchemaRegistryService;
  import java.util.List;
  import java.util.Map;
+ import java.util.stream.Collectors;
  import javax.validation.Valid;
  import lombok.RequiredArgsConstructor;
  import lombok.extern.slf4j.Slf4j;
@@ -234,7 +235,7 @@ public class SchemasController extends AbstractController implements SchemasApi
          List<String> subjectsToRender = filteredSubjects.stream()
              .skip(subjectToSkip)
              .limit(pageSize)
-             .toList();
+             .collect(Collectors.toList());
          return schemaRegistryService.getAllLatestVersionSchemas(getCluster(clusterName), subjectsToRender)
              .map(subjs -> subjs.stream().map(kafkaSrMapper::toDto).toList())
              .map(subjs -> new SchemaSubjectsResponseDTO().pageCount(totalPages).schemas(subjs));
@@ -22,7 +22,6 @@ import com.provectus.kafka.ui.model.TopicConfigDTO;
  import com.provectus.kafka.ui.model.TopicCreationDTO;
  import com.provectus.kafka.ui.model.TopicDTO;
  import com.provectus.kafka.ui.model.TopicDetailsDTO;
- import com.provectus.kafka.ui.model.TopicProducerStateDTO;
  import com.provectus.kafka.ui.model.TopicUpdateDTO;
  import com.provectus.kafka.ui.model.TopicsResponseDTO;
  import com.provectus.kafka.ui.model.rbac.AccessContext;
@@ -144,7 +143,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
            .map(lst -> lst.stream()
                .map(InternalTopicConfig::from)
                .map(clusterMapper::toTopicConfig)
-               .toList())
+               .collect(toList()))
            .map(Flux::fromIterable)
            .map(ResponseEntity::ok)
    ).doOnEach(sig -> audit(context, sig));
@@ -208,7 +207,7 @@ public class TopicsController extends AbstractController implements TopicsApi {
          return topicsService.loadTopics(getCluster(clusterName), topicsPage)
              .map(topicsToRender ->
                  new TopicsResponseDTO()
-                     .topics(topicsToRender.stream().map(clusterMapper::toTopic).toList())
+                     .topics(topicsToRender.stream().map(clusterMapper::toTopic).collect(toList()))
                      .pageCount(totalPages));
        })
        .map(ResponseEntity::ok)
@@ -328,34 +327,6 @@ public class TopicsController extends AbstractController implements TopicsApi {
        .doOnEach(sig -> audit(context, sig));
  }

- @Override
- public Mono<ResponseEntity<Flux<TopicProducerStateDTO>>> getActiveProducerStates(String clusterName,
-                                                                                  String topicName,
-                                                                                  ServerWebExchange exchange) {
-   var context = AccessContext.builder()
-       .cluster(clusterName)
-       .topic(topicName)
-       .topicActions(VIEW)
-       .operationName("getActiveProducerStates")
-       .build();
-
-   Comparator<TopicProducerStateDTO> ordering =
-       Comparator.comparingInt(TopicProducerStateDTO::getPartition)
-           .thenComparing(Comparator.comparing(TopicProducerStateDTO::getProducerId).reversed());
-
-   Flux<TopicProducerStateDTO> states = topicsService.getActiveProducersState(getCluster(clusterName), topicName)
-       .flatMapMany(statesMap ->
-           Flux.fromStream(
-               statesMap.entrySet().stream()
-                   .flatMap(e -> e.getValue().stream().map(p -> clusterMapper.map(e.getKey().partition(), p)))
-                   .sorted(ordering)));
-
-   return validateAccess(context)
-       .thenReturn(states)
-       .map(ResponseEntity::ok)
-       .doOnEach(sig -> audit(context, sig));
- }
-
  private Comparator<InternalTopic> getComparatorForTopic(
      TopicColumnsToSortDTO orderBy) {
    var defaultComparator = Comparator.comparing(InternalTopic::getName);
@@ -5,6 +5,7 @@ import java.util.Collection;
  import java.util.HashSet;
  import java.util.Map;
  import java.util.Set;
+ import java.util.stream.Collectors;
  import lombok.Getter;
  import lombok.extern.slf4j.Slf4j;
  import org.apache.commons.lang3.mutable.MutableLong;
@@ -27,7 +28,7 @@ class OffsetsInfo {
    this(consumer,
        consumer.partitionsFor(topic).stream()
            .map(pi -> new TopicPartition(topic, pi.partition()))
-           .toList()
+           .collect(Collectors.toList())
    );
  }

@@ -106,7 +106,7 @@ public class GlobalErrorWebExceptionHandler extends AbstractErrorWebExceptionHan
          err.setFieldName(e.getKey());
          err.setRestrictions(List.copyOf(e.getValue()));
          return err;
-       }).toList();
+       }).collect(Collectors.toList());

    var message = fieldsErrors.isEmpty()
        ? exception.getMessage()
@@ -1,9 +1,12 @@
  package com.provectus.kafka.ui.mapper;

+ import static io.prometheus.client.Collector.MetricFamilySamples;
+ import static java.util.stream.Collectors.toList;
+ import static java.util.stream.Collectors.toMap;

  import com.provectus.kafka.ui.config.ClustersProperties;
  import com.provectus.kafka.ui.model.BrokerConfigDTO;
  import com.provectus.kafka.ui.model.BrokerDTO;
  import com.provectus.kafka.ui.model.BrokerDiskUsageDTO;
  import com.provectus.kafka.ui.model.BrokerMetricsDTO;
  import com.provectus.kafka.ui.model.ClusterDTO;
  import com.provectus.kafka.ui.model.ClusterFeature;
@@ -14,7 +17,6 @@ import com.provectus.kafka.ui.model.ConfigSynonymDTO;
  import com.provectus.kafka.ui.model.ConnectDTO;
  import com.provectus.kafka.ui.model.InternalBroker;
  import com.provectus.kafka.ui.model.InternalBrokerConfig;
- import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
  import com.provectus.kafka.ui.model.InternalClusterState;
  import com.provectus.kafka.ui.model.InternalPartition;
  import com.provectus.kafka.ui.model.InternalReplica;
@@ -30,12 +32,14 @@ import com.provectus.kafka.ui.model.ReplicaDTO;
  import com.provectus.kafka.ui.model.TopicConfigDTO;
  import com.provectus.kafka.ui.model.TopicDTO;
  import com.provectus.kafka.ui.model.TopicDetailsDTO;
- import com.provectus.kafka.ui.model.TopicProducerStateDTO;
- import com.provectus.kafka.ui.service.metrics.RawMetric;
+ import com.provectus.kafka.ui.service.metrics.SummarizedMetrics;
+ import java.math.BigDecimal;
+ import java.util.LinkedHashMap;
  import java.util.List;
  import java.util.Map;
+ import java.util.stream.IntStream;
+ import java.util.stream.Stream;
  import org.apache.kafka.clients.admin.ConfigEntry;
- import org.apache.kafka.clients.admin.ProducerState;
  import org.apache.kafka.common.acl.AccessControlEntry;
  import org.apache.kafka.common.acl.AclBinding;
  import org.apache.kafka.common.acl.AclOperation;
@@ -53,21 +57,28 @@ public interface ClusterMapper {

  ClusterStatsDTO toClusterStats(InternalClusterState clusterState);

+ @Deprecated
  default ClusterMetricsDTO toClusterMetrics(Metrics metrics) {
    return new ClusterMetricsDTO()
-       .items(metrics.getSummarizedMetrics().map(this::convert).toList());
+       .items(convert(new SummarizedMetrics(metrics).asStream()).toList());
  }

- private MetricDTO convert(RawMetric rawMetric) {
-   return new MetricDTO()
-       .name(rawMetric.name())
-       .labels(rawMetric.labels())
-       .value(rawMetric.value());
+ private Stream<MetricDTO> convert(Stream<MetricFamilySamples> metrics) {
+   return metrics
+       .flatMap(m -> m.samples.stream())
+       .map(s ->
+           new MetricDTO()
+               .name(s.name)
+               .labels(IntStream.range(0, s.labelNames.size())
+                   .boxed()
+                   //collecting to map, keeping order
+                   .collect(toMap(s.labelNames::get, s.labelValues::get, (m1, m2) -> null, LinkedHashMap::new)))
+               .value(BigDecimal.valueOf(s.value))
+       );
  }

- default BrokerMetricsDTO toBrokerMetrics(List<RawMetric> metrics) {
-   return new BrokerMetricsDTO()
-       .metrics(metrics.stream().map(this::convert).toList());
+ default BrokerMetricsDTO toBrokerMetrics(List<MetricFamilySamples> metrics) {
+   return new BrokerMetricsDTO().metrics(convert(metrics.stream()).toList());
  }

  @Mapping(target = "isSensitive", source = "sensitive")
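For reference, Collector.MetricFamilySamples (from the Prometheus simpleclient added to the POM) carries a flat list of samples with parallel label-name/label-value lists, which is what the new convert method flattens into MetricDTOs. An illustrative construction; the metric name and values below are made up:

import io.prometheus.client.Collector;
import io.prometheus.client.Collector.MetricFamilySamples;
import io.prometheus.client.Collector.MetricFamilySamples.Sample;
import java.util.List;

class MetricFamilySamplesSketch {
  static MetricFamilySamples example() {
    Sample sample = new Sample(
        "kafka_broker_bytes_in_total",     // sample name
        List.of("cluster", "broker_id"),   // label names, positionally matched...
        List.of("local", "1"),             // ...with label values
        1234.5);                           // value; the mapper above wraps it in BigDecimal
    return new MetricFamilySamples(
        "kafka_broker_bytes_in_total", Collector.Type.GAUGE, "illustrative family", List.of(sample));
  }
}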
@@ -108,26 +119,7 @@
  List<ClusterDTO.FeaturesEnum> toFeaturesEnum(List<ClusterFeature> features);

  default List<PartitionDTO> map(Map<Integer, InternalPartition> map) {
-   return map.values().stream().map(this::toPartition).toList();
- }
-
- default BrokerDiskUsageDTO map(Integer id, InternalBrokerDiskUsage internalBrokerDiskUsage) {
-   final BrokerDiskUsageDTO brokerDiskUsage = new BrokerDiskUsageDTO();
-   brokerDiskUsage.setBrokerId(id);
-   brokerDiskUsage.segmentCount((int) internalBrokerDiskUsage.getSegmentCount());
-   brokerDiskUsage.segmentSize(internalBrokerDiskUsage.getSegmentSize());
-   return brokerDiskUsage;
- }
-
- default TopicProducerStateDTO map(int partition, ProducerState state) {
-   return new TopicProducerStateDTO()
-       .partition(partition)
-       .producerId(state.producerId())
-       .producerEpoch(state.producerEpoch())
-       .lastSequence(state.lastSequence())
-       .lastTimestampMs(state.lastTimestamp())
-       .coordinatorEpoch(state.coordinatorEpoch().stream().boxed().findAny().orElse(null))
-       .currentTransactionStartOffset(state.currentTransactionStartOffset().stream().boxed().findAny().orElse(null));
+   return map.values().stream().map(this::toPartition).collect(toList());
  }

  static KafkaAclDTO.OperationEnum mapAclOperation(AclOperation operation) {
@@ -7,6 +7,8 @@ import java.util.Collection;
  import java.util.List;
  import java.util.Map;
  import java.util.stream.Collectors;
  import org.apache.kafka.clients.admin.LogDirDescription;
  import org.apache.kafka.clients.admin.ReplicaInfo;
  import org.apache.kafka.common.TopicPartition;
  import org.apache.kafka.common.protocol.Errors;
  import org.apache.kafka.common.requests.DescribeLogDirsResponse;
@@ -16,51 +18,49 @@ import org.springframework.stereotype.Component;
  public class DescribeLogDirsMapper {

    public List<BrokersLogdirsDTO> toBrokerLogDirsList(
-       Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>> logDirsInfo) {
+       Map<Integer, Map<String, LogDirDescription>> logDirsInfo) {

      return logDirsInfo.entrySet().stream().map(
          mapEntry -> mapEntry.getValue().entrySet().stream()
              .map(e -> toBrokerLogDirs(mapEntry.getKey(), e.getKey(), e.getValue()))
-             .toList()
+             .collect(Collectors.toList())
      ).flatMap(Collection::stream).collect(Collectors.toList());
    }

    private BrokersLogdirsDTO toBrokerLogDirs(Integer broker, String dirName,
-                                             DescribeLogDirsResponse.LogDirInfo logDirInfo) {
+                                             LogDirDescription logDirInfo) {
      BrokersLogdirsDTO result = new BrokersLogdirsDTO();
      result.setName(dirName);
-     if (logDirInfo.error != null && logDirInfo.error != Errors.NONE) {
-       result.setError(logDirInfo.error.message());
+     if (logDirInfo.error() != null) {
+       result.setError(logDirInfo.error().getMessage());
      }
-     var topics = logDirInfo.replicaInfos.entrySet().stream()
+     var topics = logDirInfo.replicaInfos().entrySet().stream()
          .collect(Collectors.groupingBy(e -> e.getKey().topic())).entrySet().stream()
          .map(e -> toTopicLogDirs(broker, e.getKey(), e.getValue()))
-         .toList();
+         .collect(Collectors.toList());
      result.setTopics(topics);
      return result;
    }

    private BrokerTopicLogdirsDTO toTopicLogDirs(Integer broker, String name,
-                                                List<Map.Entry<TopicPartition,
-                                                    DescribeLogDirsResponse.ReplicaInfo>> partitions) {
+                                                List<Map.Entry<TopicPartition, ReplicaInfo>> partitions) {
      BrokerTopicLogdirsDTO topic = new BrokerTopicLogdirsDTO();
      topic.setName(name);
      topic.setPartitions(
          partitions.stream().map(
              e -> topicPartitionLogDir(
-                 broker, e.getKey().partition(), e.getValue())).toList()
+                 broker, e.getKey().partition(), e.getValue())).collect(Collectors.toList())
      );
      return topic;
    }

    private BrokerTopicPartitionLogdirDTO topicPartitionLogDir(Integer broker, Integer partition,
-                                                              DescribeLogDirsResponse.ReplicaInfo
-                                                                  replicaInfo) {
+                                                              ReplicaInfo replicaInfo) {
      BrokerTopicPartitionLogdirDTO logDir = new BrokerTopicPartitionLogdirDTO();
      logDir.setBroker(broker);
      logDir.setPartition(partition);
-     logDir.setSize(replicaInfo.size);
-     logDir.setOffsetLag(replicaInfo.offsetLag);
+     logDir.setSize(replicaInfo.size());
+     logDir.setOffsetLag(replicaInfo.offsetLag());
      return logDir;
    }
  }
@@ -21,12 +21,12 @@ public class InternalBroker {

    public InternalBroker(Node node,
                          PartitionDistributionStats partitionDistribution,
-                         Statistics statistics) {
+                         Metrics metrics) {
      this.id = node.id();
      this.host = node.host();
      this.port = node.port();
-     this.bytesInPerSec = statistics.getMetrics().getBrokerBytesInPerSec().get(node.id());
-     this.bytesOutPerSec = statistics.getMetrics().getBrokerBytesOutPerSec().get(node.id());
+     this.bytesInPerSec = metrics.getIoRates().brokerBytesInPerSec().get(node.id());
+     this.bytesOutPerSec = metrics.getIoRates().brokerBytesOutPerSec().get(node.id());
      this.partitionsLeader = partitionDistribution.getPartitionLeaders().get(node);
      this.partitions = partitionDistribution.getPartitionsCount().get(node);
      this.inSyncPartitions = partitionDistribution.getInSyncPartitions().get(node);
@@ -1,11 +0,0 @@ (file deleted: InternalBrokerDiskUsage)
package com.provectus.kafka.ui.model;

import lombok.Builder;
import lombok.Data;

@Data
@Builder(toBuilder = true)
public class InternalBrokerDiskUsage {
  private final long segmentCount;
  private final long segmentSize;
}
@@ -1,55 +0,0 @@ (file deleted: InternalClusterMetrics)
package com.provectus.kafka.ui.model;

import java.math.BigDecimal;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
import lombok.Builder;
import lombok.Data;


@Data
@Builder(toBuilder = true)
public class InternalClusterMetrics {

  public static InternalClusterMetrics empty() {
    return InternalClusterMetrics.builder()
        .brokers(List.of())
        .topics(Map.of())
        .status(ServerStatusDTO.OFFLINE)
        .internalBrokerMetrics(Map.of())
        .metrics(List.of())
        .version("unknown")
        .build();
  }

  private final String version;

  private final ServerStatusDTO status;
  private final Throwable lastKafkaException;

  private final int brokerCount;
  private final int activeControllers;
  private final List<Integer> brokers;

  private final int topicCount;
  private final Map<String, InternalTopic> topics;

  // partitions stats
  private final int underReplicatedPartitionCount;
  private final int onlinePartitionCount;
  private final int offlinePartitionCount;
  private final int inSyncReplicasCount;
  private final int outOfSyncReplicasCount;

  // log dir stats
  @Nullable // will be null if log dir collection disabled
  private final Map<Integer, InternalBrokerDiskUsage> internalBrokerDiskUsage;

  // metrics from metrics collector
  private final BigDecimal bytesInPerSec;
  private final BigDecimal bytesOutPerSec;
  private final Map<Integer, BrokerMetrics> internalBrokerMetrics;
  private final List<MetricDTO> metrics;

}
@@ -36,39 +36,42 @@ public class InternalClusterState {
            .message(e.getMessage())
            .stackTrace(Throwables.getStackTraceAsString(e)))
        .orElse(null);
-   topicCount = statistics.getTopicDescriptions().size();
+   topicCount = (int) statistics.topicDescriptions().count();
    brokerCount = statistics.getClusterDescription().getNodes().size();
    activeControllers = Optional.ofNullable(statistics.getClusterDescription().getController())
        .map(Node::id)
        .orElse(null);
    version = statistics.getVersion();

-   if (statistics.getLogDirInfo() != null) {
-     diskUsage = statistics.getLogDirInfo().getBrokerStats().entrySet().stream()
-         .map(e -> new BrokerDiskUsageDTO()
-             .brokerId(e.getKey())
-             .segmentSize(e.getValue().getSegmentSize())
-             .segmentCount(e.getValue().getSegmentsCount()))
-         .collect(Collectors.toList());
-   }
+   diskUsage = statistics.getClusterState().getNodesStates().values().stream()
+       .filter(n -> n.segmentStats() != null)
+       .map(n -> new BrokerDiskUsageDTO()
+           .brokerId(n.id())
+           .segmentSize(n.segmentStats().getSegmentSize())
+           .segmentCount(n.segmentStats().getSegmentsCount()))
+       .collect(Collectors.toList());

    features = statistics.getFeatures();

    bytesInPerSec = statistics
-       .getMetrics()
-       .getBrokerBytesInPerSec()
-       .values().stream()
+       .getIoRates()
+       .brokerBytesInPerSec()
+       .values()
+       .stream()
        .reduce(BigDecimal::add)
        .orElse(null);

    bytesOutPerSec = statistics
-       .getMetrics()
-       .getBrokerBytesOutPerSec()
-       .values().stream()
+       .getIoRates()
+       .brokerBytesOutPerSec()
+       .values()
+       .stream()
        .reduce(BigDecimal::add)
        .orElse(null);

-   var partitionsStats = new PartitionsStats(statistics.getTopicDescriptions().values());
+   var partitionsStats = new PartitionsStats(statistics.topicDescriptions().toList());
    onlinePartitionCount = partitionsStats.getOnlinePartitionCount();
    offlinePartitionCount = partitionsStats.getOfflinePartitionCount();
    inSyncReplicasCount = partitionsStats.getInSyncReplicasCount();
@@ -3,14 +3,17 @@ package com.provectus.kafka.ui.model;
  import static java.util.stream.Collectors.collectingAndThen;
  import static java.util.stream.Collectors.groupingBy;
  import static java.util.stream.Collectors.summarizingLong;
  import static java.util.stream.Collectors.toList;

+ import jakarta.annotation.Nullable;
+ import java.util.HashMap;
  import java.util.List;
  import java.util.LongSummaryStatistics;
  import java.util.Map;
  import java.util.concurrent.atomic.LongAdder;
+ import lombok.RequiredArgsConstructor;
  import lombok.Value;
+ import org.apache.kafka.clients.admin.LogDirDescription;
  import org.apache.kafka.common.TopicPartition;
- import org.apache.kafka.common.requests.DescribeLogDirsResponse;
  import reactor.util.function.Tuple2;
  import reactor.util.function.Tuple3;
  import reactor.util.function.Tuples;
@@ -19,30 +22,37 @@ import reactor.util.function.Tuples;
  public class InternalLogDirStats {

    @Value
+   @RequiredArgsConstructor
    public static class SegmentStats {
-     long segmentSize;
-     int segmentsCount;
+     Long segmentSize;
+     Integer segmentsCount;

-     public SegmentStats(LongSummaryStatistics s) {
-       segmentSize = s.getSum();
-       segmentsCount = (int) s.getCount();
+     private SegmentStats(LongSummaryStatistics s) {
+       this(s.getSum(), (int) s.getCount());
      }
    }

+   public record LogDirSpaceStats(@Nullable Long totalBytes,
+                                  @Nullable Long usableBytes,
+                                  Map<String, Long> totalPerDir,
+                                  Map<String, Long> usablePerDir) {
+   }
+
    Map<TopicPartition, SegmentStats> partitionsStats;
    Map<String, SegmentStats> topicStats;
    Map<Integer, SegmentStats> brokerStats;
+   Map<Integer, LogDirSpaceStats> brokerDirsStats;

    public static InternalLogDirStats empty() {
      return new InternalLogDirStats(Map.of());
    }

-   public InternalLogDirStats(Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>> log) {
+   public InternalLogDirStats(Map<Integer, Map<String, LogDirDescription>> logsInfo) {
      final List<Tuple3<Integer, TopicPartition, Long>> topicPartitions =
-         log.entrySet().stream().flatMap(b ->
+         logsInfo.entrySet().stream().flatMap(b ->
              b.getValue().entrySet().stream().flatMap(topicMap ->
-                 topicMap.getValue().replicaInfos.entrySet().stream()
-                     .map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size))
+                 topicMap.getValue().replicaInfos().entrySet().stream()
+                     .map(e -> Tuples.of(b.getKey(), e.getKey(), e.getValue().size()))
              )
          ).toList();

@ -64,5 +74,34 @@ public class InternalLogDirStats {
|
|||
Tuple2::getT1,
|
||||
collectingAndThen(
|
||||
summarizingLong(Tuple3::getT3), SegmentStats::new)));
|
||||
|
||||
brokerDirsStats = calculateSpaceStats(logsInfo);
|
||||
}
|
||||
|
||||
private static Map<Integer, LogDirSpaceStats> calculateSpaceStats(
|
||||
Map<Integer, Map<String, LogDirDescription>> logsInfo) {
|
||||
|
||||
var stats = new HashMap<Integer, LogDirSpaceStats>();
|
||||
logsInfo.forEach((brokerId, logDirStats) -> {
|
||||
Map<String, Long> totalBytes = new HashMap<>();
|
||||
Map<String, Long> usableBytes = new HashMap<>();
|
||||
logDirStats.forEach((logDir, descr) -> {
|
||||
if (descr.error() == null) {
|
||||
return;
|
||||
}
|
||||
descr.totalBytes().ifPresent(b -> totalBytes.merge(logDir, b, Long::sum));
|
||||
descr.usableBytes().ifPresent(b -> usableBytes.merge(logDir, b, Long::sum));
|
||||
});
|
||||
stats.put(
|
||||
brokerId,
|
||||
new LogDirSpaceStats(
|
||||
totalBytes.isEmpty() ? null : totalBytes.values().stream().mapToLong(i -> i).sum(),
|
||||
usableBytes.isEmpty() ? null : usableBytes.values().stream().mapToLong(i -> i).sum(),
|
||||
totalBytes,
|
||||
usableBytes
|
||||
)
|
||||
);
|
||||
});
|
||||
return stats;
|
||||
}
|
||||
}
|
||||
|
|
|
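For context, a minimal sketch of how per-broker segment sizes can be derived from the Map<Integer, Map<String, LogDirDescription>> shape the new constructor accepts. This is an illustration, not code from the branch; it assumes Kafka clients 3.x, where LogDirDescription#replicaInfos() returns Map<TopicPartition, ReplicaInfo>.

    // Illustrative sketch only; not part of this diff.
    import java.util.Map;
    import java.util.stream.Collectors;
    import org.apache.kafka.clients.admin.LogDirDescription;

    class BrokerSegmentSizeSketch {
      // Sums the on-disk size of every replica hosted by each broker, across all its log dirs.
      static Map<Integer, Long> totalSegmentSizePerBroker(Map<Integer, Map<String, LogDirDescription>> logDirs) {
        return logDirs.entrySet().stream()
            .collect(Collectors.toMap(
                Map.Entry::getKey,
                e -> e.getValue().values().stream()
                    .flatMap(dir -> dir.replicaInfos().values().stream())
                    .mapToLong(replica -> replica.size())
                    .sum()));
      }
    }
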
@@ -4,6 +4,7 @@ import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import lombok.Value;
import org.apache.kafka.common.TopicPartition;

@@ -30,4 +31,11 @@ public class InternalPartitionsOffsets {
return Optional.ofNullable(offsets.get(topic, partition));
}

public Map<Integer, Long> topicOffsets(String topic, boolean earliest) {
return offsets.row(topic)
.entrySet()
.stream()
.collect(Collectors.toMap(Map.Entry::getKey, e -> earliest ? e.getValue().earliest : e.getValue().getLatest()));
}

}

@@ -1,13 +0,0 @@
package com.provectus.kafka.ui.model;

import java.util.Map;
import lombok.Builder;
import lombok.Data;

@Data
@Builder(toBuilder = true)
public class InternalSegmentSizeDto {

private final Map<String, InternalTopic> internalTopicWithSegmentSize;
private final InternalClusterMetrics clusterMetricsWithSegmentSize;
}

@@ -1,23 +1,22 @@
package com.provectus.kafka.ui.model;

import com.provectus.kafka.ui.config.ClustersProperties;
import static com.provectus.kafka.ui.model.InternalLogDirStats.SegmentStats;

import java.math.BigDecimal;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.Builder;
import lombok.Data;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.TopicPartition;

@Data
@Builder(toBuilder = true)
public class InternalTopic {

ClustersProperties clustersProperties;

// from TopicDescription
private final String name;
private final boolean internal;
@@ -44,7 +43,8 @@ public class InternalTopic {
List<ConfigEntry> configs,
InternalPartitionsOffsets partitionsOffsets,
Metrics metrics,
InternalLogDirStats logDirInfo,
@Nullable SegmentStats segmentStats,
@Nullable Map<Integer, SegmentStats> partitionsSegmentStats,
@Nullable String internalTopicPrefix) {
var topic = InternalTopic.builder();

@@ -81,13 +81,12 @@ public class InternalTopic {
partitionDto.offsetMax(offsets.getLatest());
});

var segmentStats =
logDirInfo.getPartitionsStats().get(
new TopicPartition(topicDescription.name(), partition.partition()));
if (segmentStats != null) {
partitionDto.segmentCount(segmentStats.getSegmentsCount());
partitionDto.segmentSize(segmentStats.getSegmentSize());
}
Optional.ofNullable(partitionsSegmentStats)
.flatMap(s -> Optional.ofNullable(s.get(partition.partition())))
.ifPresent(stats -> {
partitionDto.segmentCount(stats.getSegmentsCount());
partitionDto.segmentSize(stats.getSegmentSize());
});

return partitionDto.build();
})
@@ -108,14 +107,14 @@ public class InternalTopic {
: topicDescription.partitions().get(0).replicas().size()
);

var segmentStats = logDirInfo.getTopicStats().get(topicDescription.name());
if (segmentStats != null) {
topic.segmentCount(segmentStats.getSegmentsCount());
topic.segmentSize(segmentStats.getSegmentSize());
}
Optional.ofNullable(segmentStats)
.ifPresent(stats -> {
topic.segmentCount(stats.getSegmentsCount());
topic.segmentSize(stats.getSegmentSize());
});

topic.bytesInPerSec(metrics.getTopicBytesInPerSec().get(topicDescription.name()));
topic.bytesOutPerSec(metrics.getTopicBytesOutPerSec().get(topicDescription.name()));
topic.bytesInPerSec(metrics.getIoRates().topicBytesInPerSec().get(topicDescription.name()));
topic.bytesOutPerSec(metrics.getIoRates().topicBytesOutPerSec().get(topicDescription.name()));

topic.topicConfigs(
configs.stream().map(InternalTopicConfig::from).collect(Collectors.toList()));

@@ -5,6 +5,7 @@ import com.provectus.kafka.ui.connect.api.KafkaConnectClientApi;
import com.provectus.kafka.ui.emitter.PollingSettings;
import com.provectus.kafka.ui.service.ksql.KsqlApiClient;
import com.provectus.kafka.ui.service.masking.DataMasking;
import com.provectus.kafka.ui.service.metrics.scrape.MetricsScrapping;
import com.provectus.kafka.ui.sr.api.KafkaSrClientApi;
import com.provectus.kafka.ui.util.ReactiveFailover;
import java.util.Map;
@@ -13,6 +14,7 @@ import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import prometheus.query.api.PrometheusClientApi;

@Data
@Builder(toBuilder = true)
@@ -25,10 +27,12 @@ public class KafkaCluster {
private final String bootstrapServers;
private final Properties properties;
private final boolean readOnly;
private final MetricsConfig metricsConfig;
private final boolean exposeMetricsViaPrometheusEndpoint;
private final DataMasking masking;
private final PollingSettings pollingSettings;
private final ReactiveFailover<KafkaSrClientApi> schemaRegistryClient;
private final Map<String, ReactiveFailover<KafkaConnectClientApi>> connectsClients;
private final ReactiveFailover<KsqlApiClient> ksqlClient;
private final MetricsScrapping metricsScrapping;
private final ReactiveFailover<PrometheusClientApi> prometheusStorageClient;
}

@@ -1,13 +1,11 @@
package com.provectus.kafka.ui.model;

import static java.util.stream.Collectors.toMap;
import static io.prometheus.client.Collector.MetricFamilySamples;

import com.provectus.kafka.ui.service.metrics.RawMetric;
import com.provectus.kafka.ui.service.metrics.scrape.inferred.InferredMetrics;
import java.math.BigDecimal;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Stream;
import lombok.Builder;
import lombok.Value;

@@ -16,28 +14,32 @@ import lombok.Value;
@Value
public class Metrics {

Map<Integer, BigDecimal> brokerBytesInPerSec;
Map<Integer, BigDecimal> brokerBytesOutPerSec;
Map<String, BigDecimal> topicBytesInPerSec;
Map<String, BigDecimal> topicBytesOutPerSec;
Map<Integer, List<RawMetric>> perBrokerMetrics;
IoRates ioRates;
InferredMetrics inferredMetrics;
Map<Integer, List<MetricFamilySamples>> perBrokerScrapedMetrics;

public static Metrics empty() {
return Metrics.builder()
.brokerBytesInPerSec(Map.of())
.brokerBytesOutPerSec(Map.of())
.topicBytesInPerSec(Map.of())
.topicBytesOutPerSec(Map.of())
.perBrokerMetrics(Map.of())
.ioRates(IoRates.empty())
.perBrokerScrapedMetrics(Map.of())
.inferredMetrics(InferredMetrics.empty())
.build();
}

public Stream<RawMetric> getSummarizedMetrics() {
return perBrokerMetrics.values().stream()
.flatMap(Collection::stream)
.collect(toMap(RawMetric::identityKey, m -> m, (m1, m2) -> m1.copyWithValue(m1.value().add(m2.value()))))
.values()
.stream();
@Builder
public record IoRates(Map<Integer, BigDecimal> brokerBytesInPerSec,
Map<Integer, BigDecimal> brokerBytesOutPerSec,
Map<String, BigDecimal> topicBytesInPerSec,
Map<String, BigDecimal> topicBytesOutPerSec) {

static IoRates empty() {
return IoRates.builder()
.brokerBytesOutPerSec(Map.of())
.brokerBytesInPerSec(Map.of())
.topicBytesOutPerSec(Map.of())
.topicBytesInPerSec(Map.of())
.build();
}
}

}

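A hedged usage sketch (not from the branch) of the new IoRates record: per-broker rates can be folded into a single cluster-wide figure the same way the earlier hunk reduces brokerBytesInPerSec with BigDecimal::add.

    // Illustrative sketch only; IoRatesLike stands in for Metrics.IoRates.
    import java.math.BigDecimal;
    import java.util.Map;

    class IoRatesSketch {
      record IoRatesLike(Map<Integer, BigDecimal> brokerBytesInPerSec,
                         Map<Integer, BigDecimal> brokerBytesOutPerSec) {
        // Cluster-wide ingress rate: sum of the per-broker rates.
        BigDecimal totalBytesInPerSec() {
          return brokerBytesInPerSec.values().stream().reduce(BigDecimal.ZERO, BigDecimal::add);
        }
      }
    }
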
@@ -1,22 +0,0 @@
package com.provectus.kafka.ui.model;

import lombok.AccessLevel;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;

@Data
@Builder(toBuilder = true)
@AllArgsConstructor(access = AccessLevel.PRIVATE)
public class MetricsConfig {
public static final String JMX_METRICS_TYPE = "JMX";
public static final String PROMETHEUS_METRICS_TYPE = "PROMETHEUS";

private final String type;
private final Integer port;
private final boolean ssl;
private final String username;
private final String password;
private final String keystoreLocation;
private final String keystorePassword;
}

@@ -0,0 +1,46 @@
package com.provectus.kafka.ui.model;

import static com.provectus.kafka.ui.config.ClustersProperties.KeystoreConfig;
import static com.provectus.kafka.ui.config.ClustersProperties.TruststoreConfig;

import com.provectus.kafka.ui.config.ClustersProperties;
import jakarta.annotation.Nullable;
import java.util.Objects;
import java.util.Optional;
import lombok.Builder;
import lombok.Value;

@Value
@Builder
public class MetricsScrapeProperties {
public static final String JMX_METRICS_TYPE = "JMX";
public static final String PROMETHEUS_METRICS_TYPE = "PROMETHEUS";

Integer port;
boolean ssl;
String username;
String password;

@Nullable
KeystoreConfig keystoreConfig;

@Nullable
TruststoreConfig truststoreConfig;

public static MetricsScrapeProperties create(ClustersProperties.Cluster cluster) {
var metrics = Objects.requireNonNull(cluster.getMetrics());
return MetricsScrapeProperties.builder()
.port(metrics.getPort())
.ssl(Optional.ofNullable(metrics.getSsl()).orElse(false))
.username(metrics.getUsername())
.password(metrics.getPassword())
.truststoreConfig(cluster.getSsl())
.keystoreConfig(
metrics.getKeystoreLocation() != null
? new KeystoreConfig(metrics.getKeystoreLocation(), metrics.getKeystorePassword())
: null
)
.build();
}

}

@@ -1,14 +1,17 @@
package com.provectus.kafka.ui.model;

import com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState;
import java.math.BigDecimal;
import java.math.RoundingMode;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
import lombok.AccessLevel;
import lombok.Getter;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.mutable.MutableInt;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartitionInfo;
@@ -29,15 +32,19 @@ public class PartitionDistributionStats {
private final boolean skewCanBeCalculated;

public static PartitionDistributionStats create(Statistics stats) {
return create(stats, MIN_PARTITIONS_FOR_SKEW_CALCULATION);
return create(
stats.topicDescriptions().toList(),
MIN_PARTITIONS_FOR_SKEW_CALCULATION
);
}

static PartitionDistributionStats create(Statistics stats, int minPartitionsForSkewCalculation) {
static PartitionDistributionStats create(List<TopicDescription> topicDescriptions,
int minPartitionsForSkewCalculation) {
var partitionLeaders = new HashMap<Node, Integer>();
var partitionsReplicated = new HashMap<Node, Integer>();
var isr = new HashMap<Node, Integer>();
int partitionsCnt = 0;
for (TopicDescription td : stats.getTopicDescriptions().values()) {
for (TopicDescription td : topicDescriptions) {
for (TopicPartitionInfo tp : td.partitions()) {
partitionsCnt++;
tp.replicas().forEach(r -> incr(partitionsReplicated, r));

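To illustrate the tallying idea behind PartitionDistributionStats, here is an assumed, standalone sketch that counts how many partitions each node leads from a list of TopicDescription; offline partitions (no leader) are skipped.

    // Illustrative sketch only; mirrors the partitionLeaders counting above.
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import org.apache.kafka.clients.admin.TopicDescription;
    import org.apache.kafka.common.Node;

    class LeaderCountSketch {
      static Map<Node, Integer> leadersPerNode(List<TopicDescription> topics) {
        var leaders = new HashMap<Node, Integer>();
        topics.forEach(td -> td.partitions().forEach(tp -> {
          if (tp.leader() != null) { // offline partitions have no leader
            leaders.merge(tp.leader(), 1, Integer::sum);
          }
        }));
        return leaders;
      }
    }
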
@@ -1,9 +1,11 @@
package com.provectus.kafka.ui.model;

import com.provectus.kafka.ui.service.ReactiveAdminClient;
import com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Stream;
import lombok.Builder;
import lombok.Value;
import org.apache.kafka.clients.admin.ConfigEntry;
@@ -18,9 +20,7 @@ public class Statistics {
List<ClusterFeature> features;
ReactiveAdminClient.ClusterDescription clusterDescription;
Metrics metrics;
InternalLogDirStats logDirInfo;
Map<String, TopicDescription> topicDescriptions;
Map<String, List<ConfigEntry>> topicConfigs;
ScrapedClusterState clusterState;

public static Statistics empty() {
return builder()
@@ -30,9 +30,12 @@ public class Statistics {
.clusterDescription(
new ReactiveAdminClient.ClusterDescription(null, null, List.of(), Set.of()))
.metrics(Metrics.empty())
.logDirInfo(InternalLogDirStats.empty())
.topicDescriptions(Map.of())
.topicConfigs(Map.of())
.clusterState(ScrapedClusterState.empty())
.build();
}

public Stream<TopicDescription> topicDescriptions() {
return clusterState.getTopicStates().values().stream().map(ScrapedClusterState.TopicState::description);
}

}

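A small sketch (assumption, not from the branch) of how callers can consume the new Stream-returning topicDescriptions() accessor instead of the removed Map field, e.g. to count partitions across the cluster.

    // Illustrative sketch only.
    import java.util.stream.Stream;
    import org.apache.kafka.clients.admin.TopicDescription;

    class StatisticsUsageSketch {
      static long partitionCount(Stream<TopicDescription> topicDescriptions) {
        return topicDescriptions.mapToLong(td -> td.partitions().size()).sum();
      }
    }
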
@@ -52,8 +52,6 @@ public class AccessContext {
}

public static final class AccessContextBuilder {
private static final String ACTIONS_NOT_PRESENT = "actions not present";

private Collection<ApplicationConfigAction> applicationConfigActions = Collections.emptySet();
private String cluster;
private Collection<ClusterConfigAction> clusterConfigActions = Collections.emptySet();
@@ -77,7 +75,7 @@ public class AccessContext {
}

public AccessContextBuilder applicationConfigActions(ApplicationConfigAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.applicationConfigActions = List.of(actions);
return this;
}
@@ -88,7 +86,7 @@ public class AccessContext {
}

public AccessContextBuilder clusterConfigActions(ClusterConfigAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.clusterConfigActions = List.of(actions);
return this;
}
@@ -99,7 +97,7 @@ public class AccessContext {
}

public AccessContextBuilder topicActions(TopicAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.topicActions = List.of(actions);
return this;
}
@@ -110,7 +108,7 @@ public class AccessContext {
}

public AccessContextBuilder consumerGroupActions(ConsumerGroupAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.consumerGroupActions = List.of(actions);
return this;
}
@@ -121,7 +119,7 @@ public class AccessContext {
}

public AccessContextBuilder connectActions(ConnectAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.connectActions = List.of(actions);
return this;
}
@@ -137,25 +135,25 @@ public class AccessContext {
}

public AccessContextBuilder schemaActions(SchemaAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.schemaActions = List.of(actions);
return this;
}

public AccessContextBuilder ksqlActions(KsqlAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.ksqlActions = List.of(actions);
return this;
}

public AccessContextBuilder aclActions(AclAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.aclActions = List.of(actions);
return this;
}

public AccessContextBuilder auditActions(AuditAction... actions) {
Assert.isTrue(actions.length > 0, ACTIONS_NOT_PRESENT);
Assert.isTrue(actions.length > 0, "actions not present");
this.auditActions = List.of(actions);
return this;
}

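The builder methods above all share the same varargs guard; a hypothetical, stripped-down version of that pattern (names are illustrative) looks like this, where an empty call fails fast instead of silently granting nothing.

    // Illustrative sketch only; Assert is org.springframework.util.Assert, as in the class above.
    import java.util.Collection;
    import java.util.List;
    import org.springframework.util.Assert;

    class ActionsGuardSketch {
      private Collection<String> topicActions = List.of();

      ActionsGuardSketch topicActions(String... actions) {
        Assert.isTrue(actions.length > 0, "actions not present");
        this.topicActions = List.of(actions);
        return this;
      }
    }
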
@@ -23,7 +23,7 @@ import javax.annotation.Nullable;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.ToString;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.collections.CollectionUtils;
import org.springframework.util.Assert;

@Getter

@@ -6,6 +6,7 @@ import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.Base64;
import java.util.Map;
import java.util.Optional;
import org.apache.kafka.common.header.Headers;

public class Base64Serde implements BuiltInSerde {

@@ -28,23 +28,6 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {

private static final JsonMapper JSON_MAPPER = createMapper();

private static final String ASSIGNMENT = "assignment";
private static final String CLIENT_HOST = "client_host";
private static final String CLIENT_ID = "client_id";
private static final String COMMIT_TIMESTAMP = "commit_timestamp";
private static final String CURRENT_STATE_TIMESTAMP = "current_state_timestamp";
private static final String GENERATION = "generation";
private static final String LEADER = "leader";
private static final String MEMBERS = "members";
private static final String MEMBER_ID = "member_id";
private static final String METADATA = "metadata";
private static final String OFFSET = "offset";
private static final String PROTOCOL = "protocol";
private static final String PROTOCOL_TYPE = "protocol_type";
private static final String REBALANCE_TIMEOUT = "rebalance_timeout";
private static final String SESSION_TIMEOUT = "session_timeout";
private static final String SUBSCRIPTION = "subscription";

public static final String TOPIC = "__consumer_offsets";

public static String name() {
@@ -133,128 +116,128 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {
private Deserializer valueDeserializer() {
final Schema commitOffsetSchemaV0 =
new Schema(
new Field(OFFSET, Type.INT64, ""),
new Field(METADATA, Type.STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, "")
new Field("offset", Type.INT64, ""),
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, "")
);

final Schema commitOffsetSchemaV1 =
new Schema(
new Field(OFFSET, Type.INT64, ""),
new Field(METADATA, Type.STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, ""),
new Field("offset", Type.INT64, ""),
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, ""),
new Field("expire_timestamp", Type.INT64, "")
);

final Schema commitOffsetSchemaV2 =
new Schema(
new Field(OFFSET, Type.INT64, ""),
new Field(METADATA, Type.STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, "")
new Field("offset", Type.INT64, ""),
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, "")
);

final Schema commitOffsetSchemaV3 =
new Schema(
new Field(OFFSET, Type.INT64, ""),
new Field("offset", Type.INT64, ""),
new Field("leader_epoch", Type.INT32, ""),
new Field(METADATA, Type.STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, "")
new Field("metadata", Type.STRING, ""),
new Field("commit_timestamp", Type.INT64, "")
);

final Schema commitOffsetSchemaV4 = new Schema(
new Field(OFFSET, Type.INT64, ""),
new Field("offset", Type.INT64, ""),
new Field("leader_epoch", Type.INT32, ""),
new Field(METADATA, Type.COMPACT_STRING, ""),
new Field(COMMIT_TIMESTAMP, Type.INT64, ""),
new Field("metadata", Type.COMPACT_STRING, ""),
new Field("commit_timestamp", Type.INT64, ""),
Field.TaggedFieldsSection.of()
);

final Schema metadataSchema0 =
new Schema(
new Field(PROTOCOL_TYPE, Type.STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
new Field(LEADER, Type.NULLABLE_STRING, ""),
new Field(MEMBERS, new ArrayOf(new Schema(
new Field(MEMBER_ID, Type.STRING, ""),
new Field(CLIENT_ID, Type.STRING, ""),
new Field(CLIENT_HOST, Type.STRING, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.BYTES, ""),
new Field(ASSIGNMENT, Type.BYTES, "")
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
)), "")
);

final Schema metadataSchema1 =
new Schema(
new Field(PROTOCOL_TYPE, Type.STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
new Field(LEADER, Type.NULLABLE_STRING, ""),
new Field(MEMBERS, new ArrayOf(new Schema(
new Field(MEMBER_ID, Type.STRING, ""),
new Field(CLIENT_ID, Type.STRING, ""),
new Field(CLIENT_HOST, Type.STRING, ""),
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.BYTES, ""),
new Field(ASSIGNMENT, Type.BYTES, "")
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
)), "")
);

final Schema metadataSchema2 =
new Schema(
new Field(PROTOCOL_TYPE, Type.STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
new Field(LEADER, Type.NULLABLE_STRING, ""),
new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
new Field(MEMBERS, new ArrayOf(new Schema(
new Field(MEMBER_ID, Type.STRING, ""),
new Field(CLIENT_ID, Type.STRING, ""),
new Field(CLIENT_HOST, Type.STRING, ""),
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.BYTES, ""),
new Field(ASSIGNMENT, Type.BYTES, "")
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("current_state_timestamp", Type.INT64, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
)), "")
);

final Schema metadataSchema3 =
new Schema(
new Field(PROTOCOL_TYPE, Type.STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.NULLABLE_STRING, ""),
new Field(LEADER, Type.NULLABLE_STRING, ""),
new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
new Field(MEMBERS, new ArrayOf(new Schema(
new Field(MEMBER_ID, Type.STRING, ""),
new Field("protocol_type", Type.STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.NULLABLE_STRING, ""),
new Field("leader", Type.NULLABLE_STRING, ""),
new Field("current_state_timestamp", Type.INT64, ""),
new Field("members", new ArrayOf(new Schema(
new Field("member_id", Type.STRING, ""),
new Field("group_instance_id", Type.NULLABLE_STRING, ""),
new Field(CLIENT_ID, Type.STRING, ""),
new Field(CLIENT_HOST, Type.STRING, ""),
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.BYTES, ""),
new Field(ASSIGNMENT, Type.BYTES, "")
new Field("client_id", Type.STRING, ""),
new Field("client_host", Type.STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.BYTES, ""),
new Field("assignment", Type.BYTES, "")
)), "")
);

final Schema metadataSchema4 =
new Schema(
new Field(PROTOCOL_TYPE, Type.COMPACT_STRING, ""),
new Field(GENERATION, Type.INT32, ""),
new Field(PROTOCOL, Type.COMPACT_NULLABLE_STRING, ""),
new Field(LEADER, Type.COMPACT_NULLABLE_STRING, ""),
new Field(CURRENT_STATE_TIMESTAMP, Type.INT64, ""),
new Field(MEMBERS, new CompactArrayOf(new Schema(
new Field(MEMBER_ID, Type.COMPACT_STRING, ""),
new Field("protocol_type", Type.COMPACT_STRING, ""),
new Field("generation", Type.INT32, ""),
new Field("protocol", Type.COMPACT_NULLABLE_STRING, ""),
new Field("leader", Type.COMPACT_NULLABLE_STRING, ""),
new Field("current_state_timestamp", Type.INT64, ""),
new Field("members", new CompactArrayOf(new Schema(
new Field("member_id", Type.COMPACT_STRING, ""),
new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, ""),
new Field(CLIENT_ID, Type.COMPACT_STRING, ""),
new Field(CLIENT_HOST, Type.COMPACT_STRING, ""),
new Field(REBALANCE_TIMEOUT, Type.INT32, ""),
new Field(SESSION_TIMEOUT, Type.INT32, ""),
new Field(SUBSCRIPTION, Type.COMPACT_BYTES, ""),
new Field(ASSIGNMENT, Type.COMPACT_BYTES, ""),
new Field("client_id", Type.COMPACT_STRING, ""),
new Field("client_host", Type.COMPACT_STRING, ""),
new Field("rebalance_timeout", Type.INT32, ""),
new Field("session_timeout", Type.INT32, ""),
new Field("subscription", Type.COMPACT_BYTES, ""),
new Field("assignment", Type.COMPACT_BYTES, ""),
Field.TaggedFieldsSection.of()
)), ""),
Field.TaggedFieldsSection.of()
@@ -266,7 +249,7 @@ public class ConsumerOffsetsSerde implements BuiltInSerde {
short version = bb.getShort();
// ideally, we should distinguish if value is commit or metadata
// by checking record's key, but our current serde structure doesn't allow that.
// so, we are trying to parse into metadata first and after into commit msg
// so, we trying to parse into metadata first and after into commit msg
try {
result = toJson(
switch (version) {

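For orientation, a minimal sketch (assumption, not from the branch) of how a Schema like commitOffsetSchemaV0 above is used: Schema#read decodes the value buffer into a Struct whose fields are then read by name.

    // Illustrative sketch only.
    import java.nio.ByteBuffer;
    import org.apache.kafka.common.protocol.types.Field;
    import org.apache.kafka.common.protocol.types.Schema;
    import org.apache.kafka.common.protocol.types.Struct;
    import org.apache.kafka.common.protocol.types.Type;

    class OffsetCommitParseSketch {
      static long parseCommittedOffset(ByteBuffer value) {
        Schema commitOffsetSchemaV0 = new Schema(
            new Field("offset", Type.INT64, ""),
            new Field("metadata", Type.STRING, ""),
            new Field("commit_timestamp", Type.INT64, ""));
        Struct struct = commitOffsetSchemaV0.read(value); // assumes the version short was already consumed
        return (Long) struct.get("offset");
      }
    }
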
@@ -2,6 +2,7 @@ package com.provectus.kafka.ui.serdes.builtin;

import com.google.common.primitives.Ints;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.PropertyResolver;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import java.util.Map;

@@ -0,0 +1,46 @@
package com.provectus.kafka.ui.serdes.builtin.sr;

import com.provectus.kafka.ui.util.jsonschema.JsonAvroConversion;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
import io.confluent.kafka.serializers.KafkaAvroSerializer;
import io.confluent.kafka.serializers.KafkaAvroSerializerConfig;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;

class AvroSchemaRegistrySerializer extends SchemaRegistrySerializer<Object> {

AvroSchemaRegistrySerializer(String topic, boolean isKey,
SchemaRegistryClient client,
SchemaMetadata schema) {
super(topic, isKey, client, schema);
}

@Override
protected Serializer<Object> createSerializer(SchemaRegistryClient client) {
var serializer = new KafkaAvroSerializer(client);
serializer.configure(
Map.of(
"schema.registry.url", "wontbeused",
AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
KafkaAvroSerializerConfig.AVRO_USE_LOGICAL_TYPE_CONVERTERS_CONFIG, true,
AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
),
isKey
);
return serializer;
}

@Override
protected Object serialize(String value, ParsedSchema schema) {
try {
return JsonAvroConversion.convertJsonToAvro(value, ((AvroSchema) schema).rawSchema());
} catch (Throwable e) {
throw new RuntimeException("Failed to serialize record for topic " + topic, e);
}

}
}

@@ -0,0 +1,79 @@
package com.provectus.kafka.ui.serdes.builtin.sr;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.util.annotation.KafkaClientInternalsDependant;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.json.JsonSchema;
import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
import io.confluent.kafka.serializers.json.KafkaJsonSchemaSerializer;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;

class JsonSchemaSchemaRegistrySerializer extends SchemaRegistrySerializer<JsonNode> {

private static final ObjectMapper MAPPER = new ObjectMapper();

JsonSchemaSchemaRegistrySerializer(String topic,
boolean isKey,
SchemaRegistryClient client,
SchemaMetadata schema) {
super(topic, isKey, client, schema);
}

@Override
protected Serializer<JsonNode> createSerializer(SchemaRegistryClient client) {
var serializer = new KafkaJsonSchemaSerializerWithoutSchemaInfer(client);
serializer.configure(
Map.of(
"schema.registry.url", "wontbeused",
AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
),
isKey
);
return serializer;
}

@Override
protected JsonNode serialize(String value, ParsedSchema schema) {
try {
JsonNode json = MAPPER.readTree(value);
((JsonSchema) schema).validate(json);
return json;
} catch (JsonProcessingException e) {
throw new ValidationException(String.format("'%s' is not valid json", value));
} catch (org.everit.json.schema.ValidationException e) {
throw new ValidationException(
String.format("'%s' does not fit schema: %s", value, e.getAllMessages()));
}
}

@KafkaClientInternalsDependant
private class KafkaJsonSchemaSerializerWithoutSchemaInfer
extends KafkaJsonSchemaSerializer<JsonNode> {

KafkaJsonSchemaSerializerWithoutSchemaInfer(SchemaRegistryClient client) {
super(client);
}

/**
* Need to override original method because it tries to infer schema from input
* by checking 'schema' json field or @Schema annotation on input class, which is not
* possible in our case. So, we just skip all infer logic and pass schema directly.
*/
@Override
public byte[] serialize(String topic, JsonNode rec) {
return super.serializeImpl(
super.getSubjectName(topic, isKey, rec, schema),
rec,
(JsonSchema) schema
);
}
}

}

@@ -0,0 +1,50 @@
package com.provectus.kafka.ui.serdes.builtin.sr;

import com.google.protobuf.DynamicMessage;
import com.google.protobuf.Message;
import com.google.protobuf.util.JsonFormat;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig;
import io.confluent.kafka.serializers.protobuf.KafkaProtobufSerializer;
import java.util.Map;
import lombok.SneakyThrows;
import org.apache.kafka.common.serialization.Serializer;

class ProtobufSchemaRegistrySerializer extends SchemaRegistrySerializer<Message> {

@SneakyThrows
public ProtobufSchemaRegistrySerializer(String topic, boolean isKey,
SchemaRegistryClient client, SchemaMetadata schema) {
super(topic, isKey, client, schema);
}

@Override
protected Serializer<Message> createSerializer(SchemaRegistryClient client) {
var serializer = new KafkaProtobufSerializer<>(client);
serializer.configure(
Map.of(
"schema.registry.url", "wontbeused",
AbstractKafkaSchemaSerDeConfig.AUTO_REGISTER_SCHEMAS, false,
AbstractKafkaSchemaSerDeConfig.USE_LATEST_VERSION, true
),
isKey
);
return serializer;
}

@Override
protected Message serialize(String value, ParsedSchema schema) {
ProtobufSchema protobufSchema = (ProtobufSchema) schema;
DynamicMessage.Builder builder = protobufSchema.newMessageBuilder();
try {
JsonFormat.parser().merge(value, builder);
return builder.build();
} catch (Throwable e) {
throw new RuntimeException("Failed to serialize record for topic " + topic, e);
}
}

}

@@ -1,8 +1,5 @@
package com.provectus.kafka.ui.serdes.builtin.sr;

import static com.provectus.kafka.ui.serdes.builtin.sr.Serialize.serializeAvro;
import static com.provectus.kafka.ui.serdes.builtin.sr.Serialize.serializeJson;
import static com.provectus.kafka.ui.serdes.builtin.sr.Serialize.serializeProto;
import static io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig.BASIC_AUTH_CREDENTIALS_SOURCE;
import static io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig.USER_INFO_CONFIG;

@@ -10,6 +7,7 @@ import com.google.common.annotations.VisibleForTesting;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.serde.api.DeserializeResult;
import com.provectus.kafka.ui.serde.api.PropertyResolver;
import com.provectus.kafka.ui.serde.api.RecordHeaders;
import com.provectus.kafka.ui.serde.api.SchemaDescription;
import com.provectus.kafka.ui.serdes.BuiltInSerde;
import com.provectus.kafka.ui.util.jsonschema.AvroJsonSchemaConverter;
@@ -34,21 +32,17 @@ import java.util.Map;
import java.util.Optional;
import java.util.concurrent.Callable;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
import org.apache.kafka.common.config.SslConfigs;


public class SchemaRegistrySerde implements BuiltInSerde {

private static final byte SR_PAYLOAD_MAGIC_BYTE = 0x0;
private static final int SR_PAYLOAD_PREFIX_LENGTH = 5;

public static String name() {
return "SchemaRegistry";
}

private static final String SCHEMA_REGISTRY = "schemaRegistry";

private SchemaRegistryClient schemaRegistryClient;
private List<String> schemaRegistryUrls;
private String valueSchemaNameTemplate;
@@ -60,7 +54,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
@Override
public boolean canBeAutoConfigured(PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
return kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class)
return kafkaClusterProperties.getListProperty("schemaRegistry", String.class)
.filter(lst -> !lst.isEmpty())
.isPresent();
}
@@ -68,7 +62,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
@Override
public void autoConfigure(PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
var urls = kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class)
var urls = kafkaClusterProperties.getListProperty("schemaRegistry", String.class)
.filter(lst -> !lst.isEmpty())
.orElseThrow(() -> new ValidationException("No urls provided for schema registry"));
configure(
@@ -94,7 +88,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
PropertyResolver kafkaClusterProperties,
PropertyResolver globalProperties) {
var urls = serdeProperties.getListProperty("url", String.class)
.or(() -> kafkaClusterProperties.getListProperty(SCHEMA_REGISTRY, String.class))
.or(() -> kafkaClusterProperties.getListProperty("schemaRegistry", String.class))
.filter(lst -> !lst.isEmpty())
.orElseThrow(() -> new ValidationException("No urls provided for schema registry"));
configure(
@@ -225,8 +219,8 @@ public class SchemaRegistrySerde implements BuiltInSerde {
.convert(basePath, ((AvroSchema) parsedSchema).rawSchema())
.toJson();
case JSON ->
//need to use confluent JsonSchema since it includes resolved references
((JsonSchema) parsedSchema).rawSchema().toString();
//need to use confluent JsonSchema since it includes resolved references
((JsonSchema) parsedSchema).rawSchema().toString();
};
}

@@ -258,27 +252,35 @@ public class SchemaRegistrySerde implements BuiltInSerde {
@Override
public Serializer serializer(String topic, Target type) {
String subject = schemaSubject(topic, type);
SchemaMetadata meta = getSchemaBySubject(subject)
.orElseThrow(() -> new ValidationException(
String.format("No schema for subject '%s' found", subject)));
ParsedSchema schema = getSchemaById(meta.getId())
.orElseThrow(() -> new IllegalStateException(
String.format("Schema found for id %s, subject '%s'", meta.getId(), subject)));
SchemaType schemaType = SchemaType.fromString(meta.getSchemaType())
.orElseThrow(() -> new IllegalStateException("Unknown schema type: " + meta.getSchemaType()));
var schema = getSchemaBySubject(subject)
.orElseThrow(() -> new ValidationException(String.format("No schema for subject '%s' found", subject)));
boolean isKey = type == Target.KEY;
SchemaType schemaType = SchemaType.fromString(schema.getSchemaType())
.orElseThrow(() -> new IllegalStateException("Unknown schema type: " + schema.getSchemaType()));
return switch (schemaType) {
case PROTOBUF -> input ->
serializeProto(schemaRegistryClient, topic, type, (ProtobufSchema) schema, meta.getId(), input);
case AVRO -> input ->
serializeAvro((AvroSchema) schema, meta.getId(), input);
case JSON -> input ->
serializeJson((JsonSchema) schema, meta.getId(), input);
case PROTOBUF -> new ProtobufSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
case AVRO -> new AvroSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
case JSON -> new JsonSchemaSchemaRegistrySerializer(topic, isKey, schemaRegistryClient, schema);
};
}

@Override
public Deserializer deserializer(String topic, Target type) {
return (headers, data) -> {
return new SrDeserializer(topic);
}

///--------------------------------------------------------------

private static final byte SR_RECORD_MAGIC_BYTE = (byte) 0;
private static final int SR_RECORD_PREFIX_LENGTH = 5;

@RequiredArgsConstructor
private class SrDeserializer implements Deserializer {

private final String topic;

@Override
public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
var schemaId = extractSchemaIdFromMsg(data);
SchemaType format = getMessageFormatBySchemaId(schemaId);
MessageFormatter formatter = schemaRegistryFormatters.get(format);
@@ -290,7 +292,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {
"type", format.name()
)
);
};
}
}

private SchemaType getMessageFormatBySchemaId(int schemaId) {
@@ -302,7 +304,7 @@ public class SchemaRegistrySerde implements BuiltInSerde {

private int extractSchemaIdFromMsg(byte[] data) {
ByteBuffer buffer = ByteBuffer.wrap(data);
if (buffer.remaining() >= SR_PAYLOAD_PREFIX_LENGTH && buffer.get() == SR_PAYLOAD_MAGIC_BYTE) {
if (buffer.remaining() > SR_RECORD_PREFIX_LENGTH && buffer.get() == SR_RECORD_MAGIC_BYTE) {
return buffer.getInt();
}
throw new ValidationException(

@@ -0,0 +1,34 @@
package com.provectus.kafka.ui.serdes.builtin.sr;

import com.provectus.kafka.ui.serde.api.Serde;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import lombok.SneakyThrows;
import org.apache.kafka.common.serialization.Serializer;

abstract class SchemaRegistrySerializer<T> implements Serde.Serializer {
protected final Serializer<T> serializer;
protected final String topic;
protected final boolean isKey;
protected final ParsedSchema schema;

@SneakyThrows
protected SchemaRegistrySerializer(String topic, boolean isKey, SchemaRegistryClient client,
SchemaMetadata schema) {
this.topic = topic;
this.isKey = isKey;
this.serializer = createSerializer(client);
this.schema = client.getSchemaById(schema.getId());
}

protected abstract Serializer<T> createSerializer(SchemaRegistryClient client);

@Override
public byte[] serialize(String input) {
final T read = this.serialize(input, schema);
return this.serializer.serialize(topic, read);
}

protected abstract T serialize(String value, ParsedSchema schema);
}

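A usage sketch for the serializer path introduced above (topic name and payload are made up): the serde resolves the latest schema for the subject and the returned Serializer produces Schema Registry framed bytes.

    // Illustrative sketch only.
    import com.provectus.kafka.ui.serde.api.Serde;
    import com.provectus.kafka.ui.serdes.builtin.sr.SchemaRegistrySerde;

    class SchemaRegistrySerdeUsageSketch {
      static byte[] serializeValue(SchemaRegistrySerde configuredSerde) {
        Serde.Serializer serializer = configuredSerde.serializer("orders", Serde.Target.VALUE);
        return serializer.serialize("{\"id\": 1}"); // JSON input, encoded per the subject's schema type
      }
    }
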
@@ -1,126 +0,0 @@
package com.provectus.kafka.ui.serdes.builtin.sr;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.base.Preconditions;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.Message;
import com.google.protobuf.util.JsonFormat;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.util.annotation.KafkaClientInternalsDependant;
import com.provectus.kafka.ui.util.jsonschema.JsonAvroConversion;
import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.json.JsonSchema;
import io.confluent.kafka.schemaregistry.json.jackson.Jackson;
import io.confluent.kafka.schemaregistry.protobuf.MessageIndexes;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import io.confluent.kafka.serializers.protobuf.AbstractKafkaProtobufSerializer;
import io.confluent.kafka.serializers.subject.DefaultReferenceSubjectNameStrategy;
import java.io.ByteArrayOutputStream;
import java.nio.ByteBuffer;
import java.util.HashMap;
import lombok.SneakyThrows;
import org.apache.avro.Schema;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.EncoderFactory;

final class Serialize {

private static final byte MAGIC = 0x0;
private static final ObjectMapper JSON_SERIALIZE_MAPPER = Jackson.newObjectMapper(); //from confluent package

private Serialize() {
}

@KafkaClientInternalsDependant("AbstractKafkaJsonSchemaSerializer::serializeImpl")
@SneakyThrows
static byte[] serializeJson(JsonSchema schema, int schemaId, String value) {
JsonNode json;
try {
json = JSON_SERIALIZE_MAPPER.readTree(value);
} catch (JsonProcessingException e) {
throw new ValidationException(String.format("'%s' is not valid json", value));
}
try {
schema.validate(json);
} catch (org.everit.json.schema.ValidationException e) {
throw new ValidationException(
String.format("'%s' does not fit schema: %s", value, e.getAllMessages()));
}
try (var out = new ByteArrayOutputStream()) {
out.write(MAGIC);
out.write(schemaId(schemaId));
out.write(JSON_SERIALIZE_MAPPER.writeValueAsBytes(json));
return out.toByteArray();
}
}

@KafkaClientInternalsDependant("AbstractKafkaProtobufSerializer::serializeImpl")
@SneakyThrows
static byte[] serializeProto(SchemaRegistryClient srClient,
String topic,
Serde.Target target,
ProtobufSchema schema,
int schemaId,
String input) {
// flags are tuned like in ProtobufSerializer by default
boolean normalizeSchema = false;
boolean autoRegisterSchema = false;
boolean useLatestVersion = true;
boolean latestCompatStrict = true;
boolean skipKnownTypes = true;

schema = AbstractKafkaProtobufSerializer.resolveDependencies(
srClient, normalizeSchema, autoRegisterSchema, useLatestVersion, latestCompatStrict,
new HashMap<>(), skipKnownTypes, new DefaultReferenceSubjectNameStrategy(),
topic, target == Serde.Target.KEY, schema
);

DynamicMessage.Builder builder = schema.newMessageBuilder();
JsonFormat.parser().merge(input, builder);
Message message = builder.build();
MessageIndexes indexes = schema.toMessageIndexes(message.getDescriptorForType().getFullName(), normalizeSchema);
try (var out = new ByteArrayOutputStream()) {
out.write(MAGIC);
out.write(schemaId(schemaId));
out.write(indexes.toByteArray());
message.writeTo(out);
return out.toByteArray();
}
}

@KafkaClientInternalsDependant("AbstractKafkaAvroSerializer::serializeImpl")
@SneakyThrows
static byte[] serializeAvro(AvroSchema schema, int schemaId, String input) {
var avroObject = JsonAvroConversion.convertJsonToAvro(input, schema.rawSchema());
try (var out = new ByteArrayOutputStream()) {
out.write(MAGIC);
out.write(schemaId(schemaId));
Schema rawSchema = schema.rawSchema();
if (rawSchema.getType().equals(Schema.Type.BYTES)) {
Preconditions.checkState(
avroObject instanceof ByteBuffer,
"Unrecognized bytes object of type: " + avroObject.getClass().getName()
);
out.write(((ByteBuffer) avroObject).array());
} else {
boolean useLogicalTypeConverters = true;
BinaryEncoder encoder = EncoderFactory.get().directBinaryEncoder(out, null);
DatumWriter<Object> writer =
(DatumWriter<Object>) AvroSchemaUtils.getDatumWriter(avroObject, rawSchema, useLogicalTypeConverters);
writer.write(avroObject, encoder);
encoder.flush();
}
return out.toByteArray();
}
}

private static byte[] schemaId(int id) {
return ByteBuffer.allocate(Integer.BYTES).putInt(id).array();
}
}

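The deleted helper above framed payloads by hand; the sketch below (an illustration, not restored code) shows the Confluent wire format it reproduced and which the Confluent serializers used by the new classes handle internally: one zero magic byte, a 4-byte big-endian schema id, then the encoded payload.

    // Illustrative sketch only.
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.nio.ByteBuffer;

    class WireFramingSketch {
      static byte[] frame(int schemaId, byte[] encodedPayload) throws IOException {
        try (var out = new ByteArrayOutputStream()) {
          out.write(0x0);                                                         // magic byte
          out.write(ByteBuffer.allocate(Integer.BYTES).putInt(schemaId).array()); // schema id
          out.write(encodedPayload);                                              // Avro/JSON/Protobuf body
          return out.toByteArray();
        }
      }
    }
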
@@ -1,5 +1,7 @@
package com.provectus.kafka.ui.service;

import static io.prometheus.client.Collector.MetricFamilySamples;

import com.provectus.kafka.ui.exception.InvalidRequestApiException;
import com.provectus.kafka.ui.exception.LogDirNotFoundApiException;
import com.provectus.kafka.ui.exception.NotFoundException;
@@ -11,7 +13,6 @@ import com.provectus.kafka.ui.model.InternalBroker;
import com.provectus.kafka.ui.model.InternalBrokerConfig;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.PartitionDistributionStats;
import com.provectus.kafka.ui.service.metrics.RawMetric;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
@@ -21,13 +22,13 @@ import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.LogDirDescription;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartitionReplica;
import org.apache.kafka.common.errors.InvalidRequestException;
import org.apache.kafka.common.errors.LogDirNotFoundException;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
import org.apache.kafka.common.requests.DescribeLogDirsResponse;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
@@ -72,7 +73,7 @@ public class BrokerService {
.get(cluster)
.flatMap(ReactiveAdminClient::describeCluster)
.map(description -> description.getNodes().stream()
.map(node -> new InternalBroker(node, partitionsDistribution, stats))
.map(node -> new InternalBroker(node, partitionsDistribution, stats.getMetrics()))
.collect(Collectors.toList()))
.flatMapMany(Flux::fromIterable);
}
@@ -110,7 +111,7 @@ public class BrokerService {
.doOnError(e -> log.error("Unexpected error", e));
}

private Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> getClusterLogDirs(
private Mono<Map<Integer, Map<String, LogDirDescription>>> getClusterLogDirs(
KafkaCluster cluster, List<Integer> reqBrokers) {
return adminClientService.get(cluster)
.flatMap(admin -> {
@@ -139,8 +140,8 @@ public class BrokerService {
return getBrokersConfig(cluster, brokerId);
}

public Mono<List<RawMetric>> getBrokerMetrics(KafkaCluster cluster, Integer brokerId) {
return Mono.justOrEmpty(statisticsCache.get(cluster).getMetrics().getPerBrokerMetrics().get(brokerId));
public Mono<List<MetricFamilySamples>> getBrokerMetrics(KafkaCluster cluster, Integer brokerId) {
return Mono.justOrEmpty(statisticsCache.get(cluster).getMetrics().getPerBrokerScrapedMetrics().get(brokerId));
}

}

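Since getBrokerMetrics now returns Prometheus MetricFamilySamples, they can be rendered to the Prometheus text exposition format; the following is a hedged sketch (not from the branch) that assumes io.prometheus:simpleclient_common is on the classpath.

    // Illustrative sketch only.
    import io.prometheus.client.Collector.MetricFamilySamples;
    import io.prometheus.client.exporter.common.TextFormat;
    import java.io.IOException;
    import java.io.StringWriter;
    import java.util.Collections;
    import java.util.List;

    class BrokerMetricsRenderSketch {
      static String toPrometheusText(List<MetricFamilySamples> samples) throws IOException {
        StringWriter writer = new StringWriter();
        TextFormat.write004(writer, Collections.enumeration(samples)); // text/plain; version=0.0.4
        return writer.toString();
      }
    }
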
@ -22,9 +22,9 @@ public class ClustersStatisticsScheduler {
|
|||
.parallel()
|
||||
.runOn(Schedulers.parallel())
|
||||
.flatMap(cluster -> {
|
||||
log.debug("Start getting metrics for kafkaCluster: {}", cluster.getName());
|
||||
log.debug("Start collection statistics for cluster: {}", cluster.getName());
|
||||
return statisticsService.updateCache(cluster)
|
||||
.doOnSuccess(m -> log.debug("Metrics updated for cluster: {}", cluster.getName()));
|
||||
.doOnSuccess(m -> log.debug("Statistics updated for cluster: {}", cluster.getName()));
|
||||
})
|
||||
.then()
|
||||
.block();
|
||||
|
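Illustrative sketch (not from the diff): the scheduler above fans statistics collection out with Reactor's parallel rails and blocks until every cluster is done. A self-contained version of the same pattern, with made-up cluster names and a delay standing in for statisticsService.updateCache:

import java.time.Duration;
import java.util.List;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

public class ParallelCollectionSketch {
  public static void main(String[] args) {
    List<String> clusters = List.of("local", "staging", "prod"); // illustrative names
    Flux.fromIterable(clusters)
        .parallel()                       // split the flux into parallel rails
        .runOn(Schedulers.parallel())     // run each rail on the parallel scheduler
        .flatMap(cluster ->
            Mono.delay(Duration.ofMillis(100)) // stands in for statisticsService.updateCache(cluster)
                .doOnSuccess(v -> System.out.println("Statistics updated for cluster: " + cluster)))
        .then()
        .block();                         // the real scheduler also blocks until all clusters finish
  }
}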
|
|
@ -1,5 +1,12 @@
|
|||
package com.provectus.kafka.ui.service;
|
||||
|
||||
import static com.provectus.kafka.ui.util.KafkaServicesValidation.validateClusterConnection;
|
||||
import static com.provectus.kafka.ui.util.KafkaServicesValidation.validateConnect;
|
||||
import static com.provectus.kafka.ui.util.KafkaServicesValidation.validateKsql;
|
||||
import static com.provectus.kafka.ui.util.KafkaServicesValidation.validatePrometheusStore;
|
||||
import static com.provectus.kafka.ui.util.KafkaServicesValidation.validateSchemaRegistry;
|
||||
import static com.provectus.kafka.ui.util.KafkaServicesValidation.validateTruststore;
|
||||
|
||||
import com.provectus.kafka.ui.client.RetryingKafkaConnectClient;
|
||||
import com.provectus.kafka.ui.config.ClustersProperties;
|
||||
import com.provectus.kafka.ui.config.WebclientProperties;
|
||||
|
@ -8,9 +15,10 @@ import com.provectus.kafka.ui.emitter.PollingSettings;
|
|||
import com.provectus.kafka.ui.model.ApplicationPropertyValidationDTO;
|
||||
import com.provectus.kafka.ui.model.ClusterConfigValidationDTO;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.model.MetricsConfig;
|
||||
import com.provectus.kafka.ui.service.ksql.KsqlApiClient;
|
||||
import com.provectus.kafka.ui.service.masking.DataMasking;
|
||||
import com.provectus.kafka.ui.service.metrics.scrape.MetricsScrapping;
|
||||
import com.provectus.kafka.ui.service.metrics.scrape.jmx.JmxMetricsRetriever;
|
||||
import com.provectus.kafka.ui.sr.ApiClient;
|
||||
import com.provectus.kafka.ui.sr.api.KafkaSrClientApi;
|
||||
import com.provectus.kafka.ui.util.KafkaServicesValidation;
|
||||
|
@ -22,11 +30,12 @@ import java.util.Map;
|
|||
import java.util.Optional;
|
||||
import java.util.Properties;
|
||||
import java.util.stream.Stream;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.util.StringUtils;
|
||||
import org.springframework.util.unit.DataSize;
|
||||
import org.springframework.web.reactive.function.client.WebClient;
|
||||
import prometheus.query.api.PrometheusClientApi;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
import reactor.util.function.Tuple2;
|
||||
|
@ -39,11 +48,13 @@ public class KafkaClusterFactory {
|
|||
private static final DataSize DEFAULT_WEBCLIENT_BUFFER = DataSize.parse("20MB");
|
||||
|
||||
private final DataSize webClientMaxBuffSize;
|
||||
private final JmxMetricsRetriever jmxMetricsRetriever;
|
||||
|
||||
public KafkaClusterFactory(WebclientProperties webclientProperties) {
|
||||
public KafkaClusterFactory(WebclientProperties webclientProperties, JmxMetricsRetriever jmxMetricsRetriever) {
|
||||
this.webClientMaxBuffSize = Optional.ofNullable(webclientProperties.getMaxInMemoryBufferSize())
|
||||
.map(DataSize::parse)
|
||||
.orElse(DEFAULT_WEBCLIENT_BUFFER);
|
||||
this.jmxMetricsRetriever = jmxMetricsRetriever;
|
||||
}
|
||||
|
||||
public KafkaCluster create(ClustersProperties properties,
|
||||
|
@ -54,8 +65,10 @@ public class KafkaClusterFactory {
|
|||
builder.bootstrapServers(clusterProperties.getBootstrapServers());
|
||||
builder.properties(convertProperties(clusterProperties.getProperties()));
|
||||
builder.readOnly(clusterProperties.isReadOnly());
|
||||
builder.exposeMetricsViaPrometheusEndpoint(exposeMetricsViaPrometheusEndpoint(clusterProperties));
|
||||
builder.masking(DataMasking.create(clusterProperties.getMasking()));
|
||||
builder.pollingSettings(PollingSettings.create(clusterProperties, properties));
|
||||
builder.metricsScrapping(MetricsScrapping.create(clusterProperties, jmxMetricsRetriever));
|
||||
|
||||
if (schemaRegistryConfigured(clusterProperties)) {
|
||||
builder.schemaRegistryClient(schemaRegistryClient(clusterProperties));
|
||||
|
@ -66,8 +79,8 @@ public class KafkaClusterFactory {
|
|||
if (ksqlConfigured(clusterProperties)) {
|
||||
builder.ksqlClient(ksqlClient(clusterProperties));
|
||||
}
|
||||
if (metricsConfigured(clusterProperties)) {
|
||||
builder.metricsConfig(metricsConfigDataToMetricsConfig(clusterProperties.getMetrics()));
|
||||
if (prometheusStorageConfigured(clusterProperties)) {
|
||||
builder.prometheusStorageClient(prometheusStorageClient(clusterProperties));
|
||||
}
|
||||
builder.originalProperties(clusterProperties);
|
||||
return builder.build();
|
||||
|
@ -75,7 +88,7 @@ public class KafkaClusterFactory {
|
|||
|
||||
public Mono<ClusterConfigValidationDTO> validate(ClustersProperties.Cluster clusterProperties) {
|
||||
if (clusterProperties.getSsl() != null) {
|
||||
Optional<String> errMsg = KafkaServicesValidation.validateTruststore(clusterProperties.getSsl());
|
||||
Optional<String> errMsg = validateTruststore(clusterProperties.getSsl());
|
||||
if (errMsg.isPresent()) {
|
||||
return Mono.just(new ClusterConfigValidationDTO()
|
||||
.kafka(new ApplicationPropertyValidationDTO()
|
||||
|
@ -85,40 +98,51 @@ public class KafkaClusterFactory {
|
|||
}
|
||||
|
||||
return Mono.zip(
|
||||
KafkaServicesValidation.validateClusterConnection(
|
||||
validateClusterConnection(
|
||||
clusterProperties.getBootstrapServers(),
|
||||
convertProperties(clusterProperties.getProperties()),
|
||||
clusterProperties.getSsl()
|
||||
),
|
||||
schemaRegistryConfigured(clusterProperties)
|
||||
? KafkaServicesValidation.validateSchemaRegistry(
|
||||
() -> schemaRegistryClient(clusterProperties)).map(Optional::of)
|
||||
? validateSchemaRegistry(() -> schemaRegistryClient(clusterProperties)).map(Optional::of)
|
||||
: Mono.<Optional<ApplicationPropertyValidationDTO>>just(Optional.empty()),
|
||||
|
||||
ksqlConfigured(clusterProperties)
|
||||
? KafkaServicesValidation.validateKsql(() -> ksqlClient(clusterProperties)).map(Optional::of)
|
||||
? validateKsql(() -> ksqlClient(clusterProperties)).map(Optional::of)
|
||||
: Mono.<Optional<ApplicationPropertyValidationDTO>>just(Optional.empty()),
|
||||
|
||||
connectClientsConfigured(clusterProperties)
|
||||
?
|
||||
Flux.fromIterable(clusterProperties.getKafkaConnect())
|
||||
.flatMap(c ->
|
||||
KafkaServicesValidation.validateConnect(() -> connectClient(clusterProperties, c))
|
||||
validateConnect(() -> connectClient(clusterProperties, c))
|
||||
.map(r -> Tuples.of(c.getName(), r)))
|
||||
.collectMap(Tuple2::getT1, Tuple2::getT2)
|
||||
.map(Optional::of)
|
||||
:
|
||||
Mono.<Optional<Map<String, ApplicationPropertyValidationDTO>>>just(Optional.empty())
|
||||
Mono.<Optional<Map<String, ApplicationPropertyValidationDTO>>>just(Optional.empty()),
|
||||
|
||||
prometheusStorageConfigured(clusterProperties)
|
||||
? validatePrometheusStore(() -> prometheusStorageClient(clusterProperties)).map(Optional::of)
|
||||
: Mono.<Optional<ApplicationPropertyValidationDTO>>just(Optional.empty())
|
||||
|
||||
).map(tuple -> {
|
||||
var validation = new ClusterConfigValidationDTO();
|
||||
validation.kafka(tuple.getT1());
|
||||
tuple.getT2().ifPresent(validation::schemaRegistry);
|
||||
tuple.getT3().ifPresent(validation::ksqldb);
|
||||
tuple.getT4().ifPresent(validation::kafkaConnects);
|
||||
tuple.getT5().ifPresent(validation::prometheusStorage);
|
||||
return validation;
|
||||
});
|
||||
}
|
||||
|
||||
private boolean exposeMetricsViaPrometheusEndpoint(ClustersProperties.Cluster clusterProperties) {
|
||||
return Optional.ofNullable(clusterProperties.getMetrics())
|
||||
.map(m -> m.getPrometheusExpose() == null || m.getPrometheusExpose())
|
||||
.orElse(true);
|
||||
}
|
||||
|
||||
private Properties convertProperties(Map<String, Object> propertiesMap) {
|
||||
Properties properties = new Properties();
|
||||
if (propertiesMap != null) {
|
||||
|
@ -153,6 +177,28 @@ public class KafkaClusterFactory {
|
|||
);
|
||||
}
|
||||
|
||||
private ReactiveFailover<PrometheusClientApi> prometheusStorageClient(ClustersProperties.Cluster cluster) {
|
||||
WebClient webClient = new WebClientConfigurator()
|
||||
.configureSsl(cluster.getSsl(), null)
|
||||
.configureBufferSize(webClientMaxBuffSize)
|
||||
.build();
|
||||
return ReactiveFailover.create(
|
||||
parseUrlList(cluster.getMetrics().getStore().getPrometheus().getUrl()),
|
||||
url -> new PrometheusClientApi(new prometheus.query.ApiClient(webClient).setBasePath(url)),
|
||||
ReactiveFailover.CONNECTION_REFUSED_EXCEPTION_FILTER,
|
||||
"No live schemaRegistry instances available",
|
||||
ReactiveFailover.DEFAULT_RETRY_GRACE_PERIOD_MS
|
||||
);
|
||||
}
|
||||
|
||||
private boolean prometheusStorageConfigured(ClustersProperties.Cluster cluster) {
|
||||
return Optional.ofNullable(cluster.getMetrics())
|
||||
.flatMap(m -> Optional.ofNullable(m.getStore()))
|
||||
.flatMap(s -> Optional.ofNullable(s.getPrometheus()))
|
||||
.map(p -> StringUtils.hasText(p.getUrl()))
|
||||
.orElse(false);
|
||||
}
|
||||
|
||||
private boolean schemaRegistryConfigured(ClustersProperties.Cluster clusterProperties) {
|
||||
return clusterProperties.getSchemaRegistry() != null;
|
||||
}
|
||||
|
@ -202,20 +248,4 @@ public class KafkaClusterFactory {
|
|||
return clusterProperties.getMetrics() != null;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
private MetricsConfig metricsConfigDataToMetricsConfig(ClustersProperties.MetricsConfigData metricsConfigData) {
|
||||
if (metricsConfigData == null) {
|
||||
return null;
|
||||
}
|
||||
MetricsConfig.MetricsConfigBuilder builder = MetricsConfig.builder();
|
||||
builder.type(metricsConfigData.getType());
|
||||
builder.port(metricsConfigData.getPort());
|
||||
builder.ssl(Optional.ofNullable(metricsConfigData.getSsl()).orElse(false));
|
||||
builder.username(metricsConfigData.getUsername());
|
||||
builder.password(metricsConfigData.getPassword());
|
||||
builder.keystoreLocation(metricsConfigData.getKeystoreLocation());
|
||||
builder.keystorePassword(metricsConfigData.getKeystorePassword());
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
}
|
||||

@@ -187,13 +187,18 @@ public class MessagesService {

  public static KafkaProducer<byte[], byte[]> createProducer(KafkaCluster cluster,
                                                             Map<String, Object> additionalProps) {
    return createProducer(cluster.getOriginalProperties(), additionalProps);
  }

  public static KafkaProducer<byte[], byte[]> createProducer(ClustersProperties.Cluster cluster,
                                                             Map<String, Object> additionalProps) {
    Properties properties = new Properties();
    SslPropertiesUtil.addKafkaSslProperties(cluster.getOriginalProperties().getSsl(), properties);
    SslPropertiesUtil.addKafkaSslProperties(cluster.getSsl(), properties);
    properties.putAll(additionalProps);
    properties.putAll(cluster.getProperties());
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    properties.putAll(additionalProps);
    return new KafkaProducer<>(properties);
  }
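Illustrative sketch (not from the diff): the reordering above applies additionalProps as the last overlay, so caller-supplied settings now override cluster-level properties. A tiny example of that precedence with arbitrary keys and values:

import java.util.Map;
import java.util.Properties;

public class ProducerPropsPrecedence {
  public static void main(String[] args) {
    Properties properties = new Properties();
    properties.putAll(Map.of("compression.type", "gzip"));  // cluster-level property
    properties.putAll(Map.of("compression.type", "zstd"));  // additionalProps, applied last
    // Prints "zstd": the later putAll wins, mirroring the new ordering in createProducer.
    System.out.println(properties.getProperty("compression.type"));
  }
}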
|
|
@ -31,7 +31,6 @@ import java.util.function.BiFunction;
|
|||
import java.util.function.Function;
|
||||
import java.util.function.Predicate;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.IntStream;
|
||||
import java.util.stream.Stream;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.AccessLevel;
|
||||
|
@ -52,11 +51,11 @@ import org.apache.kafka.clients.admin.DescribeConfigsOptions;
|
|||
import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec;
|
||||
import org.apache.kafka.clients.admin.ListOffsetsResult;
|
||||
import org.apache.kafka.clients.admin.ListTopicsOptions;
|
||||
import org.apache.kafka.clients.admin.LogDirDescription;
|
||||
import org.apache.kafka.clients.admin.NewPartitionReassignment;
|
||||
import org.apache.kafka.clients.admin.NewPartitions;
|
||||
import org.apache.kafka.clients.admin.NewTopic;
|
||||
import org.apache.kafka.clients.admin.OffsetSpec;
|
||||
import org.apache.kafka.clients.admin.ProducerState;
|
||||
import org.apache.kafka.clients.admin.RecordsToDelete;
|
||||
import org.apache.kafka.clients.admin.TopicDescription;
|
||||
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
|
||||
|
@ -79,7 +78,6 @@ import org.apache.kafka.common.errors.SecurityDisabledException;
|
|||
import org.apache.kafka.common.errors.TopicAuthorizationException;
|
||||
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
|
||||
import org.apache.kafka.common.errors.UnsupportedVersionException;
|
||||
import org.apache.kafka.common.requests.DescribeLogDirsResponse;
|
||||
import org.apache.kafka.common.resource.ResourcePatternFilter;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
@ -380,15 +378,8 @@ public class ReactiveAdminClient implements Closeable {
|
|||
);
|
||||
}
|
||||
|
||||
public Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> describeLogDirs() {
|
||||
return describeCluster()
|
||||
.map(d -> d.getNodes().stream().map(Node::id).collect(toList()))
|
||||
.flatMap(this::describeLogDirs);
|
||||
}
|
||||
|
||||
public Mono<Map<Integer, Map<String, DescribeLogDirsResponse.LogDirInfo>>> describeLogDirs(
|
||||
Collection<Integer> brokerIds) {
|
||||
return toMono(client.describeLogDirs(brokerIds).all())
|
||||
public Mono<Map<Integer, Map<String, LogDirDescription>>> describeLogDirs(Collection<Integer> brokerIds) {
|
||||
return toMono(client.describeLogDirs(brokerIds).allDescriptions())
|
||||
.onErrorResume(UnsupportedVersionException.class, th -> Mono.just(Map.of()))
|
||||
.onErrorResume(ClusterAuthorizationException.class, th -> Mono.just(Map.of()))
|
||||
.onErrorResume(th -> true, th -> {
|
||||
|
@ -660,21 +651,6 @@ public class ReactiveAdminClient implements Closeable {
|
|||
return toMono(client.alterReplicaLogDirs(replicaAssignment).all());
|
||||
}
|
||||
|
||||
// returns tp -> list of active producer's states (if any)
|
||||
public Mono<Map<TopicPartition, List<ProducerState>>> getActiveProducersState(String topic) {
|
||||
return describeTopic(topic)
|
||||
.map(td -> client.describeProducers(
|
||||
IntStream.range(0, td.partitions().size())
|
||||
.mapToObj(i -> new TopicPartition(topic, i))
|
||||
.toList()
|
||||
).all()
|
||||
)
|
||||
.flatMap(ReactiveAdminClient::toMono)
|
||||
.map(map -> map.entrySet().stream()
|
||||
.filter(e -> !e.getValue().activeProducers().isEmpty()) // skipping partitions without producers
|
||||
.collect(toMap(Map.Entry::getKey, e -> e.getValue().activeProducers())));
|
||||
}
|
||||
|
||||
private Mono<Void> incrementalAlterConfig(String topicName,
|
||||
List<ConfigEntry> currentConfigs,
|
||||
Map<String, String> newConfigs) {
|
||||

@@ -1,5 +1,6 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.model.InternalPartitionsOffsets;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.ServerStatusDTO;
import com.provectus.kafka.ui.model.Statistics;

@@ -28,38 +29,29 @@ public class StatisticsCache {

  public synchronized void update(KafkaCluster c,
                                  Map<String, TopicDescription> descriptions,
                                  Map<String, List<ConfigEntry>> configs) {
    var metrics = get(c);
    var updatedDescriptions = new HashMap<>(metrics.getTopicDescriptions());
    updatedDescriptions.putAll(descriptions);
    var updatedConfigs = new HashMap<>(metrics.getTopicConfigs());
    updatedConfigs.putAll(configs);
                                  Map<String, List<ConfigEntry>> configs,
                                  InternalPartitionsOffsets partitionsOffsets) {
    var stats = get(c);
    replace(
        c,
        metrics.toBuilder()
            .topicDescriptions(updatedDescriptions)
            .topicConfigs(updatedConfigs)
        stats.toBuilder()
            .clusterState(stats.getClusterState().updateTopics(descriptions, configs, partitionsOffsets))
            .build()
    );
  }

  public synchronized void onTopicDelete(KafkaCluster c, String topic) {
    var metrics = get(c);
    var updatedDescriptions = new HashMap<>(metrics.getTopicDescriptions());
    updatedDescriptions.remove(topic);
    var updatedConfigs = new HashMap<>(metrics.getTopicConfigs());
    updatedConfigs.remove(topic);
    var stats = get(c);
    replace(
        c,
        metrics.toBuilder()
            .topicDescriptions(updatedDescriptions)
            .topicConfigs(updatedConfigs)
        stats.toBuilder()
            .clusterState(stats.getClusterState().topicDeleted(topic))
            .build()
    );
  }

  public Statistics get(KafkaCluster c) {
    return Objects.requireNonNull(cache.get(c.getName()), "Unknown cluster metrics requested");
    return Objects.requireNonNull(cache.get(c.getName()), "Statistics for unknown cluster requested");
  }

}
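Illustrative sketch (not from the diff): StatisticsCache now treats the cached Statistics as an immutable snapshot that is copied, modified, and swapped in under a lock, so readers never observe a half-updated value. The same copy-on-write idea in a generic, self-contained form; the record and field names are made up and are not the project's classes.

import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;

public class CopyOnWriteCacheSketch {

  // Immutable snapshot; withTopicCount plays the role of toBuilder()...build()
  record Snapshot(String version, int topicCount) {
    Snapshot withTopicCount(int newCount) {
      return new Snapshot(version, newCount);
    }
  }

  private final Map<String, Snapshot> cache = new ConcurrentHashMap<>();

  public synchronized void update(String cluster, int newTopicCount) {
    Snapshot current = get(cluster);
    cache.put(cluster, current.withTopicCount(newTopicCount)); // replace the whole snapshot atomically
  }

  public Snapshot get(String cluster) {
    return Objects.requireNonNull(cache.get(cluster), "Statistics for unknown cluster requested");
  }
}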
|
|
@ -2,21 +2,16 @@ package com.provectus.kafka.ui.service;
|
|||
|
||||
import static com.provectus.kafka.ui.service.ReactiveAdminClient.ClusterDescription;
|
||||
|
||||
import com.provectus.kafka.ui.model.ClusterFeature;
|
||||
import com.provectus.kafka.ui.model.InternalLogDirStats;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.model.Metrics;
|
||||
import com.provectus.kafka.ui.model.ServerStatusDTO;
|
||||
import com.provectus.kafka.ui.model.Statistics;
|
||||
import com.provectus.kafka.ui.service.metrics.MetricsCollector;
|
||||
import java.util.List;
|
||||
import com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.clients.admin.ConfigEntry;
|
||||
import org.apache.kafka.clients.admin.TopicDescription;
|
||||
import org.apache.kafka.common.Node;
|
||||
import org.springframework.stereotype.Service;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
|
@ -25,7 +20,6 @@ import reactor.core.publisher.Mono;
|
|||
@Slf4j
|
||||
public class StatisticsService {
|
||||
|
||||
private final MetricsCollector metricsCollector;
|
||||
private final AdminClientService adminClientService;
|
||||
private final FeatureService featureService;
|
||||
private final StatisticsCache cache;
|
||||
|
@ -36,44 +30,38 @@ public class StatisticsService {
|
|||
|
||||
private Mono<Statistics> getStatistics(KafkaCluster cluster) {
|
||||
return adminClientService.get(cluster).flatMap(ac ->
|
||||
ac.describeCluster().flatMap(description ->
|
||||
ac.updateInternalStats(description.getController()).then(
|
||||
Mono.zip(
|
||||
List.of(
|
||||
metricsCollector.getBrokerMetrics(cluster, description.getNodes()),
|
||||
getLogDirInfo(description, ac),
|
||||
featureService.getAvailableFeatures(ac, cluster, description),
|
||||
loadTopicConfigs(cluster),
|
||||
describeTopics(cluster)),
|
||||
results ->
|
||||
Statistics.builder()
|
||||
.status(ServerStatusDTO.ONLINE)
|
||||
.clusterDescription(description)
|
||||
.version(ac.getVersion())
|
||||
.metrics((Metrics) results[0])
|
||||
.logDirInfo((InternalLogDirStats) results[1])
|
||||
.features((List<ClusterFeature>) results[2])
|
||||
.topicConfigs((Map<String, List<ConfigEntry>>) results[3])
|
||||
.topicDescriptions((Map<String, TopicDescription>) results[4])
|
||||
.build()
|
||||
))))
|
||||
ac.describeCluster()
|
||||
.flatMap(description ->
|
||||
ac.updateInternalStats(description.getController())
|
||||
.then(
|
||||
Mono.zip(
|
||||
featureService.getAvailableFeatures(ac, cluster, description),
|
||||
loadClusterState(description, ac)
|
||||
).flatMap(featuresAndState ->
|
||||
scrapeMetrics(cluster, featuresAndState.getT2(), description)
|
||||
.map(metrics ->
|
||||
Statistics.builder()
|
||||
.status(ServerStatusDTO.ONLINE)
|
||||
.clusterDescription(description)
|
||||
.version(ac.getVersion())
|
||||
.metrics(metrics)
|
||||
.features(featuresAndState.getT1())
|
||||
.clusterState(featuresAndState.getT2())
|
||||
.build())))))
|
||||
.doOnError(e ->
|
||||
log.error("Failed to collect cluster {} info", cluster.getName(), e))
|
||||
.onErrorResume(
|
||||
e -> Mono.just(Statistics.empty().toBuilder().lastKafkaException(e).build()));
|
||||
}
|
||||
|
||||
private Mono<InternalLogDirStats> getLogDirInfo(ClusterDescription desc, ReactiveAdminClient ac) {
|
||||
var brokerIds = desc.getNodes().stream().map(Node::id).collect(Collectors.toSet());
|
||||
return ac.describeLogDirs(brokerIds).map(InternalLogDirStats::new);
|
||||
private Mono<ScrapedClusterState> loadClusterState(ClusterDescription clusterDescription, ReactiveAdminClient ac) {
|
||||
return ScrapedClusterState.scrape(clusterDescription, ac);
|
||||
}
|
||||
|
||||
private Mono<Map<String, TopicDescription>> describeTopics(KafkaCluster c) {
|
||||
return adminClientService.get(c).flatMap(ReactiveAdminClient::describeTopics);
|
||||
}
|
||||
|
||||
private Mono<Map<String, List<ConfigEntry>>> loadTopicConfigs(KafkaCluster c) {
|
||||
return adminClientService.get(c).flatMap(ReactiveAdminClient::getTopicsConfig);
|
||||
private Mono<Metrics> scrapeMetrics(KafkaCluster cluster,
|
||||
ScrapedClusterState clusterState,
|
||||
ClusterDescription clusterDescription) {
|
||||
return cluster.getMetricsScrapping().scrape(clusterState, clusterDescription.getNodes());
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package com.provectus.kafka.ui.service;
|
||||
|
||||
import static com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState.TopicState;
|
||||
import static java.util.stream.Collectors.toList;
|
||||
import static java.util.stream.Collectors.toMap;
|
||||
|
||||
|
@ -25,6 +26,7 @@ import com.provectus.kafka.ui.model.ReplicationFactorChangeResponseDTO;
|
|||
import com.provectus.kafka.ui.model.Statistics;
|
||||
import com.provectus.kafka.ui.model.TopicCreationDTO;
|
||||
import com.provectus.kafka.ui.model.TopicUpdateDTO;
|
||||
import com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState;
|
||||
import java.time.Duration;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
|
@ -39,7 +41,6 @@ import org.apache.kafka.clients.admin.ConfigEntry;
|
|||
import org.apache.kafka.clients.admin.NewPartitionReassignment;
|
||||
import org.apache.kafka.clients.admin.NewPartitions;
|
||||
import org.apache.kafka.clients.admin.OffsetSpec;
|
||||
import org.apache.kafka.clients.admin.ProducerState;
|
||||
import org.apache.kafka.clients.admin.TopicDescription;
|
||||
import org.apache.kafka.common.Node;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
|
@ -72,20 +73,19 @@ public class TopicsService {
|
|||
return adminClientService.get(c)
|
||||
.flatMap(ac ->
|
||||
ac.describeTopics(topics).zipWith(ac.getTopicsConfig(topics, false),
|
||||
(descriptions, configs) -> {
|
||||
statisticsCache.update(c, descriptions, configs);
|
||||
return getPartitionOffsets(descriptions, ac).map(offsets -> {
|
||||
var metrics = statisticsCache.get(c);
|
||||
return createList(
|
||||
topics,
|
||||
descriptions,
|
||||
configs,
|
||||
offsets,
|
||||
metrics.getMetrics(),
|
||||
metrics.getLogDirInfo()
|
||||
);
|
||||
});
|
||||
})).flatMap(Function.identity());
|
||||
(descriptions, configs) ->
|
||||
getPartitionOffsets(descriptions, ac).map(offsets -> {
|
||||
statisticsCache.update(c, descriptions, configs, offsets);
|
||||
var stats = statisticsCache.get(c);
|
||||
return createList(
|
||||
topics,
|
||||
descriptions,
|
||||
configs,
|
||||
offsets,
|
||||
stats.getMetrics(),
|
||||
stats.getClusterState()
|
||||
);
|
||||
}))).flatMap(Function.identity());
|
||||
}
|
||||
|
||||
private Mono<InternalTopic> loadTopic(KafkaCluster c, String topicName) {
|
||||
|
@ -96,8 +96,8 @@ public class TopicsService {
|
|||
}
|
||||
|
||||
/**
|
||||
* After creation topic can be invisible via API for some time.
|
||||
* To workaround this, we retyring topic loading until it becomes visible.
|
||||
* After creation topic can be invisible via API for some time.
|
||||
* To workaround this, we retyring topic loading until it becomes visible.
|
||||
*/
|
||||
private Mono<InternalTopic> loadTopicAfterCreation(KafkaCluster c, String topicName) {
|
||||
return loadTopic(c, topicName)
|
||||
|
@ -123,7 +123,7 @@ public class TopicsService {
|
|||
Map<String, List<ConfigEntry>> configs,
|
||||
InternalPartitionsOffsets partitionsOffsets,
|
||||
Metrics metrics,
|
||||
InternalLogDirStats logDirInfo) {
|
||||
ScrapedClusterState clusterState) {
|
||||
return orderedNames.stream()
|
||||
.filter(descriptions::containsKey)
|
||||
.map(t -> InternalTopic.from(
|
||||
|
@ -131,7 +131,8 @@ public class TopicsService {
|
|||
configs.getOrDefault(t, List.of()),
|
||||
partitionsOffsets,
|
||||
metrics,
|
||||
logDirInfo,
|
||||
Optional.ofNullable(clusterState.getTopicStates().get(t)).map(s -> s.segmentStats()).orElse(null),
|
||||
Optional.ofNullable(clusterState.getTopicStates().get(t)).map(s -> s.partitionsSegmentStats()).orElse(null),
|
||||
clustersProperties.getInternalTopicPrefix()
|
||||
))
|
||||
.collect(toList());
|
||||
|
@ -226,7 +227,7 @@ public class TopicsService {
|
|||
}
|
||||
|
||||
public Mono<InternalTopic> updateTopic(KafkaCluster cl, String topicName,
|
||||
Mono<TopicUpdateDTO> topicUpdate) {
|
||||
Mono<TopicUpdateDTO> topicUpdate) {
|
||||
return topicUpdate
|
||||
.flatMap(t -> updateTopic(cl, topicName, t));
|
||||
}
|
||||
|
@ -445,26 +446,25 @@ public class TopicsService {
|
|||
|
||||
public Mono<List<InternalTopic>> getTopicsForPagination(KafkaCluster cluster) {
|
||||
Statistics stats = statisticsCache.get(cluster);
|
||||
return filterExisting(cluster, stats.getTopicDescriptions().keySet())
|
||||
Map<String, TopicState> topicStates = stats.getClusterState().getTopicStates();
|
||||
return filterExisting(cluster, topicStates.keySet())
|
||||
.map(lst -> lst.stream()
|
||||
.map(topicName ->
|
||||
InternalTopic.from(
|
||||
stats.getTopicDescriptions().get(topicName),
|
||||
stats.getTopicConfigs().getOrDefault(topicName, List.of()),
|
||||
topicStates.get(topicName).description(),
|
||||
topicStates.get(topicName).configs(),
|
||||
InternalPartitionsOffsets.empty(),
|
||||
stats.getMetrics(),
|
||||
stats.getLogDirInfo(),
|
||||
Optional.ofNullable(topicStates.get(topicName))
|
||||
.map(TopicState::segmentStats).orElse(null),
|
||||
Optional.ofNullable(topicStates.get(topicName))
|
||||
.map(TopicState::partitionsSegmentStats).orElse(null),
|
||||
clustersProperties.getInternalTopicPrefix()
|
||||
))
|
||||
))
|
||||
.collect(toList())
|
||||
);
|
||||
}
|
||||
|
||||
public Mono<Map<TopicPartition, List<ProducerState>>> getActiveProducersState(KafkaCluster cluster, String topic) {
|
||||
return adminClientService.get(cluster)
|
||||
.flatMap(ac -> ac.getActiveProducersState(topic));
|
||||
}
|
||||
|
||||
private Mono<List<String>> filterExisting(KafkaCluster cluster, Collection<String> topics) {
|
||||
return adminClientService.get(cluster)
|
||||
.flatMap(ac -> ac.listTopics(true))
|
||||

@@ -0,0 +1,25 @@
package com.provectus.kafka.ui.service.graphs;

import java.time.Duration;
import java.util.Set;
import javax.annotation.Nullable;
import lombok.Builder;

@Builder
public record GraphDescription(String id,
                               @Nullable Duration defaultInterval, //null for instant queries, set for range
                               String prometheusQuery,
                               Set<String> params) {

  public static GraphDescriptionBuilder instant() {
    return builder();
  }

  public static GraphDescriptionBuilder range(Duration defaultInterval) {
    return builder().defaultInterval(defaultInterval);
  }

  public boolean isRange() {
    return defaultInterval != null;
  }
}
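Illustrative sketch (not from the diff): how the record above can be used to declare a range graph and an instant graph. The ids and the PromQL query are hypothetical, not entries from the predefined list; the Lombok-generated builder methods are assumed from the @Builder annotation.

import com.provectus.kafka.ui.service.graphs.GraphDescription;
import java.time.Duration;
import java.util.Set;

class GraphDescriptionUsage {
  static void example() {
    GraphDescription rangeGraph = GraphDescription.range(Duration.ofDays(1))
        .id("example_topic_rate_ts")                                               // hypothetical id
        .prometheusQuery("some_metric{cluster=\"${cluster}\",topic=\"${topic}\"}") // hypothetical query
        .params(Set.of("topic"))
        .build();

    GraphDescription instantGraph = GraphDescription.instant()
        .id("example_topic_rate")
        .prometheusQuery("some_metric{cluster=\"${cluster}\"}")
        .params(Set.of())
        .build();

    System.out.println(rangeGraph.isRange() + " " + instantGraph.isRange()); // true false
  }
}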

@@ -0,0 +1,74 @@
package com.provectus.kafka.ui.service.graphs;

import static java.util.stream.Collectors.toMap;

import com.provectus.kafka.ui.exception.ValidationException;
import java.time.Duration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Stream;
import org.springframework.stereotype.Component;

@Component
class GraphDescriptions {

  private static final Duration DEFAULT_RANGE_DURATION = Duration.ofDays(7);

  private final Map<String, GraphDescription> graphsById;

  GraphDescriptions() {
    validate();
    this.graphsById = PREDEFINED_GRAPHS.stream().collect(toMap(GraphDescription::id, d -> d));
  }

  Optional<GraphDescription> getById(String id) {
    return Optional.ofNullable(graphsById.get(id));
  }

  Stream<GraphDescription> all() {
    return graphsById.values().stream();
  }

  private void validate() {
    Map<String, String> errors = new HashMap<>();
    for (GraphDescription description : PREDEFINED_GRAPHS) {
      new PromQueryTemplate(description)
          .validateSyntax()
          .ifPresent(err -> errors.put(description.id(), err));
    }
    if (!errors.isEmpty()) {
      throw new ValidationException("Error validating queries for following graphs: " + errors);
    }
  }

  private static final List<GraphDescription> PREDEFINED_GRAPHS = List.of(

      GraphDescription.range(DEFAULT_RANGE_DURATION)
          .id("broker_bytes_disk_ts")
          .prometheusQuery("broker_bytes_disk{cluster=\"${cluster}\"}")
          .params(Set.of())
          .build(),

      GraphDescription.instant()
          .id("broker_bytes_disk")
          .prometheusQuery("broker_bytes_disk{cluster=\"${cluster}\"}")
          .params(Set.of())
          .build(),

      GraphDescription.instant()
          .id("kafka_topic_partition_current_offset")
          .prometheusQuery("kafka_topic_partition_current_offset{cluster=\"${cluster}\"}")
          .params(Set.of())
          .build(),

      GraphDescription.range(DEFAULT_RANGE_DURATION)
          .id("kafka_topic_partition_current_offset_per_topic_ts")
          .prometheusQuery("kafka_topic_partition_current_offset{cluster=\"${cluster}\",topic = \"${topic}\"}")
          .params(Set.of("topic"))
          .build()
  );

}
|
@ -0,0 +1,95 @@
|
|||
package com.provectus.kafka.ui.service.graphs;
|
||||
|
||||
import com.google.common.base.Preconditions;
|
||||
import com.provectus.kafka.ui.exception.NotFoundException;
|
||||
import com.provectus.kafka.ui.exception.ValidationException;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import java.time.Duration;
|
||||
import java.time.Instant;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.stream.Stream;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import org.springframework.stereotype.Component;
|
||||
import prometheus.query.api.PrometheusClientApi;
|
||||
import prometheus.query.model.QueryResponse;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
@Component
|
||||
@RequiredArgsConstructor
|
||||
public class GraphsService {
|
||||
|
||||
private static final int TARGET_MATRIX_DATA_POINTS = 200;
|
||||
|
||||
private final GraphDescriptions graphDescriptions;
|
||||
|
||||
public Mono<QueryResponse> getGraphData(KafkaCluster cluster,
|
||||
String id,
|
||||
@Nullable Instant from,
|
||||
@Nullable Instant to,
|
||||
@Nullable Map<String, String> params) {
|
||||
|
||||
var graph = graphDescriptions.getById(id)
|
||||
.orElseThrow(() -> new NotFoundException("No graph found with id = " + id));
|
||||
|
||||
var promClient = cluster.getPrometheusStorageClient();
|
||||
if (promClient == null) {
|
||||
throw new ValidationException("Prometheus not configured for cluster");
|
||||
}
|
||||
String preparedQuery = prepareQuery(graph, cluster.getName(), params);
|
||||
return cluster.getPrometheusStorageClient()
|
||||
.mono(client -> {
|
||||
if (graph.isRange()) {
|
||||
return queryRange(client, preparedQuery, graph.defaultInterval(), from, to);
|
||||
}
|
||||
return queryInstant(client, preparedQuery);
|
||||
});
|
||||
}
|
||||
|
||||
private Mono<QueryResponse> queryRange(PrometheusClientApi c,
|
||||
String preparedQuery,
|
||||
Duration defaultPeriod,
|
||||
@Nullable Instant from,
|
||||
@Nullable Instant to) {
|
||||
if (from == null) {
|
||||
from = Instant.now().minus(defaultPeriod);
|
||||
}
|
||||
if (to == null) {
|
||||
to = Instant.now();
|
||||
}
|
||||
Preconditions.checkArgument(to.isAfter(from));
|
||||
return c.queryRange(
|
||||
preparedQuery,
|
||||
String.valueOf(from.getEpochSecond()),
|
||||
String.valueOf(to.getEpochSecond()),
|
||||
calculateStepSize(from, to),
|
||||
null
|
||||
);
|
||||
}
|
||||
|
||||
private String calculateStepSize(Instant from, Instant to) {
|
||||
long intervalInSecs = to.getEpochSecond() - from.getEpochSecond();
|
||||
if (intervalInSecs <= TARGET_MATRIX_DATA_POINTS) {
|
||||
return intervalInSecs + "s";
|
||||
}
|
||||
int step = ((int) (((double) intervalInSecs) / TARGET_MATRIX_DATA_POINTS));
|
||||
return step + "s";
|
||||
}
|
||||
|
||||
private Mono<QueryResponse> queryInstant(PrometheusClientApi c, String preparedQuery) {
|
||||
return c.query(preparedQuery, null, null);
|
||||
}
|
||||
|
||||
private String prepareQuery(GraphDescription d, String clusterName, @Nullable Map<String, String> params) {
|
||||
return new PromQueryTemplate(d).getQuery(clusterName, Optional.ofNullable(params).orElse(Map.of()));
|
||||
}
|
||||
|
||||
public Stream<GraphDescription> getGraphs(KafkaCluster cluster) {
|
||||
if (cluster.getPrometheusStorageClient() == null) {
|
||||
return Stream.empty();
|
||||
}
|
||||
return graphDescriptions.all();
|
||||
}
|
||||
|
||||
}
|
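Illustrative sketch (not from the diff): GraphsService above sizes Prometheus range queries so a query returns roughly TARGET_MATRIX_DATA_POINTS samples. The step calculation in isolation, with concrete numbers; the constant mirrors the one declared in the service.

import java.time.Duration;
import java.time.Instant;

public class StepSizeSketch {

  private static final int TARGET_MATRIX_DATA_POINTS = 200; // same target as in GraphsService

  static String calculateStepSize(Instant from, Instant to) {
    long intervalInSecs = to.getEpochSecond() - from.getEpochSecond();
    if (intervalInSecs <= TARGET_MATRIX_DATA_POINTS) {
      return intervalInSecs + "s";   // short ranges: keep one-second resolution
    }
    return (int) (((double) intervalInSecs) / TARGET_MATRIX_DATA_POINTS) + "s";
  }

  public static void main(String[] args) {
    Instant to = Instant.now();
    // A 7-day range (the default) yields 604800 / 200 = 3024s, i.e. roughly 50-minute resolution.
    System.out.println(calculateStepSize(to.minus(Duration.ofDays(7)), to));
  }
}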

@@ -0,0 +1,35 @@
package com.provectus.kafka.ui.service.graphs;

import java.util.Optional;
import org.antlr.v4.runtime.BailErrorStrategy;
import org.antlr.v4.runtime.CharStreams;
import org.antlr.v4.runtime.CommonTokenStream;
import org.antlr.v4.runtime.misc.ParseCancellationException;
import promql.PromQLLexer;
import promql.PromQLParser;

class PromQueryLangGrammar {

  // returns error msg, or empty if query is valid
  static Optional<String> validateExpression(String query) {
    try {
      parseExpression(query);
      return Optional.empty();
    } catch (ParseCancellationException e) {
      //TODO: add more descriptive msg
      return Optional.of("Syntax error");
    }
  }

  static PromQLParser.ExpressionContext parseExpression(String query) {
    return createParser(query).expression();
  }

  private static PromQLParser createParser(String str) {
    var parser = new PromQLParser(new CommonTokenStream(new PromQLLexer(CharStreams.fromString(str))));
    parser.removeErrorListeners();
    parser.setErrorHandler(new BailErrorStrategy());
    return parser;
  }

}
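Illustrative sketch (not from the diff): exercising the grammar check above. It assumes the generated ANTLR classes (promql.PromQLLexer / promql.PromQLParser) are on the classpath and that the caller sits in the same package, since PromQueryLangGrammar is package-private.

package com.provectus.kafka.ui.service.graphs;

class PromQueryLangGrammarUsage {
  static void example() {
    // A well-formed selector parses; validateExpression returns Optional.empty()
    System.out.println(PromQueryLangGrammar.validateExpression(
        "kafka_topic_partition_current_offset{cluster=\"local\"}"));
    // A broken expression trips BailErrorStrategy and comes back as Optional[Syntax error]
    System.out.println(PromQueryLangGrammar.validateExpression("sum(rate(broken{"));
  }
}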

@@ -0,0 +1,51 @@
package com.provectus.kafka.ui.service.graphs;

import com.google.common.collect.Sets;
import com.provectus.kafka.ui.exception.ValidationException;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import org.apache.commons.lang3.text.StrSubstitutor;

class PromQueryTemplate {

  private static final String CLUSTER_LABEL_NAME = "cluster";

  private final String queryTemplate;
  private final Set<String> paramsNames;

  PromQueryTemplate(GraphDescription d) {
    this(d.prometheusQuery(), d.params());
  }

  PromQueryTemplate(String templateQueryString, Set<String> paramsNames) {
    this.queryTemplate = templateQueryString;
    this.paramsNames = paramsNames;
  }

  String getQuery(String clusterName, Map<String, String> paramValues) {
    var missingParams = Sets.difference(paramsNames, paramValues.keySet());
    if (!missingParams.isEmpty()) {
      throw new ValidationException("Not all params set for query, missing: " + missingParams);
    }
    Map<String, String> replacements = new HashMap<>(paramValues);
    replacements.put(CLUSTER_LABEL_NAME, clusterName);
    return replaceParams(replacements);
  }

  // returns error msg or empty if no errors found
  Optional<String> validateSyntax() {
    Map<String, String> fakeReplacements = new HashMap<>();
    fakeReplacements.put(CLUSTER_LABEL_NAME, "1");
    paramsNames.forEach(paramName -> fakeReplacements.put(paramName, "1"));

    String prepared = replaceParams(fakeReplacements);
    return PromQueryLangGrammar.validateExpression(prepared);
  }

  private String replaceParams(Map<String, String> replacements) {
    return new StrSubstitutor(replacements).replace(queryTemplate);
  }

}
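Illustrative sketch (not from the diff): what the template substitution produces. The template string and parameter values are examples; the caller is placed in the same package because the class above is package-private.

package com.provectus.kafka.ui.service.graphs;

import java.util.Map;
import java.util.Set;

class PromQueryTemplateUsage {
  static void example() {
    var template = new PromQueryTemplate(
        "kafka_topic_partition_current_offset{cluster=\"${cluster}\",topic=\"${topic}\"}",
        Set.of("topic"));

    // The cluster label is always filled from the cluster name; the remaining params come from the caller.
    // Prints: kafka_topic_partition_current_offset{cluster="local",topic="orders"}
    System.out.println(template.getQuery("local", Map.of("topic", "orders")));

    // A missing param fails fast:
    // template.getQuery("local", Map.of()); // throws "Not all params set for query, missing: [topic]"
  }
}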
|
@@ -8,7 +8,7 @@ import java.util.Objects;
import java.util.function.Function;
import java.util.stream.Stream;
import javax.annotation.Nullable;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.collections.CollectionUtils;
import org.opendatadiscovery.oddrn.JdbcUrlParser;
import org.opendatadiscovery.oddrn.model.HivePath;
import org.opendatadiscovery.oddrn.model.MysqlPath;

@@ -1,8 +1,9 @@
package com.provectus.kafka.ui.service.integration.odd;

import static com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState.TopicState;

import com.google.common.collect.ImmutableMap;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.Statistics;
import com.provectus.kafka.ui.service.StatisticsCache;
import com.provectus.kafka.ui.service.integration.odd.schema.DataSetFieldsExtractors;
import com.provectus.kafka.ui.sr.model.SchemaSubject;

@@ -37,10 +38,10 @@ class TopicsExporter {

  Flux<DataEntityList> export(KafkaCluster cluster) {
    String clusterOddrn = Oddrn.clusterOddrn(cluster);
    Statistics stats = statisticsCache.get(cluster);
    return Flux.fromIterable(stats.getTopicDescriptions().keySet())
    var clusterState = statisticsCache.get(cluster).getClusterState();
    return Flux.fromIterable(clusterState.getTopicStates().keySet())
        .filter(topicFilter)
        .flatMap(topic -> createTopicDataEntity(cluster, topic, stats))
        .flatMap(topic -> createTopicDataEntity(cluster, topic, clusterState.getTopicStates().get(topic)))
        .onErrorContinue(
            (th, topic) -> log.warn("Error exporting data for topic {}, cluster {}", topic, cluster.getName(), th))
        .buffer(100)

@@ -50,7 +51,7 @@ class TopicsExporter {
            .items(topicsEntities));
  }

  private Mono<DataEntity> createTopicDataEntity(KafkaCluster cluster, String topic, Statistics stats) {
  private Mono<DataEntity> createTopicDataEntity(KafkaCluster cluster, String topic, TopicState topicState) {
    KafkaPath topicOddrnPath = Oddrn.topicOddrnPath(cluster, topic);
    return
        Mono.zip(

@@ -70,13 +71,13 @@ class TopicsExporter {
              .addMetadataItem(
                  new MetadataExtension()
                      .schemaUrl(URI.create("wontbeused.oops"))
                      .metadata(getTopicMetadata(topic, stats)));
                      .metadata(getTopicMetadata(topicState)));
            }
        );
  }

  private Map<String, Object> getNonDefaultConfigs(String topic, Statistics stats) {
    List<ConfigEntry> config = stats.getTopicConfigs().get(topic);
  private Map<String, Object> getNonDefaultConfigs(TopicState topicState) {
    List<ConfigEntry> config = topicState.configs();
    if (config == null) {
      return Map.of();
    }

@@ -85,12 +86,12 @@ class TopicsExporter {
        .collect(Collectors.toMap(ConfigEntry::name, ConfigEntry::value));
  }

  private Map<String, Object> getTopicMetadata(String topic, Statistics stats) {
    TopicDescription topicDescription = stats.getTopicDescriptions().get(topic);
  private Map<String, Object> getTopicMetadata(TopicState topicState) {
    TopicDescription topicDescription = topicState.description();
    return ImmutableMap.<String, Object>builder()
        .put("partitions", topicDescription.partitions().size())
        .put("replication_factor", topicDescription.partitions().get(0).replicas().size())
        .putAll(getNonDefaultConfigs(topic, stats))
        .putAll(getNonDefaultConfigs(topicState))
        .build();
  }
|
|
@ -1,69 +0,0 @@
|
|||
package com.provectus.kafka.ui.service.metrics;
|
||||
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.model.Metrics;
|
||||
import com.provectus.kafka.ui.model.MetricsConfig;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.common.Node;
|
||||
import org.springframework.stereotype.Component;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
import reactor.util.function.Tuple2;
|
||||
import reactor.util.function.Tuples;
|
||||
|
||||
@Component
|
||||
@Slf4j
|
||||
@RequiredArgsConstructor
|
||||
public class MetricsCollector {
|
||||
|
||||
private final JmxMetricsRetriever jmxMetricsRetriever;
|
||||
private final PrometheusMetricsRetriever prometheusMetricsRetriever;
|
||||
|
||||
public Mono<Metrics> getBrokerMetrics(KafkaCluster cluster, Collection<Node> nodes) {
|
||||
return Flux.fromIterable(nodes)
|
||||
.flatMap(n -> getMetrics(cluster, n).map(lst -> Tuples.of(n, lst)))
|
||||
.collectMap(Tuple2::getT1, Tuple2::getT2)
|
||||
.map(nodeMetrics -> collectMetrics(cluster, nodeMetrics))
|
||||
.defaultIfEmpty(Metrics.empty());
|
||||
}
|
||||
|
||||
private Mono<List<RawMetric>> getMetrics(KafkaCluster kafkaCluster, Node node) {
|
||||
Flux<RawMetric> metricFlux = Flux.empty();
|
||||
if (kafkaCluster.getMetricsConfig() != null) {
|
||||
String type = kafkaCluster.getMetricsConfig().getType();
|
||||
if (type == null || type.equalsIgnoreCase(MetricsConfig.JMX_METRICS_TYPE)) {
|
||||
metricFlux = jmxMetricsRetriever.retrieve(kafkaCluster, node);
|
||||
} else if (type.equalsIgnoreCase(MetricsConfig.PROMETHEUS_METRICS_TYPE)) {
|
||||
metricFlux = prometheusMetricsRetriever.retrieve(kafkaCluster, node);
|
||||
}
|
||||
}
|
||||
return metricFlux.collectList();
|
||||
}
|
||||
|
||||
public Metrics collectMetrics(KafkaCluster cluster, Map<Node, List<RawMetric>> perBrokerMetrics) {
|
||||
Metrics.MetricsBuilder builder = Metrics.builder()
|
||||
.perBrokerMetrics(
|
||||
perBrokerMetrics.entrySet()
|
||||
.stream()
|
||||
.collect(Collectors.toMap(e -> e.getKey().id(), Map.Entry::getValue)));
|
||||
|
||||
populateWellknowMetrics(cluster, perBrokerMetrics)
|
||||
.apply(builder);
|
||||
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
private WellKnownMetrics populateWellknowMetrics(KafkaCluster cluster, Map<Node, List<RawMetric>> perBrokerMetrics) {
|
||||
WellKnownMetrics wellKnownMetrics = new WellKnownMetrics();
|
||||
perBrokerMetrics.forEach((node, metrics) ->
|
||||
metrics.forEach(metric ->
|
||||
wellKnownMetrics.populate(node, metric)));
|
||||
return wellKnownMetrics;
|
||||
}
|
||||
|
||||
}
|

@@ -1,9 +0,0 @@
package com.provectus.kafka.ui.service.metrics;

import com.provectus.kafka.ui.model.KafkaCluster;
import org.apache.kafka.common.Node;
import reactor.core.publisher.Flux;

interface MetricsRetriever {
  Flux<RawMetric> retrieve(KafkaCluster c, Node node);
}

@@ -1,46 +0,0 @@
package com.provectus.kafka.ui.service.metrics;

import java.math.BigDecimal;
import java.util.Arrays;
import java.util.Optional;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.math.NumberUtils;

@Slf4j
class PrometheusEndpointMetricsParser {

  /**
   * Matches openmetrics format. For example, string:
   * kafka_server_BrokerTopicMetrics_FiveMinuteRate{name="BytesInPerSec",topic="__consumer_offsets",} 16.94886650744339
   * will produce:
   * name=kafka_server_BrokerTopicMetrics_FiveMinuteRate
   * value=16.94886650744339
   * labels={name="BytesInPerSec", topic="__consumer_offsets"}",
   */
  private static final Pattern PATTERN = Pattern.compile(
      "(?<metricName>^\\w+)([ \t]*\\{*(?<properties>.*)}*)[ \\t]+(?<value>[\\d]+\\.?[\\d]+)?");

  static Optional<RawMetric> parse(String s) {
    Matcher matcher = PATTERN.matcher(s);
    if (matcher.matches()) {
      String value = matcher.group("value");
      String metricName = matcher.group("metricName");
      if (metricName == null || !NumberUtils.isCreatable(value)) {
        return Optional.empty();
      }
      var labels = Arrays.stream(matcher.group("properties").split(","))
          .filter(str -> !"".equals(str))
          .map(str -> str.split("="))
          .filter(spit -> spit.length == 2)
          .collect(Collectors.toUnmodifiableMap(
              str -> str[0].trim(),
              str -> str[1].trim().replace("\"", "")));

      return Optional.of(RawMetric.create(metricName, labels, new BigDecimal(value)));
    }
    return Optional.empty();
  }
}
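Illustrative sketch (not from the diff): the parser removed above turned one Prometheus exposition line into a RawMetric. Its regex behaviour on the example from its own Javadoc, using only java.util.regex so it runs without the deleted class:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class ExpositionLineParseSketch {
  public static void main(String[] args) {
    Pattern pattern = Pattern.compile(
        "(?<metricName>^\\w+)([ \t]*\\{*(?<properties>.*)}*)[ \\t]+(?<value>[\\d]+\\.?[\\d]+)?");
    String line =
        "kafka_server_BrokerTopicMetrics_FiveMinuteRate{name=\"BytesInPerSec\",topic=\"__consumer_offsets\",} 16.94886650744339";
    Matcher m = pattern.matcher(line);
    if (m.matches()) {
      System.out.println(m.group("metricName")); // kafka_server_BrokerTopicMetrics_FiveMinuteRate
      System.out.println(m.group("value"));      // 16.94886650744339
      // The properties group still carries the trailing "}" here; the original parser dropped it
      // while splitting the labels on "," and "=".
      System.out.println(m.group("properties"));
    }
  }
}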
|
@ -1,70 +0,0 @@
|
|||
package com.provectus.kafka.ui.service.metrics;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import com.google.common.base.Strings;
|
||||
import com.provectus.kafka.ui.config.ClustersProperties;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.model.MetricsConfig;
|
||||
import com.provectus.kafka.ui.util.WebClientConfigurator;
|
||||
import java.util.Arrays;
|
||||
import java.util.Optional;
|
||||
import lombok.extern.slf4j.Slf4j;
|
||||
import org.apache.kafka.common.Node;
|
||||
import org.springframework.stereotype.Service;
|
||||
import org.springframework.util.unit.DataSize;
|
||||
import org.springframework.web.reactive.function.client.WebClient;
|
||||
import org.springframework.web.util.UriComponentsBuilder;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
@Service
|
||||
@Slf4j
|
||||
class PrometheusMetricsRetriever implements MetricsRetriever {
|
||||
|
||||
private static final String METRICS_ENDPOINT_PATH = "/metrics";
|
||||
private static final int DEFAULT_EXPORTER_PORT = 11001;
|
||||
|
||||
@Override
|
||||
public Flux<RawMetric> retrieve(KafkaCluster c, Node node) {
|
||||
log.debug("Retrieving metrics from prometheus exporter: {}:{}", node.host(), c.getMetricsConfig().getPort());
|
||||
|
||||
MetricsConfig metricsConfig = c.getMetricsConfig();
|
||||
var webClient = new WebClientConfigurator()
|
||||
.configureBufferSize(DataSize.ofMegabytes(20))
|
||||
.configureBasicAuth(metricsConfig.getUsername(), metricsConfig.getPassword())
|
||||
.configureSsl(
|
||||
c.getOriginalProperties().getSsl(),
|
||||
new ClustersProperties.KeystoreConfig(
|
||||
metricsConfig.getKeystoreLocation(),
|
||||
metricsConfig.getKeystorePassword()))
|
||||
.build();
|
||||
|
||||
return retrieve(webClient, node.host(), c.getMetricsConfig());
|
||||
}
|
||||
|
||||
@VisibleForTesting
|
||||
Flux<RawMetric> retrieve(WebClient webClient, String host, MetricsConfig metricsConfig) {
|
||||
int port = Optional.ofNullable(metricsConfig.getPort()).orElse(DEFAULT_EXPORTER_PORT);
|
||||
boolean sslEnabled = metricsConfig.isSsl() || metricsConfig.getKeystoreLocation() != null;
|
||||
var request = webClient.get()
|
||||
.uri(UriComponentsBuilder.newInstance()
|
||||
.scheme(sslEnabled ? "https" : "http")
|
||||
.host(host)
|
||||
.port(port)
|
||||
.path(METRICS_ENDPOINT_PATH).build().toUri());
|
||||
|
||||
WebClient.ResponseSpec responseSpec = request.retrieve();
|
||||
return responseSpec.bodyToMono(String.class)
|
||||
.doOnError(e -> log.error("Error while getting metrics from {}", host, e))
|
||||
.onErrorResume(th -> Mono.empty())
|
||||
.flatMapMany(body ->
|
||||
Flux.fromStream(
|
||||
Arrays.stream(body.split("\\n"))
|
||||
.filter(str -> !Strings.isNullOrEmpty(str) && !str.startsWith("#")) // skipping comments strings
|
||||
.map(PrometheusEndpointMetricsParser::parse)
|
||||
.filter(Optional::isPresent)
|
||||
.map(Optional::get)
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|

@@ -1,10 +1,15 @@
package com.provectus.kafka.ui.service.metrics;

import static io.prometheus.client.Collector.MetricFamilySamples;
import static io.prometheus.client.Collector.Type;

import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import lombok.AllArgsConstructor;
import lombok.EqualsAndHashCode;
import lombok.ToString;
import java.util.stream.Stream;

public interface RawMetric {

@@ -14,47 +19,27 @@ public interface RawMetric {

  BigDecimal value();

  // Key, that can be used for metrics reductions
  default Object identityKey() {
    return name() + "_" + labels();
  }

  RawMetric copyWithValue(BigDecimal newValue);

  //--------------------------------------------------

  static RawMetric create(String name, Map<String, String> labels, BigDecimal value) {
    return new SimpleMetric(name, labels, value);
  }

  @AllArgsConstructor
  @EqualsAndHashCode
  @ToString
  class SimpleMetric implements RawMetric {

    private final String name;
    private final Map<String, String> labels;
    private final BigDecimal value;

    @Override
    public String name() {
      return name;
    }

    @Override
    public Map<String, String> labels() {
      return labels;
    }

    @Override
    public BigDecimal value() {
      return value;
    }

    @Override
    public RawMetric copyWithValue(BigDecimal newValue) {
      return new SimpleMetric(name, labels, newValue);
  static Stream<MetricFamilySamples> groupIntoMfs(Collection<RawMetric> rawMetrics) {
    Map<String, MetricFamilySamples> map = new LinkedHashMap<>();
    for (RawMetric m : rawMetrics) {
      var mfs = map.get(m.name());
      if (mfs == null) {
        mfs = new MetricFamilySamples(m.name(), Type.GAUGE, m.name(), new ArrayList<>());
        map.put(m.name(), mfs);
      }
      List<String> lbls = m.labels().keySet().stream().toList();
      List<String> lblVals = lbls.stream().map(l -> m.labels().get(l)).toList();
      mfs.samples.add(new MetricFamilySamples.Sample(m.name(), lbls, lblVals, m.value().doubleValue()));
    }
    return map.values().stream();
  }

  record SimpleMetric(String name, Map<String, String> labels, BigDecimal value) implements RawMetric { }

}
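Illustrative sketch (not from the diff): two raw JMX samples with the same metric name collapse into one gauge family with two samples via the new groupIntoMfs helper. The metric name and label values are made up; the Prometheus simpleclient types are assumed to be on the classpath.

import com.provectus.kafka.ui.service.metrics.RawMetric;
import java.math.BigDecimal;
import java.util.List;
import java.util.Map;

class RawMetricGroupingUsage {
  static void example() {
    var perTopicBytesIn = List.of(
        RawMetric.create("kafka_server_bytes_in", Map.of("topic", "orders"), new BigDecimal("10.5")),
        RawMetric.create("kafka_server_bytes_in", Map.of("topic", "payments"), new BigDecimal("3.2")));

    RawMetric.groupIntoMfs(perTopicBytesIn)
        // one MetricFamilySamples of type GAUGE named "kafka_server_bytes_in" holding two samples
        .forEach(mfs -> System.out.println(mfs.name + " -> " + mfs.samples.size()));
  }
}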

@@ -0,0 +1,73 @@
package com.provectus.kafka.ui.service.metrics;

import static java.util.stream.Collectors.toMap;

import com.google.common.collect.Streams;
import com.provectus.kafka.ui.model.Metrics;
import groovy.lang.Tuple;
import io.prometheus.client.Collector.MetricFamilySamples;
import java.util.Collection;
import java.util.LinkedHashMap;
import java.util.Optional;
import java.util.stream.Stream;
import lombok.RequiredArgsConstructor;

@RequiredArgsConstructor
public class SummarizedMetrics {

  private final Metrics metrics;

  public Stream<MetricFamilySamples> asStream() {
    return Streams.concat(
        metrics.getInferredMetrics().asStream(),
        metrics.getPerBrokerScrapedMetrics()
            .values()
            .stream()
            .flatMap(Collection::stream)
            .collect(toMap(mfs -> mfs.name, Optional::of, SummarizedMetrics::summarizeMfs, LinkedHashMap::new))
            .values()
            .stream()
            .filter(Optional::isPresent)
            .map(Optional::get)
    );
  }

  //returns Optional.empty if merging not supported for metric type
  private static Optional<MetricFamilySamples> summarizeMfs(Optional<MetricFamilySamples> mfs1opt,
                                                            Optional<MetricFamilySamples> mfs2opt) {
    if ((mfs1opt.isEmpty() || mfs2opt.isEmpty()) || (mfs1opt.get().type != mfs2opt.get().type)) {
      return Optional.empty();
    }
    var mfs1 = mfs1opt.get();
    return switch (mfs1.type) {
      case GAUGE, COUNTER -> Optional.of(
          new MetricFamilySamples(
              mfs1.name,
              mfs1.type,
              mfs1.help,
              Stream.concat(mfs1.samples.stream(), mfs2opt.get().samples.stream())
                  .collect(
                      toMap(
                          // merging samples with same labels
                          s -> Tuple.tuple(s.name, s.labelNames, s.labelValues),
                          s -> s,
                          (s1, s2) -> new MetricFamilySamples.Sample(
                              s1.name,
                              s1.labelNames,
                              s1.labelValues,
                              s1.value + s2.value
                          ),
                          LinkedHashMap::new
                      )
                  )
                  .values()
                  .stream()
                  .toList()
          )
      );
      default -> Optional.empty();
    };
  }

}
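Illustrative sketch (not from the diff): the summarization above only merges GAUGE and COUNTER families, adding up samples that carry identical label values. The merge rule in isolation with made-up names and numbers, using the Prometheus simpleclient types directly:

import io.prometheus.client.Collector;
import io.prometheus.client.Collector.MetricFamilySamples;
import java.util.List;

public class GaugeMergeSketch {
  public static void main(String[] args) {
    var labels = List.of("cluster");
    var values = List.of("local");
    // The same family scraped from broker 1 and broker 2 ...
    var fromBroker1 = new MetricFamilySamples.Sample("broker_bytes_disk", labels, values, 100.0);
    var fromBroker2 = new MetricFamilySamples.Sample("broker_bytes_disk", labels, values, 250.0);
    // ... is exposed cluster-wide as a single sample with the summed value (350.0),
    // which is what summarizeMfs does for GAUGE/COUNTER families with matching labels.
    var summed = new MetricFamilySamples.Sample(
        fromBroker1.name, fromBroker1.labelNames, fromBroker1.labelValues,
        fromBroker1.value + fromBroker2.value);
    var family = new MetricFamilySamples("broker_bytes_disk", Collector.Type.GAUGE,
        "broker_bytes_disk", List.of(summed));
    System.out.println(family.samples.get(0).value); // 350.0
  }
}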

@ -1,70 +0,0 @@
package com.provectus.kafka.ui.service.metrics;

import static org.apache.commons.lang3.StringUtils.containsIgnoreCase;
import static org.apache.commons.lang3.StringUtils.endsWithIgnoreCase;

import com.provectus.kafka.ui.model.Metrics;
import java.math.BigDecimal;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.Node;

class WellKnownMetrics {

  private static final String BROKER_TOPIC_METRICS = "BrokerTopicMetrics";
  private static final String FIFTEEN_MINUTE_RATE = "FifteenMinuteRate";

  // per broker
  final Map<Integer, BigDecimal> brokerBytesInFifteenMinuteRate = new HashMap<>();
  final Map<Integer, BigDecimal> brokerBytesOutFifteenMinuteRate = new HashMap<>();

  // per topic
  final Map<String, BigDecimal> bytesInFifteenMinuteRate = new HashMap<>();
  final Map<String, BigDecimal> bytesOutFifteenMinuteRate = new HashMap<>();

  void populate(Node node, RawMetric rawMetric) {
    updateBrokerIOrates(node, rawMetric);
    updateTopicsIOrates(rawMetric);
  }

  void apply(Metrics.MetricsBuilder metricsBuilder) {
    metricsBuilder.topicBytesInPerSec(bytesInFifteenMinuteRate);
    metricsBuilder.topicBytesOutPerSec(bytesOutFifteenMinuteRate);
    metricsBuilder.brokerBytesInPerSec(brokerBytesInFifteenMinuteRate);
    metricsBuilder.brokerBytesOutPerSec(brokerBytesOutFifteenMinuteRate);
  }

  private void updateBrokerIOrates(Node node, RawMetric rawMetric) {
    String name = rawMetric.name();
    if (!brokerBytesInFifteenMinuteRate.containsKey(node.id())
        && rawMetric.labels().size() == 1
        && "BytesInPerSec".equalsIgnoreCase(rawMetric.labels().get("name"))
        && containsIgnoreCase(name, BROKER_TOPIC_METRICS)
        && endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) {
      brokerBytesInFifteenMinuteRate.put(node.id(), rawMetric.value());
    }
    if (!brokerBytesOutFifteenMinuteRate.containsKey(node.id())
        && rawMetric.labels().size() == 1
        && "BytesOutPerSec".equalsIgnoreCase(rawMetric.labels().get("name"))
        && containsIgnoreCase(name, BROKER_TOPIC_METRICS)
        && endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) {
      brokerBytesOutFifteenMinuteRate.put(node.id(), rawMetric.value());
    }
  }

  private void updateTopicsIOrates(RawMetric rawMetric) {
    String name = rawMetric.name();
    String topic = rawMetric.labels().get("topic");
    if (topic != null
        && containsIgnoreCase(name, BROKER_TOPIC_METRICS)
        && endsWithIgnoreCase(name, FIFTEEN_MINUTE_RATE)) {
      String nameProperty = rawMetric.labels().get("name");
      if ("BytesInPerSec".equalsIgnoreCase(nameProperty)) {
        bytesInFifteenMinuteRate.compute(topic, (k, v) -> v == null ? rawMetric.value() : v.add(rawMetric.value()));
      } else if ("BytesOutPerSec".equalsIgnoreCase(nameProperty)) {
        bytesOutFifteenMinuteRate.compute(topic, (k, v) -> v == null ? rawMetric.value() : v.add(rawMetric.value()));
      }
    }
  }

}

@ -0,0 +1,123 @@
package com.provectus.kafka.ui.service.metrics.prometheus;

import static io.prometheus.client.Collector.MetricFamilySamples;
import static io.prometheus.client.exporter.common.TextFormat.CONTENT_TYPE_OPENMETRICS_100;
import static org.springframework.http.HttpHeaders.CONTENT_TYPE;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Iterators;
import com.provectus.kafka.ui.model.Metrics;
import io.prometheus.client.exporter.common.TextFormat;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import lombok.SneakyThrows;
import org.springframework.http.HttpHeaders;
import org.springframework.http.ResponseEntity;

public final class PrometheusExpose {

  private static final String CLUSTER_EXPOSE_LBL_NAME = "cluster";
  private static final String BROKER_EXPOSE_LBL_NAME = "broker_id";

  private static final HttpHeaders PROMETHEUS_EXPOSE_ENDPOINT_HEADERS;

  static {
    PROMETHEUS_EXPOSE_ENDPOINT_HEADERS = new HttpHeaders();
    PROMETHEUS_EXPOSE_ENDPOINT_HEADERS.set(CONTENT_TYPE, CONTENT_TYPE_OPENMETRICS_100);
  }

  private PrometheusExpose() {
  }

  public static ResponseEntity<String> exposeAllMetrics(Map<String, Metrics> clustersMetrics) {
    return constructHttpsResponse(getMetricsForGlobalExpose(clustersMetrics));
  }

  private static Stream<MetricFamilySamples> getMetricsForGlobalExpose(Map<String, Metrics> clustersMetrics) {
    return clustersMetrics.entrySet()
        .stream()
        .flatMap(e -> prepareMetricsForGlobalExpose(e.getKey(), e.getValue()))
        // merging MFS with same name with LinkedHashMap(for order keeping)
        .collect(Collectors.toMap(mfs -> mfs.name, mfs -> mfs,
            PrometheusExpose::concatSamples, LinkedHashMap::new))
        .values()
        .stream();
  }

  public static Stream<MetricFamilySamples> prepareMetricsForGlobalExpose(String clusterName, Metrics metrics) {
    return Stream.concat(
            metrics.getInferredMetrics().asStream(),
            extractBrokerMetricsWithLabel(metrics)
        )
        .map(mfs -> appendLabel(mfs, CLUSTER_EXPOSE_LBL_NAME, clusterName));
  }

  private static Stream<MetricFamilySamples> extractBrokerMetricsWithLabel(Metrics metrics) {
    return metrics.getPerBrokerScrapedMetrics().entrySet().stream()
        .flatMap(e -> {
          String brokerId = String.valueOf(e.getKey());
          return e.getValue().stream().map(mfs -> appendLabel(mfs, BROKER_EXPOSE_LBL_NAME, brokerId));
        });
  }

  private static MetricFamilySamples concatSamples(MetricFamilySamples mfs1,
                                                   MetricFamilySamples mfs2) {
    return new MetricFamilySamples(
        mfs1.name, mfs1.unit, mfs1.type, mfs1.help,
        Stream.concat(mfs1.samples.stream(), mfs2.samples.stream()).toList()
    );
  }

  private static MetricFamilySamples appendLabel(MetricFamilySamples mfs, String lblName, String lblVal) {
    return new MetricFamilySamples(
        mfs.name, mfs.unit, mfs.type, mfs.help,
        mfs.samples.stream()
            .map(sample ->
                new MetricFamilySamples.Sample(
                    sample.name,
                    prependToList(sample.labelNames, lblName),
                    prependToList(sample.labelValues, lblVal),
                    sample.value
                )).toList()
    );
  }

  private static <T> List<T> prependToList(List<T> lst, T toPrepend) {
    var result = new ArrayList<T>(lst.size() + 1);
    result.add(toPrepend);
    result.addAll(lst);
    return result;
  }

  @VisibleForTesting
  @SneakyThrows
  public static ResponseEntity<String> constructHttpsResponse(Stream<MetricFamilySamples> metrics) {
    StringWriter writer = new StringWriter();
    TextFormat.writeOpenMetrics100(writer, Iterators.asEnumeration(metrics.iterator()));
    return ResponseEntity
        .ok()
        .headers(PROMETHEUS_EXPOSE_ENDPOINT_HEADERS)
        .body(writer.toString());
  }

  // copied from io.prometheus.client.exporter.common.TextFormat:writeEscapedLabelValue
  public static String escapedLabelValue(String s) {
    StringBuilder sb = new StringBuilder(s.length());
    for (int i = 0; i < s.length(); i++) {
      char c = s.charAt(i);
      switch (c) {
        case '\\' -> sb.append("\\\\");
        case '\"' -> sb.append("\\\"");
        case '\n' -> sb.append("\\n");
        default -> sb.append(c);
      }
    }
    return sb.toString();
  }

}
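
For orientation, here is a minimal usage sketch for the class above (not part of the changeset; the demo class, metric name, labels and values are invented, and it assumes the Prometheus simpleclient and Spring classes already imported by PrometheusExpose are on the classpath):

package com.provectus.kafka.ui.service.metrics.prometheus;

import static io.prometheus.client.Collector.MetricFamilySamples;
import static io.prometheus.client.Collector.Type;

import java.util.List;
import java.util.stream.Stream;

// Hypothetical demo class, not part of the branch.
class PrometheusExposeDemo {

  public static void main(String[] args) {
    // One gauge family with a single sample; name and label values are made up for illustration.
    var sample = new MetricFamilySamples.Sample(
        "kafka_ui_up", List.of("cluster"), List.of("local"), 1.0);
    var family = new MetricFamilySamples(
        "kafka_ui_up", "", Type.GAUGE, "Dummy availability gauge", List.of(sample));

    // constructHttpsResponse(...) renders the families as OpenMetrics text
    // and attaches the matching Content-Type header.
    var response = PrometheusExpose.constructHttpsResponse(Stream.of(family));
    System.out.println(response.getHeaders().getFirst("Content-Type"));
    System.out.println(response.getBody());
  }
}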

@ -0,0 +1,83 @@
package com.provectus.kafka.ui.service.metrics.scrape;

import static io.prometheus.client.Collector.MetricFamilySamples;
import static org.apache.commons.lang3.StringUtils.containsIgnoreCase;
import static org.apache.commons.lang3.StringUtils.endsWithIgnoreCase;

import com.provectus.kafka.ui.model.Metrics;
import java.math.BigDecimal;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Scans external jmx/prometheus metric and tries to infer io rates
class IoRatesMetricsScanner {

  // per broker
  final Map<Integer, BigDecimal> brokerBytesInFifteenMinuteRate = new HashMap<>();
  final Map<Integer, BigDecimal> brokerBytesOutFifteenMinuteRate = new HashMap<>();

  // per topic
  final Map<String, BigDecimal> bytesInFifteenMinuteRate = new HashMap<>();
  final Map<String, BigDecimal> bytesOutFifteenMinuteRate = new HashMap<>();

  IoRatesMetricsScanner(Map<Integer, List<MetricFamilySamples>> perBrokerMetrics) {
    perBrokerMetrics.forEach((nodeId, metrics) -> {
      metrics.forEach(m -> {
        m.samples.forEach(metricSample -> {
          updateBrokerIOrates(nodeId, metricSample);
          updateTopicsIOrates(metricSample);
        });
      });
    });
  }

  Metrics.IoRates get() {
    return Metrics.IoRates.builder()
        .topicBytesInPerSec(bytesInFifteenMinuteRate)
        .topicBytesOutPerSec(bytesOutFifteenMinuteRate)
        .brokerBytesInPerSec(brokerBytesInFifteenMinuteRate)
        .brokerBytesOutPerSec(brokerBytesOutFifteenMinuteRate)
        .build();
  }

  private void updateBrokerIOrates(int nodeId, MetricFamilySamples.Sample metric) {
    String name = metric.name;
    if (!brokerBytesInFifteenMinuteRate.containsKey(nodeId)
        && metric.labelValues.size() == 1
        && "BytesInPerSec".equalsIgnoreCase(metric.labelValues.get(0))
        && containsIgnoreCase(name, "BrokerTopicMetrics")
        && endsWithIgnoreCase(name, "FifteenMinuteRate")) {
      brokerBytesInFifteenMinuteRate.put(nodeId, BigDecimal.valueOf(metric.value));
    }
    if (!brokerBytesOutFifteenMinuteRate.containsKey(nodeId)
        && metric.labelValues.size() == 1
        && "BytesOutPerSec".equalsIgnoreCase(metric.labelValues.get(0))
        && containsIgnoreCase(name, "BrokerTopicMetrics")
        && endsWithIgnoreCase(name, "FifteenMinuteRate")) {
      brokerBytesOutFifteenMinuteRate.put(nodeId, BigDecimal.valueOf(metric.value));
    }
  }

  private void updateTopicsIOrates(MetricFamilySamples.Sample metric) {
    String name = metric.name;
    int topicLblIdx = metric.labelNames.indexOf("topic");
    if (topicLblIdx >= 0
        && containsIgnoreCase(name, "BrokerTopicMetrics")
        && endsWithIgnoreCase(name, "FifteenMinuteRate")) {
      String topic = metric.labelValues.get(topicLblIdx);
      int nameLblIdx = metric.labelNames.indexOf("name");
      if (nameLblIdx >= 0) {
        var nameLblVal = metric.labelValues.get(nameLblIdx);
        if ("BytesInPerSec".equalsIgnoreCase(nameLblVal)) {
          BigDecimal val = BigDecimal.valueOf(metric.value);
          bytesInFifteenMinuteRate.merge(topic, val, BigDecimal::add);
        } else if ("BytesOutPerSec".equalsIgnoreCase(nameLblVal)) {
          BigDecimal val = BigDecimal.valueOf(metric.value);
          bytesOutFifteenMinuteRate.merge(topic, val, BigDecimal::add);
        }
      }
    }
  }

}
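
A rough sketch of how the scanner above can be fed (illustrative only; the metric name and values are invented, and the demo sits in the same package because IoRatesMetricsScanner and its fields are package-private):

package com.provectus.kafka.ui.service.metrics.scrape;

import static io.prometheus.client.Collector.MetricFamilySamples;
import static io.prometheus.client.Collector.Type;

import java.util.List;
import java.util.Map;

// Hypothetical demo class, not part of the branch.
class IoRatesMetricsScannerDemo {

  public static void main(String[] args) {
    // A JMX-style sample: BytesInPerSec FifteenMinuteRate for topic "test" on broker 1.
    var sample = new MetricFamilySamples.Sample(
        "kafka_server_BrokerTopicMetrics_FifteenMinuteRate",
        List.of("name", "topic"),
        List.of("BytesInPerSec", "test"),
        1024.0);
    var family = new MetricFamilySamples(
        "kafka_server_BrokerTopicMetrics_FifteenMinuteRate", "", Type.GAUGE, "", List.of(sample));

    var scanner = new IoRatesMetricsScanner(Map.of(1, List.of(family)));
    // The topic-level byte-in rate should now hold 1024 for "test".
    System.out.println(scanner.bytesInFifteenMinuteRate);
    // get() packs the collected maps into a Metrics.IoRates value object.
    System.out.println(scanner.get());
  }
}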

@ -0,0 +1,94 @@
package com.provectus.kafka.ui.service.metrics.scrape;

import static com.provectus.kafka.ui.config.ClustersProperties.Cluster;
import static com.provectus.kafka.ui.config.ClustersProperties.KeystoreConfig;
import static com.provectus.kafka.ui.model.MetricsScrapeProperties.JMX_METRICS_TYPE;
import static com.provectus.kafka.ui.model.MetricsScrapeProperties.PROMETHEUS_METRICS_TYPE;
import static io.prometheus.client.Collector.MetricFamilySamples;

import com.provectus.kafka.ui.model.Metrics;
import com.provectus.kafka.ui.model.MetricsScrapeProperties;
import com.provectus.kafka.ui.service.metrics.prometheus.PrometheusExpose;
import com.provectus.kafka.ui.service.metrics.scrape.inferred.InferredMetrics;
import com.provectus.kafka.ui.service.metrics.scrape.inferred.InferredMetricsScraper;
import com.provectus.kafka.ui.service.metrics.scrape.jmx.JmxMetricsRetriever;
import com.provectus.kafka.ui.service.metrics.scrape.jmx.JmxMetricsScraper;
import com.provectus.kafka.ui.service.metrics.scrape.prometheus.PrometheusScraper;
import com.provectus.kafka.ui.service.metrics.sink.MetricsSink;
import jakarta.annotation.Nullable;
import java.util.Collection;
import java.util.Optional;
import java.util.stream.Stream;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.common.Node;
import reactor.core.publisher.Mono;

@Slf4j
@RequiredArgsConstructor
public class MetricsScrapping {

  private final String clusterName;
  private final MetricsSink sink;
  private final InferredMetricsScraper inferredMetricsScraper;
  @Nullable
  private final JmxMetricsScraper jmxMetricsScraper;
  @Nullable
  private final PrometheusScraper prometheusScraper;

  public static MetricsScrapping create(Cluster cluster,
                                        JmxMetricsRetriever jmxMetricsRetriever) {
    JmxMetricsScraper jmxMetricsScraper = null;
    PrometheusScraper prometheusScraper = null;
    var metrics = cluster.getMetrics();
    if (cluster.getMetrics() != null) {
      var scrapeProperties = MetricsScrapeProperties.create(cluster);
      if (metrics.getType().equalsIgnoreCase(JMX_METRICS_TYPE) && metrics.getPort() != null) {
        jmxMetricsScraper = new JmxMetricsScraper(scrapeProperties, jmxMetricsRetriever);
      } else if (metrics.getType().equalsIgnoreCase(PROMETHEUS_METRICS_TYPE)) {
        prometheusScraper = new PrometheusScraper(scrapeProperties);
      }
    }
    return new MetricsScrapping(
        cluster.getName(),
        MetricsSink.create(cluster),
        new InferredMetricsScraper(),
        jmxMetricsScraper,
        prometheusScraper
    );
  }

  public Mono<Metrics> scrape(ScrapedClusterState clusterState, Collection<Node> nodes) {
    Mono<InferredMetrics> inferred = inferredMetricsScraper.scrape(clusterState);
    Mono<PerBrokerScrapedMetrics> external = scrapeExternal(nodes);
    return inferred.zipWith(
        external,
        (inf, ext) -> Metrics.builder()
            .inferredMetrics(inf)
            .ioRates(ext.ioRates())
            .perBrokerScrapedMetrics(ext.perBrokerMetrics())
            .build()
    ).doOnNext(this::sendMetricsToSink);
  }

  private void sendMetricsToSink(Metrics metrics) {
    sink.send(prepareMetricsForSending(metrics))
        .doOnError(th -> log.warn("Error sending metrics to metrics sink", th))
        .subscribe();
  }

  private Stream<MetricFamilySamples> prepareMetricsForSending(Metrics metrics) {
    return PrometheusExpose.prepareMetricsForGlobalExpose(clusterName, metrics);
  }

  private Mono<PerBrokerScrapedMetrics> scrapeExternal(Collection<Node> nodes) {
    if (jmxMetricsScraper != null) {
      return jmxMetricsScraper.scrape(nodes);
    }
    if (prometheusScraper != null) {
      return prometheusScraper.scrape(nodes);
    }
    return Mono.just(PerBrokerScrapedMetrics.empty());
  }

}

@ -0,0 +1,19 @@
package com.provectus.kafka.ui.service.metrics.scrape;

import static io.prometheus.client.Collector.MetricFamilySamples;

import com.provectus.kafka.ui.model.Metrics;
import java.util.List;
import java.util.Map;

public record PerBrokerScrapedMetrics(Map<Integer, List<MetricFamilySamples>> perBrokerMetrics) {

  static PerBrokerScrapedMetrics empty() {
    return new PerBrokerScrapedMetrics(Map.of());
  }

  Metrics.IoRates ioRates() {
    return new IoRatesMetricsScanner(perBrokerMetrics).get();
  }

}

@ -0,0 +1,198 @@
package com.provectus.kafka.ui.service.metrics.scrape;

import static com.provectus.kafka.ui.model.InternalLogDirStats.LogDirSpaceStats;
import static com.provectus.kafka.ui.model.InternalLogDirStats.SegmentStats;
import static com.provectus.kafka.ui.service.ReactiveAdminClient.ClusterDescription;

import com.google.common.collect.Table;
import com.provectus.kafka.ui.model.InternalLogDirStats;
import com.provectus.kafka.ui.model.InternalPartitionsOffsets;
import com.provectus.kafka.ui.service.ReactiveAdminClient;
import jakarta.annotation.Nullable;
import java.time.Instant;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.stream.Collectors;
import lombok.Builder;
import lombok.RequiredArgsConstructor;
import lombok.Value;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.admin.ConsumerGroupListing;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import reactor.core.publisher.Mono;

@Builder(toBuilder = true)
@RequiredArgsConstructor
@Value
public class ScrapedClusterState {

  Instant scrapeFinishedAt;
  Map<Integer, NodeState> nodesStates;
  Map<String, TopicState> topicStates;
  Map<String, ConsumerGroupState> consumerGroupsStates;

  public record NodeState(int id,
                          Node node,
                          @Nullable SegmentStats segmentStats,
                          @Nullable LogDirSpaceStats logDirSpaceStats) {
  }

  public record TopicState(
      String name,
      TopicDescription description,
      List<ConfigEntry> configs,
      Map<Integer, Long> startOffsets,
      Map<Integer, Long> endOffsets,
      @Nullable SegmentStats segmentStats,
      @Nullable Map<Integer, SegmentStats> partitionsSegmentStats) {
  }

  public record ConsumerGroupState(
      String group,
      ConsumerGroupDescription description,
      Map<TopicPartition, Long> committedOffsets) {
  }

  public static ScrapedClusterState empty() {
    return ScrapedClusterState.builder()
        .scrapeFinishedAt(Instant.now())
        .nodesStates(Map.of())
        .topicStates(Map.of())
        .consumerGroupsStates(Map.of())
        .build();
  }

  public ScrapedClusterState updateTopics(Map<String, TopicDescription> descriptions,
                                          Map<String, List<ConfigEntry>> configs,
                                          InternalPartitionsOffsets partitionsOffsets) {
    var updatedTopicStates = new HashMap<>(topicStates);
    descriptions.forEach((topic, description) -> {
      SegmentStats segmentStats = null;
      Map<Integer, SegmentStats> partitionsSegmentStats = null;
      if (topicStates.containsKey(topic)) {
        segmentStats = topicStates.get(topic).segmentStats();
        partitionsSegmentStats = topicStates.get(topic).partitionsSegmentStats();
      }
      updatedTopicStates.put(
          topic,
          new TopicState(
              topic,
              description,
              configs.getOrDefault(topic, List.of()),
              partitionsOffsets.topicOffsets(topic, true),
              partitionsOffsets.topicOffsets(topic, false),
              segmentStats,
              partitionsSegmentStats
          )
      );
    });
    return toBuilder()
        .topicStates(updatedTopicStates)
        .build();
  }

  public ScrapedClusterState topicDeleted(String topic) {
    var newTopicStates = new HashMap<>(topicStates);
    newTopicStates.remove(topic);
    return toBuilder()
        .topicStates(newTopicStates)
        .build();
  }

  public static Mono<ScrapedClusterState> scrape(ClusterDescription clusterDescription,
                                                 ReactiveAdminClient ac) {
    return Mono.zip(
        ac.describeLogDirs(clusterDescription.getNodes().stream().map(Node::id).toList())
            .map(InternalLogDirStats::new),
        ac.listConsumerGroups().map(l -> l.stream().map(ConsumerGroupListing::groupId).toList()),
        ac.describeTopics(),
        ac.getTopicsConfig()
    ).flatMap(phase1 ->
        Mono.zip(
            ac.listOffsets(phase1.getT3().values(), OffsetSpec.latest()),
            ac.listOffsets(phase1.getT3().values(), OffsetSpec.earliest()),
            ac.describeConsumerGroups(phase1.getT2()),
            ac.listConsumerGroupOffsets(phase1.getT2(), null)
        ).map(phase2 ->
            create(
                clusterDescription,
                phase1.getT1(),
                phase1.getT3(),
                phase1.getT4(),
                phase2.getT1(),
                phase2.getT2(),
                phase2.getT3(),
                phase2.getT4()
            )));
  }

  private static ScrapedClusterState create(ClusterDescription clusterDescription,
                                            InternalLogDirStats segmentStats,
                                            Map<String, TopicDescription> topicDescriptions,
                                            Map<String, List<ConfigEntry>> topicConfigs,
                                            Map<TopicPartition, Long> latestOffsets,
                                            Map<TopicPartition, Long> earliestOffsets,
                                            Map<String, ConsumerGroupDescription> consumerDescriptions,
                                            Table<String, TopicPartition, Long> consumerOffsets) {

    Map<String, TopicState> topicStates = new HashMap<>();
    topicDescriptions.forEach((name, desc) ->
        topicStates.put(
            name,
            new TopicState(
                name,
                desc,
                topicConfigs.getOrDefault(name, List.of()),
                filterTopic(name, earliestOffsets),
                filterTopic(name, latestOffsets),
                segmentStats.getTopicStats().get(name),
                Optional.ofNullable(segmentStats.getPartitionsStats())
                    .map(topicForFilter -> filterTopic(name, topicForFilter))
                    .orElse(null)
            )));

    Map<String, ConsumerGroupState> consumerGroupsStates = new HashMap<>();
    consumerDescriptions.forEach((name, desc) ->
        consumerGroupsStates.put(
            name,
            new ConsumerGroupState(
                name,
                desc,
                consumerOffsets.row(name)
            )));

    Map<Integer, NodeState> nodesStates = new HashMap<>();
    clusterDescription.getNodes().forEach(node ->
        nodesStates.put(
            node.id(),
            new NodeState(
                node.id(),
                node,
                segmentStats.getBrokerStats().get(node.id()),
                segmentStats.getBrokerDirsStats().get(node.id())
            )));

    return new ScrapedClusterState(
        Instant.now(),
        nodesStates,
        topicStates,
        consumerGroupsStates
    );
  }

  private static <T> Map<Integer, T> filterTopic(String topicForFilter, Map<TopicPartition, T> tpMap) {
    return tpMap.entrySet()
        .stream()
        .filter(tp -> tp.getKey().topic().equals(topicForFilter))
        .collect(Collectors.toMap(e -> e.getKey().partition(), Map.Entry::getValue));
  }

}

@ -0,0 +1,24 @@
package com.provectus.kafka.ui.service.metrics.scrape.inferred;

import static io.prometheus.client.Collector.MetricFamilySamples;

import java.util.List;
import java.util.stream.Stream;

public class InferredMetrics {

  private final List<MetricFamilySamples> metrics;

  public static InferredMetrics empty() {
    return new InferredMetrics(List.of());
  }

  public InferredMetrics(List<MetricFamilySamples> metrics) {
    this.metrics = metrics;
  }

  public Stream<MetricFamilySamples> asStream() {
    return metrics.stream();
  }

}

@ -0,0 +1,226 @@
package com.provectus.kafka.ui.service.metrics.scrape.inferred;

import com.google.common.annotations.VisibleForTesting;
import com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState;
import io.prometheus.client.Collector.MetricFamilySamples;
import io.prometheus.client.GaugeMetricFamily;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import javax.annotation.Nullable;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.MemberDescription;
import org.apache.kafka.common.Node;
import reactor.core.publisher.Mono;

@Slf4j
@RequiredArgsConstructor
public class InferredMetricsScraper {

  private ScrapedClusterState prevState = null;

  public synchronized Mono<InferredMetrics> scrape(ScrapedClusterState newState) {
    var inferred = infer(prevState, newState);
    this.prevState = newState;
    return Mono.just(inferred);
  }

  @VisibleForTesting
  static InferredMetrics infer(@Nullable ScrapedClusterState prevState, ScrapedClusterState newState) {
    var registry = new MetricsRegistry();
    fillNodesMetrics(registry, newState);
    fillTopicMetrics(registry, newState);
    fillConsumerGroupsMetrics(registry, newState);
    List<MetricFamilySamples> metrics = registry.metrics.values().stream().toList();
    log.debug("{} metric families inferred from cluster state", metrics.size());
    return new InferredMetrics(metrics);
  }

  private static class MetricsRegistry {

    final Map<String, MetricFamilySamples> metrics = new LinkedHashMap<>();

    void gauge(String name,
               String help,
               List<String> lbls,
               List<String> lblVals,
               Number value) {
      GaugeMetricFamily gauge;
      if ((gauge = (GaugeMetricFamily) metrics.get(name)) == null) {
        gauge = new GaugeMetricFamily(name, help, lbls);
        metrics.put(name, gauge);
      }
      gauge.addMetric(lblVals, value.doubleValue());
    }
  }

  private static void fillNodesMetrics(MetricsRegistry registry, ScrapedClusterState newState) {
    registry.gauge(
        "broker_count",
        "Number of brokers in the Kafka cluster",
        List.of(),
        List.of(),
        newState.getNodesStates().size()
    );

    newState.getNodesStates().forEach((nodeId, state) -> {
      if (state.segmentStats() != null) {
        registry.gauge(
            "broker_bytes_disk",
            "Written disk size in bytes of a broker",
            List.of("node_id"),
            List.of(nodeId.toString()),
            state.segmentStats().getSegmentSize()
        );
      }
      if (state.logDirSpaceStats() != null) {
        if (state.logDirSpaceStats().usableBytes() != null) {
          registry.gauge(
              "broker_bytes_usable",
              "Usable disk size in bytes of a broker",
              List.of("node_id"),
              List.of(nodeId.toString()),
              state.logDirSpaceStats().usableBytes()
          );
        }
        if (state.logDirSpaceStats().totalBytes() != null) {
          registry.gauge(
              "broker_bytes_total",
              "Total disk size in bytes of a broker",
              List.of("node_id"),
              List.of(nodeId.toString()),
              state.logDirSpaceStats().totalBytes()
          );
        }
      }
    });
  }

  private static void fillTopicMetrics(MetricsRegistry registry, ScrapedClusterState clusterState) {
    registry.gauge(
        "topic_count",
        "Number of topics in the Kafka cluster",
        List.of(),
        List.of(),
        clusterState.getTopicStates().size()
    );

    clusterState.getTopicStates().forEach((topicName, state) -> {
      registry.gauge(
          "kafka_topic_partitions",
          "Number of partitions for this Topic",
          List.of("topic"),
          List.of(topicName),
          state.description().partitions().size()
      );
      state.endOffsets().forEach((partition, endOffset) -> {
        registry.gauge(
            "kafka_topic_partition_current_offset",
            "Current Offset of a Broker at Topic/Partition",
            List.of("topic", "partition"),
            List.of(topicName, String.valueOf(partition)),
            endOffset
        );
      });
      state.startOffsets().forEach((partition, startOffset) -> {
        registry.gauge(
            "kafka_topic_partition_oldest_offset",
            "Oldest Offset of a Broker at Topic/Partition",
            List.of("topic", "partition"),
            List.of(topicName, String.valueOf(partition)),
            startOffset
        );
      });
      state.description().partitions().forEach(p -> {
        registry.gauge(
            "kafka_topic_partition_in_sync_replica",
            "Number of In-Sync Replicas for this Topic/Partition",
            List.of("topic", "partition"),
            List.of(topicName, String.valueOf(p.partition())),
            p.isr().size()
        );
        registry.gauge(
            "kafka_topic_partition_replicas",
            "Number of Replicas for this Topic/Partition",
            List.of("topic", "partition"),
            List.of(topicName, String.valueOf(p.partition())),
            p.replicas().size()
        );
        registry.gauge(
            "kafka_topic_partition_leader",
            "Leader Broker ID of this Topic/Partition (-1, if no leader)",
            List.of("topic", "partition"),
            List.of(topicName, String.valueOf(p.partition())),
            Optional.ofNullable(p.leader()).map(Node::id).orElse(-1)
        );
      });
      if (state.segmentStats() != null) {
        registry.gauge(
            "topic_bytes_disk",
            "Disk size in bytes of a topic",
            List.of("topic"),
            List.of(topicName),
            state.segmentStats().getSegmentSize()
        );
      }
    });
  }

  private static void fillConsumerGroupsMetrics(MetricsRegistry registry, ScrapedClusterState clusterState) {
    registry.gauge(
        "group_count",
        "Number of consumer groups in the Kafka cluster",
        List.of(),
        List.of(),
        clusterState.getConsumerGroupsStates().size()
    );

    clusterState.getConsumerGroupsStates().forEach((groupName, state) -> {
      registry.gauge(
          "group_state",
          "State of the consumer group, value = ordinal of org.apache.kafka.common.ConsumerGroupState",
          List.of("group"),
          List.of(groupName),
          state.description().state().ordinal()
      );
      registry.gauge(
          "group_member_count",
          "Number of member assignments in the consumer group.",
          List.of("group"),
          List.of(groupName),
          state.description().members().size()
      );
      registry.gauge(
          "group_host_count",
          "Number of distinct hosts in the consumer group.",
          List.of("group"),
          List.of(groupName),
          state.description().members().stream().map(MemberDescription::host).distinct().count()
      );

      state.committedOffsets().forEach((tp, committedOffset) -> {
        registry.gauge(
            "kafka_consumergroup_current_offset",
            "Current Offset of a ConsumerGroup at Topic/Partition",
            List.of("consumergroup", "topic", "partition"),
            List.of(groupName, tp.topic(), String.valueOf(tp.partition())),
            committedOffset
        );

        Optional.ofNullable(clusterState.getTopicStates().get(tp.topic()))
            .flatMap(s -> Optional.ofNullable(s.endOffsets().get(tp.partition())))
            .ifPresent(endOffset ->
                registry.gauge(
                    "kafka_consumergroup_lag",
                    "Current Approximate Lag of a ConsumerGroup at Topic/Partition",
                    List.of("consumergroup", "topic", "partition"),
                    List.of(groupName, tp.topic(), String.valueOf(tp.partition())),
                    endOffset - committedOffset //TODO: check +-1
                ));

      });
    });
  }
}
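
A small sketch of how the scraper above might be driven (hypothetical demo class; it relies only on types introduced in this branch):

package com.provectus.kafka.ui.service.metrics.scrape.inferred;

import com.provectus.kafka.ui.service.metrics.scrape.ScrapedClusterState;

// Hypothetical demo class, not part of the branch.
class InferredMetricsScraperDemo {

  public static void main(String[] args) {
    var scraper = new InferredMetricsScraper();
    // Even an empty cluster state yields the broker_count / topic_count / group_count gauges (all zero).
    InferredMetrics metrics = scraper.scrape(ScrapedClusterState.empty()).block();
    metrics.asStream().forEach(mfs -> System.out.println(mfs.name));
  }
}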

@ -1,5 +1,7 @@
package com.provectus.kafka.ui.service.metrics;
package com.provectus.kafka.ui.service.metrics.scrape.jmx;

import com.provectus.kafka.ui.service.metrics.RawMetric;
import io.prometheus.client.Collector;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.LinkedHashMap;

@ -1,6 +1,7 @@
package com.provectus.kafka.ui.service.metrics;
package com.provectus.kafka.ui.service.metrics.scrape.jmx;

import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.MetricsScrapeProperties;
import com.provectus.kafka.ui.service.metrics.RawMetric;
import java.io.Closeable;
import java.util.ArrayList;
import java.util.HashMap;

@ -17,15 +18,15 @@ import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.common.Node;
import org.springframework.stereotype.Component;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;


@Service
@Component //need to be a component, since
@Slf4j
class JmxMetricsRetriever implements MetricsRetriever, Closeable {
public class JmxMetricsRetriever implements Closeable {

  private static final boolean SSL_JMX_SUPPORTED;

@ -43,35 +44,34 @@ class JmxMetricsRetriever implements MetricsRetriever, Closeable {
    JmxSslSocketFactory.clearFactoriesCache();
  }

  @Override
  public Flux<RawMetric> retrieve(KafkaCluster c, Node node) {
    if (isSslJmxEndpoint(c) && !SSL_JMX_SUPPORTED) {
      log.warn("Cluster {} has jmx ssl configured, but it is not supported", c.getName());
      return Flux.empty();
  public Mono<List<RawMetric>> retrieveFromNode(MetricsScrapeProperties scrapeProperties, Node node) {
    if (isSslJmxEndpoint(scrapeProperties) && !SSL_JMX_SUPPORTED) {
      log.warn("Cluster has jmx ssl configured, but it is not supported by app");
      return Mono.just(List.of());
    }
    return Mono.fromSupplier(() -> retrieveSync(c, node))
        .subscribeOn(Schedulers.boundedElastic())
        .flatMapMany(Flux::fromIterable);
    return Mono.fromSupplier(() -> retrieveSync(scrapeProperties, node))
        .subscribeOn(Schedulers.boundedElastic());
  }

  private boolean isSslJmxEndpoint(KafkaCluster cluster) {
    return cluster.getMetricsConfig().getKeystoreLocation() != null;
  private boolean isSslJmxEndpoint(MetricsScrapeProperties scrapeProperties) {
    return scrapeProperties.getKeystoreConfig() != null
        && scrapeProperties.getKeystoreConfig().getKeystoreLocation() != null;
  }

  @SneakyThrows
  private List<RawMetric> retrieveSync(KafkaCluster c, Node node) {
    String jmxUrl = JMX_URL + node.host() + ":" + c.getMetricsConfig().getPort() + "/" + JMX_SERVICE_TYPE;
  private List<RawMetric> retrieveSync(MetricsScrapeProperties scrapeProperties, Node node) {
    String jmxUrl = JMX_URL + node.host() + ":" + scrapeProperties.getPort() + "/" + JMX_SERVICE_TYPE;
    log.debug("Collection JMX metrics for {}", jmxUrl);
    List<RawMetric> result = new ArrayList<>();
    withJmxConnector(jmxUrl, c, jmxConnector -> getMetricsFromJmx(jmxConnector, result));
    withJmxConnector(jmxUrl, scrapeProperties, jmxConnector -> getMetricsFromJmx(jmxConnector, result));
    log.debug("{} metrics collected for {}", result.size(), jmxUrl);
    return result;
  }

  private void withJmxConnector(String jmxUrl,
                                KafkaCluster c,
                                MetricsScrapeProperties scrapeProperties,
                                Consumer<JMXConnector> consumer) {
    var env = prepareJmxEnvAndSetThreadLocal(c);
    var env = prepareJmxEnvAndSetThreadLocal(scrapeProperties);
    try (JMXConnector connector = JMXConnectorFactory.newJMXConnector(new JMXServiceURL(jmxUrl), env)) {
      try {
        connector.connect(env);

@ -87,25 +87,25 @@ class JmxMetricsRetriever implements MetricsRetriever, Closeable {
      }
    }

  private Map<String, Object> prepareJmxEnvAndSetThreadLocal(KafkaCluster cluster) {
    var metricsConfig = cluster.getMetricsConfig();
  private Map<String, Object> prepareJmxEnvAndSetThreadLocal(MetricsScrapeProperties scrapeProperties) {
    Map<String, Object> env = new HashMap<>();
    if (isSslJmxEndpoint(cluster)) {
      var clusterSsl = cluster.getOriginalProperties().getSsl();
    if (isSslJmxEndpoint(scrapeProperties)) {
      var truststoreConfig = scrapeProperties.getTruststoreConfig();
      var keystoreConfig = scrapeProperties.getKeystoreConfig();
      JmxSslSocketFactory.setSslContextThreadLocal(
          clusterSsl != null ? clusterSsl.getTruststoreLocation() : null,
          clusterSsl != null ? clusterSsl.getTruststorePassword() : null,
          metricsConfig.getKeystoreLocation(),
          metricsConfig.getKeystorePassword()
          truststoreConfig != null ? truststoreConfig.getTruststoreLocation() : null,
          truststoreConfig != null ? truststoreConfig.getTruststorePassword() : null,
          keystoreConfig != null ? keystoreConfig.getKeystoreLocation() : null,
          keystoreConfig != null ? keystoreConfig.getKeystorePassword() : null
      );
      JmxSslSocketFactory.editJmxConnectorEnv(env);
    }

    if (StringUtils.isNotEmpty(metricsConfig.getUsername())
        && StringUtils.isNotEmpty(metricsConfig.getPassword())) {
    if (StringUtils.isNotEmpty(scrapeProperties.getUsername())
        && StringUtils.isNotEmpty(scrapeProperties.getPassword())) {
      env.put(
          JMXConnector.CREDENTIALS,
          new String[] {metricsConfig.getUsername(), metricsConfig.getPassword()}
          new String[] {scrapeProperties.getUsername(), scrapeProperties.getPassword()}
      );
    }
    return env;
@ -0,0 +1,36 @@
package com.provectus.kafka.ui.service.metrics.scrape.jmx;

import static io.prometheus.client.Collector.MetricFamilySamples;

import com.provectus.kafka.ui.model.MetricsScrapeProperties;
import com.provectus.kafka.ui.service.metrics.RawMetric;
import com.provectus.kafka.ui.service.metrics.scrape.PerBrokerScrapedMetrics;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.Node;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuples;

public class JmxMetricsScraper {

  private final JmxMetricsRetriever jmxMetricsRetriever;
  private final MetricsScrapeProperties scrapeProperties;

  public JmxMetricsScraper(MetricsScrapeProperties scrapeProperties,
                           JmxMetricsRetriever jmxMetricsRetriever) {
    this.scrapeProperties = scrapeProperties;
    this.jmxMetricsRetriever = jmxMetricsRetriever;
  }

  public Mono<PerBrokerScrapedMetrics> scrape(Collection<Node> nodes) {
    Mono<Map<Integer, List<MetricFamilySamples>>> collected = Flux.fromIterable(nodes)
        .flatMap(n -> jmxMetricsRetriever.retrieveFromNode(scrapeProperties, n).map(metrics -> Tuples.of(n, metrics)))
        .collectMap(
            t -> t.getT1().id(),
            t -> RawMetric.groupIntoMfs(t.getT2()).toList()
        );
    return collected.map(PerBrokerScrapedMetrics::new);
  }
}

@ -1,4 +1,4 @@
package com.provectus.kafka.ui.service.metrics;
package com.provectus.kafka.ui.service.metrics.scrape.jmx;

import com.google.common.base.Preconditions;
import java.io.FileInputStream;

@ -61,9 +61,8 @@ class JmxSslSocketFactory extends javax.net.ssl.SSLSocketFactory {
    } catch (Exception e) {
      log.error("----------------------------------");
      log.error("SSL can't be enabled for JMX retrieval. "
              + "Make sure your java app run with '--add-opens java.rmi/javax.rmi.ssl=ALL-UNNAMED' arg. Err: {}",
              + "Make sure your java app is running with '--add-opens java.rmi/javax.rmi.ssl=ALL-UNNAMED' arg. Err: {}",
          e.getMessage());
      log.trace("SSL can't be enabled for JMX retrieval", e);
      log.error("----------------------------------");
    }
    SSL_JMX_SUPPORTED = sslJmxSupported;

@ -0,0 +1,317 @@
package com.provectus.kafka.ui.service.metrics.scrape.prometheus;

import static io.prometheus.client.Collector.MetricFamilySamples.Sample;

import com.google.common.base.Enums;
import io.prometheus.client.Collector.MetricFamilySamples;
import io.prometheus.client.Collector.Type;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Stream;

public class PrometheusEndpointParser {

  // will be set if no TYPE provided (or it is unsupported)
  private static final Type DEFAULT_TYPE = Type.GAUGE;

  private PrometheusEndpointParser() {
  }

  private static class ParserContext {
    final List<MetricFamilySamples> registered = new ArrayList<>();

    String name;
    String help;
    Type type;
    String unit;
    Set<String> allowedNames = new HashSet<>();
    List<Sample> samples = new ArrayList<>();

    void registerAndReset() {
      if (!samples.isEmpty()) {
        registered.add(
            new MetricFamilySamples(
                name,
                Optional.ofNullable(unit).orElse(""),
                type,
                Optional.ofNullable(help).orElse(name),
                List.copyOf(samples))
        );
      }
      //resetting state:
      name = null;
      help = null;
      type = null;
      unit = null;
      allowedNames.clear();
      samples.clear();
    }

    List<MetricFamilySamples> getRegistered() {
      registerAndReset(); // last in progress metric should be registered
      return registered;
    }
  }

  // general logic taken from https://github.com/prometheus/client_python/blob/master/prometheus_client/parser.py
  public static List<MetricFamilySamples> parse(Stream<String> lines) {
    ParserContext context = new ParserContext();
    lines.map(String::trim)
        .filter(s -> !s.isBlank())
        .forEach(line -> {
          if (line.charAt(0) == '#') {
            String[] parts = line.split("[ \t]+", 4);
            if (parts.length >= 3) {
              switch (parts[1]) {
                case "HELP" -> processHelp(context, parts);
                case "TYPE" -> processType(context, parts);
                case "UNIT" -> processUnit(context, parts);
                default -> { /* probably a comment */ }
              }
            }
          } else {
            processSample(context, line);
          }
        });
    return context.getRegistered();
  }

  private static void processUnit(ParserContext context, String[] parts) {
    if (!parts[2].equals(context.name)) {
      // starting new metric family - need to register (if possible) prev one
      context.registerAndReset();
      context.name = parts[2];
      context.type = DEFAULT_TYPE;
      context.allowedNames.add(context.name);
    }
    if (parts.length == 4) {
      context.unit = parts[3];
    }
  }

  private static void processHelp(ParserContext context, String[] parts) {
    if (!parts[2].equals(context.name)) {
      // starting new metric family - need to register (if possible) prev one
      context.registerAndReset();
      context.name = parts[2];
      context.type = DEFAULT_TYPE;
      context.allowedNames.add(context.name);
    }
    if (parts.length == 4) {
      context.help = unescapeHelp(parts[3]);
    }
  }

  private static void processType(ParserContext context, String[] parts) {
    if (!parts[2].equals(context.name)) {
      // starting new metric family - need to register (if possible) prev one
      context.registerAndReset();
      context.name = parts[2];
    }

    context.type = Enums.getIfPresent(Type.class, parts[3].toUpperCase()).or(DEFAULT_TYPE);
    switch (context.type) {
      case SUMMARY -> {
        context.allowedNames.add(context.name);
        context.allowedNames.add(context.name + "_count");
        context.allowedNames.add(context.name + "_sum");
        context.allowedNames.add(context.name + "_created");
      }
      case HISTOGRAM -> {
        context.allowedNames.add(context.name + "_count");
        context.allowedNames.add(context.name + "_sum");
        context.allowedNames.add(context.name + "_bucket");
        context.allowedNames.add(context.name + "_created");
      }
      case COUNTER -> {
        context.allowedNames.add(context.name);
        context.allowedNames.add(context.name + "_total");
        context.allowedNames.add(context.name + "_created");
      }
      case INFO -> {
        context.allowedNames.add(context.name);
        context.allowedNames.add(context.name + "_info");
      }
      default -> context.allowedNames.add(context.name);
    }
  }

  private static void processSample(ParserContext context, String line) {
    parseSampleLine(line).ifPresent(sample -> {
      if (!context.allowedNames.contains(sample.name)) {
        // starting new metric family - need to register (if possible) prev one
        context.registerAndReset();
        context.name = sample.name;
        context.type = DEFAULT_TYPE;
        context.allowedNames.add(sample.name);
      }
      context.samples.add(sample);
    });
  }

  private static String unescapeHelp(String text) {
    // algorithm from https://github.com/prometheus/client_python/blob/a2dae6caeaf3c300db416ba10a2a3271693addd4/prometheus_client/parser.py
    if (text == null || !text.contains("\\")) {
      return text;
    }
    StringBuilder result = new StringBuilder();
    boolean slash = false;
    for (int c = 0; c < text.length(); c++) {
      char charAt = text.charAt(c);
      if (slash) {
        if (charAt == '\\') {
          result.append('\\');
        } else if (charAt == 'n') {
          result.append('\n');
        } else {
          result.append('\\').append(charAt);
        }
        slash = false;
      } else {
        if (charAt == '\\') {
          slash = true;
        } else {
          result.append(charAt);
        }
      }
    }
    if (slash) {
      result.append("\\");
    }
    return result.toString();
  }

  //returns empty if line is not valid sample string
  private static Optional<Sample> parseSampleLine(String line) {
    // algorithm copied from https://github.com/prometheus/client_python/blob/a2dae6caeaf3c300db416ba10a2a3271693addd4/prometheus_client/parser.py
    StringBuilder name = new StringBuilder();
    StringBuilder labelname = new StringBuilder();
    StringBuilder labelvalue = new StringBuilder();
    StringBuilder value = new StringBuilder();
    List<String> lblNames = new ArrayList<>();
    List<String> lblVals = new ArrayList<>();

    String state = "name";

    for (int c = 0; c < line.length(); c++) {
      char charAt = line.charAt(c);
      if (state.equals("name")) {
        if (charAt == '{') {
          state = "startoflabelname";
        } else if (charAt == ' ' || charAt == '\t') {
          state = "endofname";
        } else {
          name.append(charAt);
        }
      } else if (state.equals("endofname")) {
        if (charAt == ' ' || charAt == '\t') {
          // do nothing
        } else if (charAt == '{') {
          state = "startoflabelname";
        } else {
          value.append(charAt);
          state = "value";
        }
      } else if (state.equals("startoflabelname")) {
        if (charAt == ' ' || charAt == '\t') {
          // do nothing
        } else if (charAt == '}') {
          state = "endoflabels";
        } else {
          labelname.append(charAt);
          state = "labelname";
        }
      } else if (state.equals("labelname")) {
        if (charAt == '=') {
          state = "labelvaluequote";
        } else if (charAt == '}') {
          state = "endoflabels";
        } else if (charAt == ' ' || charAt == '\t') {
          state = "labelvalueequals";
        } else {
          labelname.append(charAt);
        }
      } else if (state.equals("labelvalueequals")) {
        if (charAt == '=') {
          state = "labelvaluequote";
        } else if (charAt == ' ' || charAt == '\t') {
          // do nothing
        } else {
          return Optional.empty();
        }
      } else if (state.equals("labelvaluequote")) {
        if (charAt == '"') {
          state = "labelvalue";
        } else if (charAt == ' ' || charAt == '\t') {
          // do nothing
        } else {
          return Optional.empty();
        }
      } else if (state.equals("labelvalue")) {
        if (charAt == '\\') {
          state = "labelvalueslash";
        } else if (charAt == '"') {
          lblNames.add(labelname.toString());
          lblVals.add(labelvalue.toString());
          labelname.setLength(0);
          labelvalue.setLength(0);
          state = "nextlabel";
        } else {
          labelvalue.append(charAt);
        }
      } else if (state.equals("labelvalueslash")) {
        state = "labelvalue";
        if (charAt == '\\') {
          labelvalue.append('\\');
        } else if (charAt == 'n') {
          labelvalue.append('\n');
        } else if (charAt == '"') {
          labelvalue.append('"');
        } else {
          labelvalue.append('\\').append(charAt);
        }
      } else if (state.equals("nextlabel")) {
        if (charAt == ',') {
          state = "labelname";
        } else if (charAt == '}') {
          state = "endoflabels";
        } else if (charAt == ' ' || charAt == '\t') {
          // do nothing
        } else {
          return Optional.empty();
        }
      } else if (state.equals("endoflabels")) {
        if (charAt == ' ' || charAt == '\t') {
          // do nothing
        } else {
          value.append(charAt);
          state = "value";
        }
      } else if (state.equals("value")) {
        if (charAt == ' ' || charAt == '\t') {
          break; // timestamps are NOT supported - ignoring
        } else {
          value.append(charAt);
        }
      }
    }
    return Optional.of(new Sample(name.toString(), lblNames, lblVals, parseDouble(value.toString())));
  }

  private static double parseDouble(String valueString) {
    if (valueString.equalsIgnoreCase("NaN")) {
      return Double.NaN;
    } else if (valueString.equalsIgnoreCase("+Inf")) {
      return Double.POSITIVE_INFINITY;
    } else if (valueString.equalsIgnoreCase("-Inf")) {
      return Double.NEGATIVE_INFINITY;
    }
    return Double.parseDouble(valueString);
  }

}
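
A brief sketch of the parser above on a tiny exposition text (hypothetical demo class; the metric names and values are invented):

package com.provectus.kafka.ui.service.metrics.scrape.prometheus;

// Hypothetical demo class, not part of the branch.
class PrometheusEndpointParserDemo {

  public static void main(String[] args) {
    String exposition = """
        # HELP requests_total Total requests
        # TYPE requests_total counter
        requests_total{path="/api"} 42
        no_type_metric 1.5
        """;
    // Produces two families: requests_total (COUNTER) and no_type_metric (falls back to GAUGE).
    var families = PrometheusEndpointParser.parse(exposition.lines());
    families.forEach(mfs -> System.out.println(mfs.type + " " + mfs.name + " " + mfs.samples.size()));
  }
}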

@ -0,0 +1,54 @@
package com.provectus.kafka.ui.service.metrics.scrape.prometheus;

import static io.prometheus.client.Collector.MetricFamilySamples;

import com.provectus.kafka.ui.model.MetricsScrapeProperties;
import com.provectus.kafka.ui.util.WebClientConfigurator;
import java.util.List;
import java.util.Optional;
import lombok.extern.slf4j.Slf4j;
import org.springframework.util.unit.DataSize;
import org.springframework.web.reactive.function.client.WebClient;
import org.springframework.web.util.UriComponentsBuilder;
import reactor.core.publisher.Mono;

@Slf4j
class PrometheusMetricsRetriever {

  private static final String METRICS_ENDPOINT_PATH = "/metrics";
  private static final int DEFAULT_EXPORTER_PORT = 11001;

  private final int port;
  private final boolean sslEnabled;
  private final WebClient webClient;

  PrometheusMetricsRetriever(MetricsScrapeProperties scrapeProperties) {
    this.port = Optional.ofNullable(scrapeProperties.getPort()).orElse(DEFAULT_EXPORTER_PORT);
    this.sslEnabled = scrapeProperties.isSsl() || scrapeProperties.getKeystoreConfig() != null;
    this.webClient = new WebClientConfigurator()
        .configureBufferSize(DataSize.ofMegabytes(20))
        .configureBasicAuth(scrapeProperties.getUsername(), scrapeProperties.getPassword())
        .configureSsl(scrapeProperties.getTruststoreConfig(), scrapeProperties.getKeystoreConfig())
        .build();
  }

  Mono<List<MetricFamilySamples>> retrieve(String host) {
    log.debug("Retrieving metrics from prometheus endpoint: {}:{}", host, port);

    var uri = UriComponentsBuilder.newInstance()
        .scheme(sslEnabled ? "https" : "http")
        .host(host)
        .port(port)
        .path(METRICS_ENDPOINT_PATH)
        .build()
        .toUri();

    return webClient.get()
        .uri(uri)
        .retrieve()
        .bodyToMono(String.class)
        .doOnError(e -> log.error("Error while getting metrics from {}", host, e))
        .map(body -> PrometheusEndpointParser.parse(body.lines()))
        .onErrorResume(th -> Mono.just(List.of()));
  }
}

@ -0,0 +1,30 @@
package com.provectus.kafka.ui.service.metrics.scrape.prometheus;

import static io.prometheus.client.Collector.MetricFamilySamples;

import com.provectus.kafka.ui.model.MetricsScrapeProperties;
import com.provectus.kafka.ui.service.metrics.scrape.PerBrokerScrapedMetrics;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.Node;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;

public class PrometheusScraper {

  private final PrometheusMetricsRetriever retriever;

  public PrometheusScraper(MetricsScrapeProperties scrapeProperties) {
    this.retriever = new PrometheusMetricsRetriever(scrapeProperties);
  }

  public Mono<PerBrokerScrapedMetrics> scrape(Collection<Node> clusterNodes) {
    Mono<Map<Integer, List<MetricFamilySamples>>> collected = Flux.fromIterable(clusterNodes)
        .flatMap(n -> retriever.retrieve(n.host()).map(metrics -> Tuples.of(n, metrics)))
        .collectMap(t -> t.getT1().id(), Tuple2::getT2);
    return collected.map(PerBrokerScrapedMetrics::new);
  }
}

@ -0,0 +1,73 @@
package com.provectus.kafka.ui.service.metrics.sink;

import static com.provectus.kafka.ui.service.MessagesService.createProducer;
import static com.provectus.kafka.ui.service.metrics.prometheus.PrometheusExpose.escapedLabelValue;
import static io.prometheus.client.Collector.MetricFamilySamples;
import static io.prometheus.client.Collector.doubleToGoString;
import static org.apache.kafka.clients.producer.ProducerConfig.COMPRESSION_TYPE_CONFIG;

import com.fasterxml.jackson.databind.json.JsonMapper;
import com.provectus.kafka.ui.config.ClustersProperties;
import java.time.Instant;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.time.temporal.ChronoUnit;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Stream;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import reactor.core.publisher.Mono;

/*
 * Format of records copied from https://github.com/Telefonica/prometheus-kafka-adapter
 */
@RequiredArgsConstructor
class KafkaSink implements MetricsSink {

  record KafkaMetric(String timestamp, String value, String name, Map<String, String> labels) { }

  private static final JsonMapper JSON_MAPPER = new JsonMapper();

  private static final Map<String, Object> PRODUCER_ADDITIONAL_CONFIGS = Map.of(COMPRESSION_TYPE_CONFIG, "gzip");

  private final String topic;
  private final Producer<byte[], byte[]> producer;

  static KafkaSink create(ClustersProperties.Cluster cluster, String targetTopic) {
    return new KafkaSink(targetTopic, createProducer(cluster, PRODUCER_ADDITIONAL_CONFIGS));
  }

  @Override
  public Mono<Void> send(Stream<MetricFamilySamples> metrics) {
    return Mono.fromRunnable(() -> {
      String ts = Instant.now()
          .truncatedTo(ChronoUnit.SECONDS)
          .atZone(ZoneOffset.UTC)
          .format(DateTimeFormatter.ISO_DATE_TIME);

      metrics.flatMap(m -> createRecord(ts, m)).forEach(producer::send);
    });
  }

  private Stream<ProducerRecord<byte[], byte[]>> createRecord(String ts, MetricFamilySamples metrics) {
    return metrics.samples.stream()
        .map(sample -> {
          var lbls = new LinkedHashMap<String, String>();
          lbls.put("__name__", sample.name);
          for (int i = 0; i < sample.labelNames.size(); i++) {
            lbls.put(sample.labelNames.get(i), escapedLabelValue(sample.labelValues.get(i)));
          }
          var km = new KafkaMetric(ts, doubleToGoString(sample.value), sample.name, lbls);
          return new ProducerRecord<>(topic, toJsonBytes(km));
        });
  }

  @SneakyThrows
  private static byte[] toJsonBytes(KafkaMetric m) {
    return JSON_MAPPER.writeValueAsBytes(m);
  }

}
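
To illustrate the record payload this sink produces (following the prometheus-kafka-adapter format referenced above), here is a standalone sketch that mirrors the nested KafkaMetric record; the concrete values are made up:

package com.provectus.kafka.ui.service.metrics.sink;

import com.fasterxml.jackson.databind.json.JsonMapper;
import java.util.Map;

// Hypothetical demo class, not part of the branch.
class KafkaSinkPayloadDemo {

  // Mirrors KafkaSink.KafkaMetric for illustration only.
  record KafkaMetric(String timestamp, String value, String name, Map<String, String> labels) { }

  public static void main(String[] args) throws Exception {
    var metric = new KafkaMetric(
        "2024-01-01T00:00:00Z",
        "42",
        "kafka_topic_partitions",
        Map.of("__name__", "kafka_topic_partitions", "cluster", "local", "topic", "test"));
    // One JSON document per sample is sent to the target topic, e.g.
    // {"timestamp":"2024-01-01T00:00:00Z","value":"42","name":"kafka_topic_partitions","labels":{...}}
    System.out.println(new JsonMapper().writeValueAsString(metric));
  }
}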
Some files were not shown because too many files have changed in this diff.