Merge branch 'master' into issue207/ksqldb
Commit 3bf5a08d3a: 216 changed files with 13,202 additions and 24,484 deletions.

.github/workflows/backend.yml (vendored, 2 changes)

@@ -20,7 +20,7 @@ jobs:
      - name: Set the values
        id: step_one
        run: |
          cat "./kafka-ui-e2e-checks/.env.example" >> "./kafka-ui-e2e-checks/.env"
          cat "./kafka-ui-e2e-checks/.env.ci" >> "./kafka-ui-e2e-checks/.env"
      - name: pull docker
        id: step_four
        run: |
.github/workflows/pr-checks.yaml (vendored, 2 changes)

@@ -10,6 +10,6 @@ jobs:
      - uses: kentaro-m/task-completed-checker-action@v0.1.0
        with:
          repo-token: "${{ secrets.GITHUB_TOKEN }}"
      - uses: derkinderfietsen/pr-description-enforcer@v1
      - uses: dekinderfiets/pr-description-enforcer@0.0.1
        with:
          repo-token: '${{ secrets.GITHUB_TOKEN }}'
.github/workflows/release.yaml (vendored, 62 changes)

@@ -1,14 +1,29 @@
name: release
on:
  workflow_dispatch:
    inputs:
      customVersion:
        description: 'A new version for release, please provide with -SNAPSHOT suffix'
        required: false
        default: '0.0.0'
      rebuild:
        description: 'A tag name for building previously created release'
        required: false
        default: 'v0.0.0'
      extraMavenOptions:
        description: 'A extra options for Maven'
        required: false
        default: ''

jobs:
  release:
    runs-on: ubuntu-latest
    outputs:
      version: ${{steps.prep.outputs.version}}
      version: ${{steps.build.outputs.version}}
    steps:
      - uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - run: |
          git config user.name github-actions
          git config user.email github-actions@github.com

@@ -19,12 +34,22 @@ jobs:
          key: ${{ runner.os }}-maven-${{ hashFiles('**/pom.xml') }}
          restore-keys: |
            ${{ runner.os }}-maven-
      - uses: actions/checkout@v2
      - name: Set up JDK 1.13
        uses: actions/setup-java@v1
        with:
          java-version: 1.13
      - name: Checkout to specific tag
        if: github.event.inputs.rebuild != 'v0.0.0'
        run: |
          git checkout tags/${{ github.event.inputs.rebuild }} -b rebuild-${{ github.event.inputs.rebuild }}
      - name: Set custom version
        if: github.event.inputs.customVersion != '0.0.0' && github.event.inputs.rebuild == 'v0.0.0'
        run: |
          mvn -q versions:set -DnewVersion=${{ github.event.inputs.customVersion }}
          git add pom.xml **/pom.xml
          git commit -m "Increased release"
      - name: Update development version
        if: github.event.inputs.rebuild == 'v0.0.0'
        run: |
          mvn -q versions:set -DnextSnapshot
          git add pom.xml **/pom.xml

@@ -32,22 +57,24 @@ jobs:
          git push -f
          git reset --hard HEAD~1
      - name: Prepare release
        id: prep
        if: github.event.inputs.rebuild == 'v0.0.0'
        run: |
          mvn -q versions:set -DremoveSnapshot
          export VERSION=$(mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
          git add .
          git commit -m "release ${VERSION}"
          git tag -f v${VERSION}
          git push --tags
          echo ::set-output name=version::${VERSION}
      - name: Build with Maven
        run: mvn clean package -Pprod
        id: build
        run: |
          mvn clean package -Pprod ${{ github.event.inputs.extraMavenOptions }}
          export VERSION=$(mvn -q -Dexec.executable=echo -Dexec.args='${project.version}' --non-recursive exec:exec)
          echo ::set-output name=version::${VERSION}
      - name: Archive JAR
        uses: actions/upload-artifact@v2
        with:
          name: kafka-ui-${{ steps.prep.outputs.version }}
          path: kafka-ui-api/target/kafka-ui-api-${{ steps.prep.outputs.version }}.jar
          name: kafka-ui-${{ steps.build.outputs.version }}
          path: kafka-ui-api/target/kafka-ui-api-${{ steps.build.outputs.version }}.jar
      #################
      #               #
      # Docker images #

@@ -70,20 +97,7 @@ jobs:
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build
        if: github.ref != 'refs/heads/master'
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          builder: ${{ steps.buildx.outputs.name }}
          context: kafka-ui-api
          push: false
          build-args: |
            JAR_FILE=kafka-ui-api-${{ steps.prep.outputs.version }}.jar
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache
      - name: Build and push
        if: github.ref == 'refs/heads/master'
        id: docker_build_and_push
        uses: docker/build-push-action@v2
        with:

@@ -91,10 +105,10 @@ jobs:
          context: kafka-ui-api
          push: true
          tags: |
            provectuslabs/kafka-ui:${{ steps.prep.outputs.version }}
            provectuslabs/kafka-ui:${{ steps.build.outputs.version }}
            provectuslabs/kafka-ui:latest
          build-args: |
            JAR_FILE=kafka-ui-api-${{ steps.prep.outputs.version }}.jar
            JAR_FILE=kafka-ui-api-${{ steps.build.outputs.version }}.jar
          cache-from: type=local,src=/tmp/.buildx-cache
          cache-to: type=local,dest=/tmp/.buildx-cache
  charts:

@@ -112,7 +126,7 @@ jobs:
        run: |
          export version=${{needs.release.outputs.version}}
          sed -i "s/appVersion:.*/appVersion: ${version}/" charts/kafka-ui/Chart.yaml
      - name:
      - name: add chart
        run: |
          export VERSION=${{needs.release.outputs.version}}
          MSG=$(helm package --app-version ${VERSION} charts/kafka-ui)
README.md (53 changes)

@@ -1,15 +1,15 @@
Kafka UI – Free Web UI for Kafka
UI for Apache Kafka – Free Web UI for Apache Kafka
------------------



<em>Kafka UI is a free open-source web UI for monitoring and management of Apache Kafka clusters. </em>
<em>UI for Apache Kafka is a free open-source web UI for monitoring and management of Apache Kafka clusters. </em>

Kafka UI is a simple tool that makes your data flows observable, helps find and troubleshoot issues faster and deliver optimal performance. Its lightweight dashboard makes it easy to track key metrics of your Kafka clusters - Brokers, Topics, Partitions, Production, and Consumption.
UI for Apache Kafka is a simple tool that makes your data flows observable, helps find and troubleshoot issues faster and deliver optimal performance. Its lightweight dashboard makes it easy to track key metrics of your Kafka clusters - Brokers, Topics, Partitions, Production, and Consumption.

Set up Kafka UI with just a couple of easy commands to visualize your Kafka data in a comprehensible way. You can run the tool locally or in the cloud.
Set up UI for Apache Kafka with just a couple of easy commands to visualize your Kafka data in a comprehensible way. You can run the tool locally or in the cloud.



# Features

@@ -25,10 +25,10 @@ Set up Kafka UI with just a couple of easy commands to visualize your Kafka data

# Getting Started

To run Kafka UI, you can use a pre-built Docker image or build it locally.
To run UI for Apache Kafka, you can use a pre-built Docker image or build it locally.

## Running From Docker Image
The official Docker image for Kafka UI is hosted here: [hub.docker.com/r/provectuslabs/kafka-ui](https://hub.docker.com/r/provectuslabs/kafka-ui).
The official Docker image for UI for Apache Kafka is hosted here: [hub.docker.com/r/provectuslabs/kafka-ui](https://hub.docker.com/r/provectuslabs/kafka-ui).

Launch Docker container in the background:
```sh

@@ -49,13 +49,13 @@ If you prefer to use `docker-compose` please refer to the [documentation](docker

## Building With Docker

Steps to build Kafka UI locally with Docker:
Steps to build UI for Apache Kafka locally with Docker:

1. Install prerequisites: Java and Docker
2. Clone this repository and open a terminal in the directory of the project
3. Build a Docker container with Kafka UI
4. Start Kafka UI with your Kafka clusters
5. Navigate to Kafka UI
3. Build a Docker container with UI for Apache Kafka
4. Start UI for Apache Kafka with your Kafka clusters
5. Navigate to UI for Apache Kafka

### Prerequisites


@@ -76,21 +76,21 @@ Steps to build Kafka UI locally with Docker:

Once you installed the prerequisites and cloned the repository, run the following commands in your project directory:

Build a Docker container with Kafka UI:
Build a Docker container with UI for Apache Kafka:
```sh
./mvnw clean install -Pprod
```
Start Kafka UI with your Kafka clusters:
Start UI for Apache Kafka with your Kafka clusters:
```sh
docker-compose -f ./docker/kafka-ui.yaml up
```
To see Kafka UI, navigate to http://localhost:8080.
To see UI for Apache Kafka, navigate to http://localhost:8080.

If you want to start only kafka-clusters:
```sh
docker-compose -f ./docker/kafka-clusters-only.yaml up
```
Then start Kafka UI with a **local** profile.
Then start UI for Apache Kafka with a **local** profile.

## Running Locally Without Docker


@@ -108,11 +108,12 @@ To read more please follow to [chart documentation](charts/kafka-ui/README.md)

# Guides

To be done
- [SSO configuration](guides/SSO.md)
- [AWS IAM configuration](guides/AWS_IAM.md)

## Connecting to a Secure Broker

Kafka UI supports TLS (SSL) and SASL connections for [encryption and authentication](http://kafka.apache.org/090/documentation.html#security). This can be configured by providing a combination of the following files (placed into the Kafka root directory):
UI for Apache Kafka supports TLS (SSL) and SASL connections for [encryption and authentication](http://kafka.apache.org/090/documentation.html#security). This can be configured by providing a combination of the following files (placed into the Kafka root directory):

To be continued


@@ -131,6 +132,9 @@ kafka:
      bootstrapServers: localhost:29091
      zookeeper: localhost:2183
      schemaRegistry: http://localhost:8085
      schemaRegistryAuth:
        username: username
        password: password
      # schemaNameTemplate: "%s-value"
      jmxPort: 9997
    -

@@ -140,6 +144,8 @@ kafka:
* `bootstrapServers`: where to connect
* `zookeeper`: zookeeper service address
* `schemaRegistry`: schemaRegistry's address
* `schemaRegistryAuth.username`: schemaRegistry's basic authentication username
* `schemaRegistryAuth.password`: schemaRegistry's basic authentication password
* `schemaNameTemplate`: how keys are saved to schemaRegistry
* `jmxPort`: open JMX port of a broker
* `readOnly`: enable read only mode

@@ -153,19 +159,20 @@ For example, if you want to use an environment variable to set the `name` parame

|Name                                             |Description
|-------------------------------------------------|-------------------------------
|`SERVER_SERVLET_CONTEXT_PATH`                     |URI basePath
|`KAFKA_CLUSTERS_0_NAME`                           |Cluster name
|`KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS`               |Address where to connect
|`KAFKA_CLUSTERS_0_ZOOKEEPER`                      |Zookeeper service address
|`KAFKA_CLUSTERS_0_KSQLDBSERVER`                   |KSQL DB server address
|`KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL`   |Security protocol to connect to the brokers. For SSL connection use "SSL", for plaintext connection don't set this environment variable
|`KAFKA_CLUSTERS_0_SCHEMAREGISTRY`                 |SchemaRegistry's address
|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME`    |SchemaRegistry's basic authentication username
|`KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD`    |SchemaRegistry's basic authentication password
|`KAFKA_CLUSTERS_0_SCHEMANAMETEMPLATE`             |How keys are saved to schemaRegistry
|`KAFKA_CLUSTERS_0_JMXPORT`                        |Open JMX port of a broker
|`KAFKA_CLUSTERS_0_READONLY`                       |Enable read only mode. Default: false
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME`            |Given name for the Kafka Connect cluster
|`KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS`         |Address of the Kafka Connect service endpoint
|`LOGGING_LEVEL_ROOT`                              |Setting log level (all, debug, info, warn, error, fatal, off). Default: debug
|`LOGGING_LEVEL_COM_PROVECTUS`                     |Setting log level (all, debug, info, warn, error, fatal, off). Default: debug
|`SERVER_PORT`                                     |Port for the embedded server. Default `8080`
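For illustration only (this is not part of the diff above), the new schema registry auth variables from the table could be wired into the documented `docker run` launch roughly as follows; the bootstrap servers, registry URL and credentials are placeholders taken from examples elsewhere in this repository:

```sh
# Placeholder values: adjust the bootstrap servers, registry URL and credentials for your setup.
docker run -p 8080:8080 \
    -e KAFKA_CLUSTERS_0_NAME=local \
    -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092 \
    -e KAFKA_CLUSTERS_0_SCHEMAREGISTRY=http://schemaregistry0:8085 \
    -e KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME=username \
    -e KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD=password \
    -d provectuslabs/kafka-ui:latest
```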

@@ -17,5 +17,5 @@
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "kafka-ui.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:80
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:8080
{{- end }}
@@ -52,14 +52,16 @@ spec:
              protocol: TCP
          livenessProbe:
            httpGet:
              path: /
              {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/" | urlParse }}
              path: {{ get $contextPath "path" }}
              port: http
            initialDelaySeconds: 60
            periodSeconds: 30
            timeoutSeconds: 10
          readinessProbe:
            httpGet:
              path: /
              {{- $contextPath := .Values.envs.config.SERVER_SERVLET_CONTEXT_PATH | default "" | printf "%s/" | urlParse }}
              path: {{ get $contextPath "path" }}
              port: http
            initialDelaySeconds: 60
            periodSeconds: 30

@@ -16,26 +16,34 @@ metadata:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if .Values.ingress.tls }}
  {{- if .Values.ingress.tls.enabled }}
  tls:
    {{- range .Values.ingress.tls }}
    - hosts:
      {{- range .hosts }}
        - {{ . | quote }}
      {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
        - {{ .Values.ingress.host }}
      secretName: {{ .Values.ingress.tls.secretName }}
  {{- end }}
  rules:
    {{- range .Values.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
    - http:
        paths:
          {{- range .paths }}
          - path: {{ . }}
          {{- range .Values.ingress.precedingPaths }}
          - path: {{ .path }}
            backend:
              serviceName: {{ .serviceName }}
              servicePort: {{ .servicePort }}
          {{- end }}
          - backend:
              serviceName: {{ $fullName }}
              servicePort: {{ $svcPort }}
            {{- if .Values.ingress.path }}
            path: {{ .Values.ingress.path }}
            {{- end }}
          {{- range .Values.ingress.succeedingPaths }}
          - path: {{ .path }}
            backend:
              serviceName: {{ .serviceName }}
              servicePort: {{ .servicePort }}
          {{- end }}
      {{- end }}
      {{- if .Values.ingress.host }}
      host: {{ .Values.ingress.host }}
      {{- end }}
{{- end }}

@@ -44,13 +44,32 @@ service:
  # if you want to force a specific nodePort. Must be used with service.type=NodePort
  # nodePort:

# Ingress configuration
ingress:
  # Enable ingress resource
  enabled: false

  # Annotations for the Ingress
  annotations: {}
  hosts:
    - host: chart-example.local
      paths: []
  tls: []

  # The path for the Ingress
  path: ""

  # The hostname for the Ingress
  host: ""

  # configs for Ingress TLS
  tls:
    # Enable TLS termination for the Ingress
    enabled: false
    # the name of a pre-created Secret containing a TLS private key and certificate
    secretName: ""

  # HTTP paths to add to the Ingress before the default path
  precedingPaths: []

  # HTTP paths to add to the Ingress after the default path
  succeedingPaths: []

resources: {}
  # limits:
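As an aside (not part of the chart changes themselves), the new ingress values above could be exercised with a sketch like the following `helm` invocation; the release name and hostname are made up for illustration:

```sh
# Enable the new single-host ingress with TLS termination via a pre-created secret.
helm install kafka-ui charts/kafka-ui \
  --set ingress.enabled=true \
  --set ingress.host=kafka-ui.example.com \
  --set ingress.path=/ \
  --set ingress.tls.enabled=true \
  --set ingress.tls.secretName=kafka-ui-tls
```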
@@ -19,7 +19,7 @@ services:
      - KAFKA_CLUSTERS_0_ZOOKEEPER=localhost:2181
```

* If you prefer Kafka UI in read only mode
* If you prefer UI for Apache Kafka in read only mode

```yaml
version: '2'

@@ -37,7 +37,7 @@ services:
      - KAFKA_CLUSTERS_0_READONLY=true
```

* Start Kafka UI process
* Start UI for Apache Kafka process

```bash
docker-compose up -d kafka-ui
docker/connectors/sink-activities.json (new file, 19 lines)

@@ -0,0 +1,19 @@
|
|||
{
|
||||
"name": "sink_postgres_activities",
|
||||
"config": {
|
||||
"connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
|
||||
"connection.url": "jdbc:postgresql://postgres-db:5432/test",
|
||||
"connection.user": "dev_user",
|
||||
"connection.password": "12345",
|
||||
"topics": "source-activities",
|
||||
"table.name.format": "sink_activities",
|
||||
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"key.converter.schema.registry.url": "http://schemaregistry0:8085",
|
||||
"value.converter": "io.confluent.connect.avro.AvroConverter",
|
||||
"value.converter.schema.registry.url": "http://schemaregistry0:8085",
|
||||
"auto.create": "true",
|
||||
"pk.mode": "record_value",
|
||||
"pk.fields": "id",
|
||||
"insert.mode": "upsert"
|
||||
}
|
||||
}
|
docker/connectors/source-activities.json (new file, 20 lines)

@@ -0,0 +1,20 @@
|
|||
{
|
||||
"name": "source_postgres_activities",
|
||||
"config": {
|
||||
"connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector",
|
||||
"connection.url": "jdbc:postgresql://postgres-db:5432/test",
|
||||
"connection.user": "dev_user",
|
||||
"connection.password": "12345",
|
||||
"topic.prefix": "source-",
|
||||
"poll.interval.ms": 3600000,
|
||||
"table.whitelist": "public.activities",
|
||||
"mode": "bulk",
|
||||
"transforms": "extractkey",
|
||||
"transforms.extractkey.type": "org.apache.kafka.connect.transforms.ExtractField$Key",
|
||||
"transforms.extractkey.field": "id",
|
||||
"key.converter": "org.apache.kafka.connect.storage.StringConverter",
|
||||
"key.converter.schema.registry.url": "http://schemaregistry0:8085",
|
||||
"value.converter": "io.confluent.connect.avro.AvroConverter",
|
||||
"value.converter.schema.registry.url": "http://schemaregistry0:8085"
|
||||
}
|
||||
}
|
docker/connectors/start.sh (new executable file, 9 lines)

@@ -0,0 +1,9 @@
#! /bin/bash
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' kafka-connect0:8083)" != "200" ]]
do sleep 5
done

echo "\n --------------Creating connectors..."
for filename in /connectors/*.json; do
  curl -X POST -H "Content-Type: application/json" -d @$filename http://kafka-connect0:8083/connectors
done
docker/jaas/client.properties (new file, 3 lines)

@@ -0,0 +1,3 @@
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";
security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN
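As a hedged illustration (not part of the commit), a Kafka CLI client could point at this properties file to talk to the SASL_PLAINTEXT listener defined in `docker/kafka-ui-sasl.yaml`; the topic name and file path below are placeholders:

```sh
# Hypothetical usage: consume from the SASL_PLAINTEXT broker using the credentials above.
kafka-console-consumer --bootstrap-server kafka:9092 \
  --topic test-topic \
  --from-beginning \
  --consumer.config /path/to/docker/jaas/client.properties
```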
docker/jaas/kafka_server.conf (new file, 14 lines)

@@ -0,0 +1,14 @@
KafkaServer {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="admin"
    password="admin-secret"
    user_admin="admin-secret"
    user_enzo="cisternino";
};

KafkaClient {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    user_admin="admin-secret";
};

Client {};
docker/jaas/schema_registry.jaas (new file, 5 lines)

@@ -0,0 +1,5 @@
SchemaRegistryProps {
    org.eclipse.jetty.jaas.spi.PropertyFileLoginModule required
    file="/conf/schema_registry.password"
    debug="false";
};
docker/jaas/schema_registry.password (new file, 1 line)

@@ -0,0 +1 @@
admin: OBF:1w8t1tvf1w261w8v1w1c1tvn1w8x,admin
docker/kafka-cluster-sr-auth.yaml (new file, 66 lines)

@@ -0,0 +1,66 @@
|
|||
---
|
||||
version: '2'
|
||||
services:
|
||||
|
||||
zookeeper1:
|
||||
image: confluentinc/cp-zookeeper:5.2.4
|
||||
environment:
|
||||
ZOOKEEPER_CLIENT_PORT: 2181
|
||||
ZOOKEEPER_TICK_TIME: 2000
|
||||
ports:
|
||||
- 2182:2181
|
||||
|
||||
kafka1:
|
||||
image: confluentinc/cp-kafka:5.2.4
|
||||
depends_on:
|
||||
- zookeeper1
|
||||
environment:
|
||||
KAFKA_BROKER_ID: 1
|
||||
KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
|
||||
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
|
||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
|
||||
JMX_PORT: 9998
|
||||
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9998
|
||||
ports:
|
||||
- 9093:9093
|
||||
- 9998:9998
|
||||
|
||||
schemaregistry1:
|
||||
image: confluentinc/cp-schema-registry:5.5.0
|
||||
ports:
|
||||
- 18085:8085
|
||||
depends_on:
|
||||
- zookeeper1
|
||||
- kafka1
|
||||
volumes:
|
||||
- ./jaas:/conf
|
||||
environment:
|
||||
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
|
||||
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
|
||||
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
|
||||
SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
|
||||
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
|
||||
|
||||
# Default credentials: admin/letmein
|
||||
SCHEMA_REGISTRY_AUTHENTICATION_METHOD: BASIC
|
||||
SCHEMA_REGISTRY_AUTHENTICATION_REALM: SchemaRegistryProps
|
||||
SCHEMA_REGISTRY_AUTHENTICATION_ROLES: admin
|
||||
SCHEMA_REGISTRY_OPTS: -Djava.security.auth.login.config=/conf/schema_registry.jaas
|
||||
|
||||
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
|
||||
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
|
||||
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
|
||||
|
||||
kafka-init-topics:
|
||||
image: confluentinc/cp-kafka:5.2.4
|
||||
volumes:
|
||||
- ./message.json:/data/message.json
|
||||
depends_on:
|
||||
- kafka1
|
||||
command: "bash -c 'echo Waiting for Kafka to be ready... && \
|
||||
cub kafka-ready -b kafka1:29092 1 30 && \
|
||||
kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
|
||||
kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
|
||||
kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
|
docker/kafka-ui-connectors.yaml (new file, 173 lines)

@@ -0,0 +1,173 @@
|
|||
---
|
||||
version: '2'
|
||||
services:
|
||||
|
||||
kafka-ui:
|
||||
container_name: kafka-ui
|
||||
image: provectuslabs/kafka-ui:master
|
||||
ports:
|
||||
- 8080:8080
|
||||
depends_on:
|
||||
- zookeeper0
|
||||
- zookeeper1
|
||||
- kafka0
|
||||
- kafka1
|
||||
- schemaregistry0
|
||||
- kafka-connect0
|
||||
environment:
|
||||
KAFKA_CLUSTERS_0_NAME: local
|
||||
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
|
||||
KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
|
||||
KAFKA_CLUSTERS_0_JMXPORT: 9997
|
||||
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
|
||||
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
|
||||
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
|
||||
KAFKA_CLUSTERS_1_NAME: secondLocal
|
||||
KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
|
||||
KAFKA_CLUSTERS_1_ZOOKEEPER: zookeeper1:2181
|
||||
KAFKA_CLUSTERS_1_JMXPORT: 9998
|
||||
KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
|
||||
KAFKA_CLUSTERS_1_KAFKACONNECT_0_NAME: first
|
||||
KAFKA_CLUSTERS_1_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
|
||||
|
||||
zookeeper0:
|
||||
image: confluentinc/cp-zookeeper:5.2.4
|
||||
environment:
|
||||
ZOOKEEPER_CLIENT_PORT: 2181
|
||||
ZOOKEEPER_TICK_TIME: 2000
|
||||
ports:
|
||||
- 2181:2181
|
||||
|
||||
kafka0:
|
||||
image: confluentinc/cp-kafka:5.2.4
|
||||
depends_on:
|
||||
- zookeeper0
|
||||
ports:
|
||||
- 9092:9092
|
||||
- 9997:9997
|
||||
environment:
|
||||
KAFKA_BROKER_ID: 1
|
||||
KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
|
||||
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
|
||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
|
||||
JMX_PORT: 9997
|
||||
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
|
||||
|
||||
zookeeper1:
|
||||
image: confluentinc/cp-zookeeper:5.2.4
|
||||
environment:
|
||||
ZOOKEEPER_CLIENT_PORT: 2181
|
||||
ZOOKEEPER_TICK_TIME: 2000
|
||||
|
||||
kafka1:
|
||||
image: confluentinc/cp-kafka:5.2.4
|
||||
depends_on:
|
||||
- zookeeper1
|
||||
ports:
|
||||
- 9093:9093
|
||||
- 9998:9998
|
||||
environment:
|
||||
KAFKA_BROKER_ID: 1
|
||||
KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
|
||||
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
|
||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
|
||||
JMX_PORT: 9998
|
||||
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka1 -Dcom.sun.management.jmxremote.rmi.port=9998
|
||||
|
||||
schemaregistry0:
|
||||
image: confluentinc/cp-schema-registry:5.2.4
|
||||
ports:
|
||||
- 8085:8085
|
||||
depends_on:
|
||||
- zookeeper0
|
||||
- kafka0
|
||||
environment:
|
||||
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
|
||||
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
|
||||
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
|
||||
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
|
||||
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
|
||||
|
||||
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
|
||||
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
|
||||
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
|
||||
|
||||
schemaregistry1:
|
||||
image: confluentinc/cp-schema-registry:5.5.0
|
||||
ports:
|
||||
- 18085:8085
|
||||
depends_on:
|
||||
- zookeeper1
|
||||
- kafka1
|
||||
environment:
|
||||
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
|
||||
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
|
||||
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
|
||||
SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
|
||||
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
|
||||
|
||||
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
|
||||
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
|
||||
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
|
||||
|
||||
kafka-connect0:
|
||||
image: confluentinc/cp-kafka-connect:6.0.1
|
||||
ports:
|
||||
- 8083:8083
|
||||
depends_on:
|
||||
- kafka0
|
||||
- schemaregistry0
|
||||
environment:
|
||||
CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
|
||||
CONNECT_GROUP_ID: compose-connect-group
|
||||
CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
|
||||
CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
|
||||
CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
|
||||
CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
|
||||
CONNECT_STATUS_STORAGE_TOPIC: _connect_status
|
||||
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
|
||||
CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
|
||||
CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
|
||||
CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
|
||||
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
|
||||
CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
|
||||
CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
|
||||
CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
|
||||
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
|
||||
|
||||
kafka-init-topics:
|
||||
image: confluentinc/cp-kafka:5.2.4
|
||||
volumes:
|
||||
- ./message.json:/data/message.json
|
||||
depends_on:
|
||||
- kafka1
|
||||
command: "bash -c 'echo Waiting for Kafka to be ready... && \
|
||||
cub kafka-ready -b kafka1:29092 1 30 && \
|
||||
kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
|
||||
kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
|
||||
kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
|
||||
kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
|
||||
|
||||
postgres-db:
|
||||
build:
|
||||
context: ./postgres
|
||||
args:
|
||||
image: postgres:9.6.22
|
||||
ports:
|
||||
- 5432:5432
|
||||
environment:
|
||||
POSTGRES_USER: 'dev_user'
|
||||
POSTGRES_PASSWORD: '12345'
|
||||
|
||||
create-connectors:
|
||||
image: tutum/curl
|
||||
depends_on:
|
||||
- postgres-db
|
||||
- kafka-connect0
|
||||
volumes:
|
||||
- ./connectors:/connectors
|
||||
command: bash -c '/connectors/start.sh'
|
docker/kafka-ui-reverse-proxy.yaml (new file, 19 lines)

@@ -0,0 +1,19 @@
---
version: '2'
services:
  nginx:
    image: nginx:latest
    volumes:
      - ./proxy.conf:/etc/nginx/conf.d/default.conf
    ports:
      - 8080:80

  kafka-ui:
    container_name: kafka-ui
    image: provectuslabs/kafka-ui:latest
    ports:
      - 8082:8080
    environment:
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
      SERVER_SERVLET_CONTEXT_PATH: /kafka-ui
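To sanity-check the reverse-proxy setup (an illustrative command, not part of the commit), you can confirm that the UI answers on the proxied context path exposed by nginx:

```sh
# nginx publishes container port 80 as host port 8080 and forwards /kafka-ui to the UI container.
curl -I http://localhost:8080/kafka-ui
```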
docker/kafka-ui-sasl.yaml (new file, 52 lines)

@@ -0,0 +1,52 @@
|
|||
---
|
||||
version: '2'
|
||||
services:
|
||||
|
||||
kafka-ui:
|
||||
container_name: kafka-ui
|
||||
image: provectuslabs/kafka-ui:latest
|
||||
ports:
|
||||
- 8080:8080
|
||||
depends_on:
|
||||
- zookeeper
|
||||
- kafka
|
||||
environment:
|
||||
KAFKA_CLUSTERS_0_NAME: local
|
||||
# SERVER_SERVLET_CONTEXT_PATH: "/kafkaui"
|
||||
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:9092
|
||||
KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper:2181
|
||||
KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SASL_PLAINTEXT
|
||||
KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM: PLAIN
|
||||
KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG: 'org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="admin-secret";'
|
||||
zookeeper:
|
||||
image: confluentinc/cp-zookeeper:5.2.4
|
||||
environment:
|
||||
ZOOKEEPER_CLIENT_PORT: 2181
|
||||
ZOOKEEPER_TICK_TIME: 2000
|
||||
ports:
|
||||
- 2181:2181
|
||||
|
||||
kafka:
|
||||
image: wurstmeister/kafka:latest
|
||||
hostname: kafka
|
||||
container_name: kafka
|
||||
depends_on:
|
||||
- zookeeper
|
||||
ports:
|
||||
- '9092:9092'
|
||||
environment:
|
||||
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
|
||||
KAFKA_LISTENERS: SASL_PLAINTEXT://kafka:9092
|
||||
KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://kafka:9092
|
||||
KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'true'
|
||||
ALLOW_PLAINTEXT_LISTENER: 'yes'
|
||||
KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas/kafka_server.conf"
|
||||
KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: SASL_PLAINTEXT
|
||||
KAFKA_SASL_ENABLED_MECHANISMS: PLAIN
|
||||
KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
|
||||
KAFKA_SECURITY_PROTOCOL: SASL_PLAINTEXT
|
||||
KAFKA_SUPER_USERS: User:admin,User:enzo
|
||||
KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: 'true'
|
||||
volumes:
|
||||
- ./jaas:/etc/kafka/jaas
|

@@ -26,7 +26,7 @@ services:
      KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
      KAFKA_CLUSTERS_1_ZOOKEEPER: zookeeper1:2181
      KAFKA_CLUSTERS_1_JMXPORT: 9998
      KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry0:8085
      KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
      KAFKA_CLUSTERS_1_KAFKACONNECT_0_NAME: first
      KAFKA_CLUSTERS_1_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
docker/message.json (new file, 1 line)

@@ -0,0 +1 @@
{}
docker/postgres/Dockerfile (new file, 9 lines)

@@ -0,0 +1,9 @@
ARG image

FROM ${image}

MAINTAINER Provectus Team

ADD data.sql /docker-entrypoint-initdb.d

EXPOSE 5432
docker/postgres/data.sql (new file, 24 lines)

@@ -0,0 +1,24 @@
|
|||
CREATE DATABASE test WITH OWNER = dev_user;
|
||||
\connect test
|
||||
|
||||
CREATE TABLE activities
|
||||
(
|
||||
id INTEGER PRIMARY KEY,
|
||||
msg varchar(24),
|
||||
action varchar(128),
|
||||
browser varchar(24),
|
||||
device json,
|
||||
createdAt timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
|
||||
insert into activities(id, action, msg, browser, device)
|
||||
values (1, 'LOGIN', 'Success', 'Chrome', '{
|
||||
"name": "Chrome",
|
||||
"major": "67",
|
||||
"version": "67.0.3396.99"
|
||||
}'),
|
||||
(2, 'LOGIN', 'Failed', 'Apple WebKit', '{
|
||||
"name": "WebKit",
|
||||
"major": "605",
|
||||
"version": "605.1.15"
|
||||
}');
|
docker/proxy.conf (new file, 9 lines)

@@ -0,0 +1,9 @@
server {
    listen 80;
    server_name localhost;

    location /kafka-ui {
        # rewrite /kafka-ui/(.*) /$1 break;
        proxy_pass http://kafka-ui:8080;
    }
}
guides/AWS_IAM.md (new file, 41 lines)

@@ -0,0 +1,41 @@
# How to configure AWS IAM Authentication

UI for Apache Kafka comes with a built-in [aws-msk-iam-auth](https://github.com/aws/aws-msk-iam-auth) library.

You can pass SASL configs in the properties section for each cluster.

More details can be found here: [aws-msk-iam-auth](https://github.com/aws/aws-msk-iam-auth)

## Examples:

Please replace
* <KAFKA_URL> with your broker list
* <PROFILE_NAME> with your AWS profile


### Running From Docker Image

```sh
docker run -p 8080:8080 \
    -e KAFKA_CLUSTERS_0_NAME=local \
    -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=<KAFKA_URL> \
    -e KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL=SASL_SSL \
    -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_MECHANISM=AWS_MSK_IAM \
    -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_CLIENT_CALLBACK_HANDLER_CLASS=software.amazon.msk.auth.iam.IAMClientCallbackHandler \
    -e KAFKA_CLUSTERS_0_PROPERTIES_SASL_JAAS_CONFIG=software.amazon.msk.auth.iam.IAMLoginModule required awsProfileName="<PROFILE_NAME>"; \
    -d provectuslabs/kafka-ui:latest
```

### Configuring by application.yaml

```yaml
kafka:
  clusters:
    - name: local
      bootstrapServers: <KAFKA_URL>
      properties:
        security.protocol: SASL_SSL
        sasl.mechanism: AWS_MSK_IAM
        sasl.client.callback.handler.class: software.amazon.msk.auth.iam.IAMClientCallbackHandler
        sasl.jaas.config: software.amazon.msk.auth.iam.IAMLoginModule required awsProfileName="<PROFILE_NAME>";
```
guides/SSO.md (new file, 48 lines)

@@ -0,0 +1,48 @@
# How to configure SSO
SSO additionally requires TLS to be configured for the application. In this example we will use a self-signed certificate; if you use CA-signed certificates, please skip step 1.
#### Step 1
In this step we will generate a self-signed PKCS12 keypair.
``` bash
mkdir cert
keytool -genkeypair -alias ui-for-apache-kafka -keyalg RSA -keysize 2048 \
  -storetype PKCS12 -keystore cert/ui-for-apache-kafka.p12 -validity 3650
```
#### Step 2
Create a new application in any SSO provider; we will continue with [Auth0](https://auth0.com).

<img src="https://github.com/provectus/kafka-ui/raw/images/images/sso-new-app.png" width="70%"/>

After that you need to provide callback URLs; in our case we will use `https://127.0.0.1:8080/login/oauth2/code/auth0`

<img src="https://github.com/provectus/kafka-ui/raw/images/images/sso-configuration.png" width="70%"/>

These are the main parameters required for enabling SSO

<img src="https://github.com/provectus/kafka-ui/raw/images/images/sso-parameters.png" width="70%"/>

#### Step 3
To launch UI for Apache Kafka with TLS and SSO enabled, run the following:
``` bash
docker run -p 8080:8080 -v `pwd`/cert:/opt/cert -e AUTH_ENABLED=true \
  -e SECURITY_BASIC_ENABLED=true \
  -e SERVER_SSL_KEY_STORE_TYPE=PKCS12 \
  -e SERVER_SSL_KEY_STORE=/opt/cert/ui-for-apache-kafka.p12 \
  -e SERVER_SSL_KEY_STORE_PASSWORD=123456 \
  -e SERVER_SSL_KEY_ALIAS=ui-for-apache-kafka \
  -e SERVER_SSL_ENABLED=true \
  -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
  -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTSECRET=YXfRjmodifiedTujnkVr7zuW9ECCAK4TcnCio-i \
  -e SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI=https://dev-a63ggcut.auth0.com/ \
  -e TRUST_STORE=/opt/cert/ui-for-apache-kafka.p12 \
  -e TRUST_STORE_PASSWORD=123456 \
  provectuslabs/kafka-ui:0.1.0
```
In the case of a trusted CA-signed SSL certificate and SSL termination somewhere outside of the application, we can pass only the SSO-related environment variables:
``` bash
docker run -p 8080:8080 -v `pwd`/cert:/opt/cert -e AUTH_ENABLED=true \
  -e SECURITY_BASIC_ENABLED=true \
  -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
  -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTSECRET=YXfRjmodifiedTujnkVr7zuW9ECCAK4TcnCio-i \
  -e SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_AUTH0_ISSUER_URI=https://dev-a63ggcut.auth0.com/ \
  provectuslabs/kafka-ui:0.1.0
```
images/apache-kafka-ui-interface-dashboard.png (new binary file, 87 KiB; binary content not shown)
(another binary image removed, 76 KiB; binary content not shown)
|
@ -4,7 +4,7 @@
|
|||
<parent>
|
||||
<artifactId>kafka-ui</artifactId>
|
||||
<groupId>com.provectus</groupId>
|
||||
<version>0.0.11-SNAPSHOT</version>
|
||||
<version>0.1.1-SNAPSHOT</version>
|
||||
</parent>
|
||||
<modelVersion>4.0.0</modelVersion>
|
||||
|
||||
|
@ -86,12 +86,23 @@
|
|||
<artifactId>kafka-avro-serializer</artifactId>
|
||||
<version>${confluent.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.confluent</groupId>
|
||||
<artifactId>kafka-json-schema-serializer</artifactId>
|
||||
<version>${confluent.version}</version>
|
||||
</dependency>
|
||||
<dependency>
|
||||
<groupId>io.confluent</groupId>
|
||||
<artifactId>kafka-protobuf-serializer</artifactId>
|
||||
<version>${confluent.version}</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>software.amazon.msk</groupId>
|
||||
<artifactId>aws-msk-iam-auth</artifactId>
|
||||
<version>1.1.0</version>
|
||||
</dependency>
|
||||
|
||||
<dependency>
|
||||
<groupId>org.apache.avro</groupId>
|
||||
<artifactId>avro</artifactId>
|
||||
|
|
|
@ -20,8 +20,10 @@ public class ClustersProperties {
|
|||
String bootstrapServers;
|
||||
String zookeeper;
|
||||
String schemaRegistry;
|
||||
SchemaRegistryAuth schemaRegistryAuth;
|
||||
String ksqldbServer;
|
||||
String schemaNameTemplate = "%s-value";
|
||||
String keySchemaNameTemplate = "%s-key";
|
||||
String protobufFile;
|
||||
String protobufMessageName;
|
||||
List<ConnectCluster> kafkaConnect;
|
||||
|
@ -35,4 +37,10 @@ public class ClustersProperties {
|
|||
String name;
|
||||
String address;
|
||||
}
|
||||
|
||||
@Data
|
||||
public static class SchemaRegistryAuth {
|
||||
String username;
|
||||
String password;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -22,7 +22,7 @@ public class Config {
|
|||
}
|
||||
|
||||
private GenericKeyedObjectPoolConfig poolConfig() {
|
||||
GenericKeyedObjectPoolConfig poolConfig = new GenericKeyedObjectPoolConfig();
|
||||
final var poolConfig = new GenericKeyedObjectPoolConfig();
|
||||
poolConfig.setMaxIdlePerKey(3);
|
||||
poolConfig.setMaxTotalPerKey(3);
|
||||
return poolConfig;
|
||||
|
@ -30,7 +30,7 @@ public class Config {
|
|||
|
||||
@Bean
|
||||
public MBeanExporter exporter() {
|
||||
final MBeanExporter exporter = new MBeanExporter();
|
||||
final var exporter = new MBeanExporter();
|
||||
exporter.setAutodetect(true);
|
||||
exporter.setExcludedBeans("pool");
|
||||
return exporter;
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
package com.provectus.kafka.ui.config;
|
||||
|
||||
import org.springframework.boot.autoconfigure.web.ServerProperties;
|
||||
import org.springframework.stereotype.Component;
|
||||
import org.springframework.web.server.ServerWebExchange;
|
||||
import org.springframework.web.server.WebFilter;
|
||||
|
@ -7,14 +8,31 @@ import org.springframework.web.server.WebFilterChain;
|
|||
import reactor.core.publisher.Mono;
|
||||
|
||||
@Component
|
||||
|
||||
public class CustomWebFilter implements WebFilter {
|
||||
|
||||
private final ServerProperties serverProperties;
|
||||
|
||||
public CustomWebFilter(ServerProperties serverProperties) {
|
||||
this.serverProperties = serverProperties;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Void> filter(ServerWebExchange exchange, WebFilterChain chain) {
|
||||
if (exchange.getRequest().getURI().getPath().equals("/")
|
||||
|| exchange.getRequest().getURI().getPath().startsWith("/ui")) {
|
||||
String contextPath = serverProperties.getServlet().getContextPath() != null
|
||||
? serverProperties.getServlet().getContextPath() : "";
|
||||
|
||||
final String path = exchange.getRequest().getURI().getPath().replaceAll("/$", "");
|
||||
if (path.equals(contextPath) || path.startsWith(contextPath + "/ui")) {
|
||||
return chain.filter(
|
||||
exchange.mutate().request(exchange.getRequest().mutate().path("/index.html").build())
|
||||
.build());
|
||||
.build()
|
||||
);
|
||||
} else if (path.startsWith(contextPath)) {
|
||||
return chain.filter(
|
||||
exchange.mutate().request(exchange.getRequest().mutate().contextPath(contextPath).build())
|
||||
.build()
|
||||
);
|
||||
}
|
||||
|
||||
return chain.filter(exchange);
|
||||
|
|
|
@ -39,4 +39,10 @@ public class ClustersController implements ClustersApi {
|
|||
public Mono<ResponseEntity<Flux<Cluster>>> getClusters(ServerWebExchange exchange) {
|
||||
return Mono.just(ResponseEntity.ok(Flux.fromIterable(clusterService.getClusters())));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<Cluster>> updateClusterInfo(String clusterName,
|
||||
ServerWebExchange exchange) {
|
||||
return clusterService.updateCluster(clusterName).map(ResponseEntity::ok);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,13 +1,23 @@
|
|||
package com.provectus.kafka.ui.controller;
|
||||
|
||||
import static java.util.stream.Collectors.toMap;
|
||||
|
||||
import com.provectus.kafka.ui.api.ConsumerGroupsApi;
|
||||
import com.provectus.kafka.ui.exception.ClusterNotFoundException;
|
||||
import com.provectus.kafka.ui.exception.ValidationException;
|
||||
import com.provectus.kafka.ui.model.ConsumerGroup;
|
||||
import com.provectus.kafka.ui.model.ConsumerGroupDetails;
|
||||
import com.provectus.kafka.ui.model.TopicConsumerGroups;
|
||||
import com.provectus.kafka.ui.model.ConsumerGroupOffsetsReset;
|
||||
import com.provectus.kafka.ui.model.PartitionOffset;
|
||||
import com.provectus.kafka.ui.service.ClusterService;
|
||||
import com.provectus.kafka.ui.service.ClustersStorage;
|
||||
import com.provectus.kafka.ui.service.OffsetsResetService;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.log4j.Log4j2;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.util.CollectionUtils;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
import org.springframework.web.server.ServerWebExchange;
|
||||
import reactor.core.publisher.Flux;
|
||||
|
@ -18,6 +28,15 @@ import reactor.core.publisher.Mono;
|
|||
@Log4j2
|
||||
public class ConsumerGroupsController implements ConsumerGroupsApi {
|
||||
private final ClusterService clusterService;
|
||||
private final OffsetsResetService offsetsResetService;
|
||||
private final ClustersStorage clustersStorage;
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<Void>> deleteConsumerGroup(String clusterName, String id,
|
||||
ServerWebExchange exchange) {
|
||||
return clusterService.deleteConsumerGroupById(clusterName, id)
|
||||
.map(ResponseEntity::ok);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<ConsumerGroupDetails>> getConsumerGroup(
|
||||
|
@ -37,9 +56,56 @@ public class ConsumerGroupsController implements ConsumerGroupsApi {
|
|||
}
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<TopicConsumerGroups>> getTopicConsumerGroups(
|
||||
public Mono<ResponseEntity<Flux<ConsumerGroup>>> getTopicConsumerGroups(
|
||||
String clusterName, String topicName, ServerWebExchange exchange) {
|
||||
return clusterService.getTopicConsumerGroupDetail(clusterName, topicName)
|
||||
.map(ResponseEntity::ok);
|
||||
return clusterService.getConsumerGroups(clusterName, Optional.of(topicName))
|
||||
.map(Flux::fromIterable)
|
||||
.map(ResponseEntity::ok)
|
||||
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<Void>> resetConsumerGroupOffsets(String clusterName, String group,
|
||||
Mono<ConsumerGroupOffsetsReset>
|
||||
consumerGroupOffsetsReset,
|
||||
ServerWebExchange exchange) {
|
||||
return consumerGroupOffsetsReset.map(reset -> {
|
||||
var cluster =
|
||||
clustersStorage.getClusterByName(clusterName).orElseThrow(ClusterNotFoundException::new);
|
||||
|
||||
switch (reset.getResetType()) {
|
||||
case EARLIEST:
|
||||
offsetsResetService
|
||||
.resetToEarliest(cluster, group, reset.getTopic(), reset.getPartitions());
|
||||
break;
|
||||
case LATEST:
|
||||
offsetsResetService
|
||||
.resetToLatest(cluster, group, reset.getTopic(), reset.getPartitions());
|
||||
break;
|
||||
case TIMESTAMP:
|
||||
if (reset.getResetToTimestamp() == null) {
|
||||
throw new ValidationException(
|
||||
"resetToTimestamp is required when TIMESTAMP reset type used");
|
||||
}
|
||||
offsetsResetService
|
||||
.resetToTimestamp(cluster, group, reset.getTopic(), reset.getPartitions(),
|
||||
reset.getResetToTimestamp());
|
||||
break;
|
||||
case OFFSET:
|
||||
if (CollectionUtils.isEmpty(reset.getPartitionsOffsets())) {
|
||||
throw new ValidationException(
|
||||
"partitionsOffsets is required when OFFSET reset type used");
|
||||
}
|
||||
Map<Integer, Long> offsets = reset.getPartitionsOffsets().stream()
|
||||
.collect(toMap(PartitionOffset::getPartition, PartitionOffset::getOffset));
|
||||
offsetsResetService.resetToOffsets(cluster, group, reset.getTopic(), offsets);
|
||||
break;
|
||||
default:
|
||||
throw new ValidationException("Unknown resetType " + reset.getResetType());
|
||||
}
|
||||
return ResponseEntity.ok().build();
|
||||
});
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -2,8 +2,11 @@ package com.provectus.kafka.ui.controller;
|
|||
|
||||
import com.provectus.kafka.ui.api.MessagesApi;
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.CreateTopicMessage;
|
||||
import com.provectus.kafka.ui.model.SeekDirection;
|
||||
import com.provectus.kafka.ui.model.SeekType;
|
||||
import com.provectus.kafka.ui.model.TopicMessage;
|
||||
import com.provectus.kafka.ui.model.TopicMessageSchema;
|
||||
import com.provectus.kafka.ui.service.ClusterService;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
@ -13,6 +16,7 @@ import javax.validation.Valid;
|
|||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.log4j.Log4j2;
|
||||
import org.apache.commons.lang3.tuple.Pair;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
import org.springframework.web.server.ServerWebExchange;
|
||||
|
@ -40,28 +44,50 @@ public class MessagesController implements MessagesApi {
|
|||
@Override
|
||||
public Mono<ResponseEntity<Flux<TopicMessage>>> getTopicMessages(
|
||||
String clusterName, String topicName, @Valid SeekType seekType, @Valid List<String> seekTo,
|
||||
@Valid Integer limit, @Valid String q, ServerWebExchange exchange) {
|
||||
return parseConsumerPosition(seekType, seekTo)
|
||||
@Valid Integer limit, @Valid String q, @Valid SeekDirection seekDirection,
|
||||
ServerWebExchange exchange) {
|
||||
return parseConsumerPosition(topicName, seekType, seekTo, seekDirection)
|
||||
.map(consumerPosition -> ResponseEntity
|
||||
.ok(clusterService.getMessages(clusterName, topicName, consumerPosition, q, limit)));
|
||||
}
|
||||
|
||||
private Mono<ConsumerPosition> parseConsumerPosition(SeekType seekType, List<String> seekTo) {
|
||||
@Override
|
||||
public Mono<ResponseEntity<TopicMessageSchema>> getTopicSchema(
|
||||
String clusterName, String topicName, ServerWebExchange exchange) {
|
||||
return Mono.just(clusterService.getTopicSchema(clusterName, topicName))
|
||||
.map(ResponseEntity::ok);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<Void>> sendTopicMessages(
|
||||
String clusterName, String topicName, @Valid Mono<CreateTopicMessage> createTopicMessage,
|
||||
ServerWebExchange exchange) {
|
||||
return createTopicMessage.flatMap(msg ->
|
||||
clusterService.sendMessage(clusterName, topicName, msg)
|
||||
).map(ResponseEntity::ok);
|
||||
}
|
||||
|
||||
|
||||
private Mono<ConsumerPosition> parseConsumerPosition(
|
||||
String topicName, SeekType seekType, List<String> seekTo, SeekDirection seekDirection) {
|
||||
return Mono.justOrEmpty(seekTo)
|
||||
.defaultIfEmpty(Collections.emptyList())
|
||||
.flatMapIterable(Function.identity())
|
||||
.map(p -> {
|
||||
String[] splited = p.split("::");
|
||||
if (splited.length != 2) {
|
||||
String[] split = p.split("::");
|
||||
if (split.length != 2) {
|
||||
throw new IllegalArgumentException(
|
||||
"Wrong seekTo argument format. See API docs for details");
|
||||
}
|
||||
|
||||
return Pair.of(Integer.parseInt(splited[0]), Long.parseLong(splited[1]));
|
||||
return Pair.of(
|
||||
new TopicPartition(topicName, Integer.parseInt(split[0])),
|
||||
Long.parseLong(split[1])
|
||||
);
|
||||
})
|
||||
.collectMap(Pair::getKey, Pair::getValue)
|
||||
.map(positions -> new ConsumerPosition(seekType != null ? seekType : SeekType.BEGINNING,
|
||||
positions));
|
||||
positions, seekDirection));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -0,0 +1,55 @@
|
|||
package com.provectus.kafka.ui.controller;
|
||||
|
||||
import com.provectus.kafka.ui.util.ResourceUtil;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.SneakyThrows;
|
||||
import lombok.extern.log4j.Log4j2;
|
||||
import org.springframework.beans.factory.annotation.Value;
|
||||
import org.springframework.boot.autoconfigure.web.ServerProperties;
|
||||
import org.springframework.core.io.Resource;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.web.bind.annotation.GetMapping;
|
||||
import org.springframework.web.bind.annotation.RestController;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
@RestController
|
||||
@RequiredArgsConstructor
|
||||
@Log4j2
|
||||
public class StaticController {
|
||||
private final ServerProperties serverProperties;
|
||||
|
||||
@Value("classpath:static/index.html")
|
||||
private Resource indexFile;
|
||||
private final AtomicReference<String> renderedIndexFile = new AtomicReference<>();
|
||||
|
||||
@GetMapping(value = "/index.html", produces = { "text/html" })
|
||||
public Mono<ResponseEntity<String>> getIndex() {
|
||||
return Mono.just(ResponseEntity.ok(getRenderedIndexFile()));
|
||||
}
|
||||
|
||||
public String getRenderedIndexFile() {
|
||||
String rendered = renderedIndexFile.get();
|
||||
if (rendered == null) {
|
||||
rendered = buildIndexFile();
|
||||
if (renderedIndexFile.compareAndSet(null, rendered)) {
|
||||
return rendered;
|
||||
} else {
|
||||
return renderedIndexFile.get();
|
||||
}
|
||||
} else {
|
||||
return rendered;
|
||||
}
|
||||
}
|
||||
|
||||
@SneakyThrows
|
||||
private String buildIndexFile() {
|
||||
final String contextPath = serverProperties.getServlet().getContextPath() != null
|
||||
? serverProperties.getServlet().getContextPath() : "";
|
||||
final String staticPath = contextPath + "/static";
|
||||
return ResourceUtil.readAsString(indexFile)
|
||||
.replace("href=\"./static", "href=\"" + staticPath)
|
||||
.replace("src=\"./static", "src=\"" + staticPath)
|
||||
.replace("window.basePath=\"\"", "window.basePath=\"" + contextPath + "\"");
|
||||
}
|
||||
}
|
|
@ -1,6 +1,10 @@
|
|||
package com.provectus.kafka.ui.controller;
|
||||
|
||||
import com.provectus.kafka.ui.api.TopicsApi;
|
||||
import com.provectus.kafka.ui.model.PartitionsIncrease;
|
||||
import com.provectus.kafka.ui.model.PartitionsIncreaseResponse;
|
||||
import com.provectus.kafka.ui.model.ReplicationFactorChange;
|
||||
import com.provectus.kafka.ui.model.ReplicationFactorChangeResponse;
|
||||
import com.provectus.kafka.ui.model.Topic;
|
||||
import com.provectus.kafka.ui.model.TopicColumnsToSort;
|
||||
import com.provectus.kafka.ui.model.TopicConfig;
|
||||
|
@ -86,4 +90,23 @@ public class TopicsController implements TopicsApi {
|
|||
ServerWebExchange exchange) {
|
||||
return clusterService.updateTopic(clusterId, topicName, topicUpdate).map(ResponseEntity::ok);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<PartitionsIncreaseResponse>> increaseTopicPartitions(
|
||||
String clusterName, String topicName,
|
||||
Mono<PartitionsIncrease> partitionsIncrease,
|
||||
ServerWebExchange exchange) {
|
||||
return partitionsIncrease.flatMap(
|
||||
partitions -> clusterService.increaseTopicPartitions(clusterName, topicName, partitions))
|
||||
.map(ResponseEntity::ok);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<ResponseEntity<ReplicationFactorChangeResponse>> changeReplicationFactor(
|
||||
String clusterName, String topicName, Mono<ReplicationFactorChange> replicationFactorChange,
|
||||
ServerWebExchange exchange) {
|
||||
return replicationFactorChange
|
||||
.flatMap(rfc -> clusterService.changeReplicationFactor(clusterName, topicName, rfc))
|
||||
.map(ResponseEntity::ok);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1,46 +0,0 @@
package com.provectus.kafka.ui.deserialization;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.protobuf.DynamicMessage;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaUtils;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.utils.Bytes;

public class ProtobufFileRecordDeserializer implements RecordDeserializer {
  private final ProtobufSchema protobufSchema;
  private final ObjectMapper objectMapper;

  public ProtobufFileRecordDeserializer(Path protobufSchemaPath, String messageName,
                                        ObjectMapper objectMapper) throws IOException {
    this.objectMapper = objectMapper;
    final String schemaString = Files.lines(protobufSchemaPath).collect(Collectors.joining());
    this.protobufSchema = new ProtobufSchema(schemaString).copy(messageName);
  }

  @Override
  public Object deserialize(ConsumerRecord<Bytes, Bytes> record) {
    try {
      final DynamicMessage message = DynamicMessage.parseFrom(
          protobufSchema.toDescriptor(),
          new ByteArrayInputStream(record.value().get())
      );
      byte[] bytes = ProtobufSchemaUtils.toJson(message);
      return parseJson(bytes);
    } catch (Throwable e) {
      throw new RuntimeException("Failed to parse record from topic " + record.topic(), e);
    }
  }

  private Object parseJson(byte[] bytes) throws IOException {
    return objectMapper.readValue(bytes, new TypeReference<Map<String, Object>>() {
    });
  }
}

@@ -1,9 +0,0 @@
package com.provectus.kafka.ui.deserialization;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.utils.Bytes;

public interface RecordDeserializer {

  Object deserialize(ConsumerRecord<Bytes, Bytes> record);
}

@@ -1,202 +0,0 @@
package com.provectus.kafka.ui.deserialization;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.protobuf.Message;
import com.provectus.kafka.ui.model.KafkaCluster;
import io.confluent.kafka.schemaregistry.SchemaProvider;
import io.confluent.kafka.schemaregistry.avro.AvroSchemaProvider;
import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.rest.entities.Schema;
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaProvider;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaUtils;
import io.confluent.kafka.serializers.KafkaAvroDeserializer;
import io.confluent.kafka.serializers.protobuf.KafkaProtobufDeserializer;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import lombok.extern.log4j.Log4j2;
import org.apache.avro.generic.GenericRecord;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.utils.Bytes;

@Log4j2
public class SchemaRegistryRecordDeserializer implements RecordDeserializer {

  private static final int CLIENT_IDENTITY_MAP_CAPACITY = 100;

  private final KafkaCluster cluster;
  private final SchemaRegistryClient schemaRegistryClient;
  private final KafkaAvroDeserializer avroDeserializer;
  private final KafkaProtobufDeserializer<?> protobufDeserializer;
  private final ObjectMapper objectMapper;
  private final StringDeserializer stringDeserializer;

  private final Map<String, MessageFormat> topicFormatMap = new ConcurrentHashMap<>();

  public SchemaRegistryRecordDeserializer(KafkaCluster cluster, ObjectMapper objectMapper) {
    this.cluster = cluster;
    this.objectMapper = objectMapper;

    this.schemaRegistryClient = Optional.ofNullable(cluster.getSchemaRegistry())
        .map(schemaRegistryUrl -> {
          List<SchemaProvider> schemaProviders =
              List.of(new AvroSchemaProvider(), new ProtobufSchemaProvider());
          return new CachedSchemaRegistryClient(
              Collections.singletonList(schemaRegistryUrl),
              CLIENT_IDENTITY_MAP_CAPACITY,
              schemaProviders,
              Collections.emptyMap()
          );
        }
        ).orElse(null);

    this.avroDeserializer = Optional.ofNullable(this.schemaRegistryClient)
        .map(KafkaAvroDeserializer::new)
        .orElse(null);
    this.protobufDeserializer = Optional.ofNullable(this.schemaRegistryClient)
        .map(KafkaProtobufDeserializer::new)
        .orElse(null);
    this.stringDeserializer = new StringDeserializer();
  }

  public Object deserialize(ConsumerRecord<Bytes, Bytes> record) {
    MessageFormat format = getMessageFormat(record);

    try {
      Object parsedValue;
      switch (format) {
        case AVRO:
          parsedValue = parseAvroRecord(record);
          break;
        case PROTOBUF:
          parsedValue = parseProtobufRecord(record);
          break;
        case JSON:
          parsedValue = parseJsonRecord(record);
          break;
        case STRING:
          parsedValue = parseStringRecord(record);
          break;
        default:
          throw new IllegalArgumentException(
              "Unknown message format " + format + " for topic " + record.topic());
      }
      return parsedValue;
    } catch (IOException e) {
      throw new RuntimeException("Failed to parse record from topic " + record.topic(), e);
    }
  }

  private MessageFormat getMessageFormat(ConsumerRecord<Bytes, Bytes> record) {
    return topicFormatMap.computeIfAbsent(record.topic(), k -> detectFormat(record));
  }

  private MessageFormat detectFormat(ConsumerRecord<Bytes, Bytes> record) {
    String schemaName = String.format(cluster.getSchemaNameTemplate(), record.topic());
    if (schemaRegistryClient != null) {
      try {
        final List<Integer> versions = schemaRegistryClient.getAllVersions(schemaName);
        if (!versions.isEmpty()) {
          final Integer version = versions.iterator().next();
          final String subjectName = String.format(cluster.getSchemaNameTemplate(), record.topic());
          final Schema schema = schemaRegistryClient.getByVersion(subjectName, version, false);
          if (schema.getSchemaType().equals(MessageFormat.PROTOBUF.name())) {
            try {
              protobufDeserializer.deserialize(record.topic(), record.value().get());
              return MessageFormat.PROTOBUF;
            } catch (Throwable e) {
              log.info("Failed to get Protobuf schema for topic {}", record.topic(), e);
            }
          } else if (schema.getSchemaType().equals(MessageFormat.AVRO.name())) {
            try {
              avroDeserializer.deserialize(record.topic(), record.value().get());
              return MessageFormat.AVRO;
            } catch (Throwable e) {
              log.info("Failed to get Avro schema for topic {}", record.topic(), e);
            }
          } else if (schema.getSchemaType().equals(MessageFormat.JSON.name())) {
            try {
              parseJsonRecord(record);
              return MessageFormat.JSON;
            } catch (IOException e) {
              log.info("Failed to parse json from topic {}", record.topic());
            }
          }
        }
      } catch (RestClientException | IOException e) {
        log.warn("Failed to get Schema for topic {}", record.topic(), e);
      }
    }

    try {
      parseJsonRecord(record);
      return MessageFormat.JSON;
    } catch (IOException e) {
      log.info("Failed to parse json from topic {}", record.topic());
    }

    return MessageFormat.STRING;
  }

  private Object parseAvroRecord(ConsumerRecord<Bytes, Bytes> record) throws IOException {
    String topic = record.topic();
    if (record.value() != null && avroDeserializer != null) {
      byte[] valueBytes = record.value().get();
      GenericRecord avroRecord = (GenericRecord) avroDeserializer.deserialize(topic, valueBytes);
      byte[] bytes = AvroSchemaUtils.toJson(avroRecord);
      return parseJson(bytes);
    } else {
      return Map.of();
    }
  }

  private Object parseProtobufRecord(ConsumerRecord<Bytes, Bytes> record) throws IOException {
    String topic = record.topic();
    if (record.value() != null && protobufDeserializer != null) {
      byte[] valueBytes = record.value().get();
      final Message message = protobufDeserializer.deserialize(topic, valueBytes);
      byte[] bytes = ProtobufSchemaUtils.toJson(message);
      return parseJson(bytes);
    } else {
      return Map.of();
    }
  }

  private Object parseJsonRecord(ConsumerRecord<Bytes, Bytes> record) throws IOException {
    var value = record.value();
    if (value == null) {
      return Map.of();
    }
    byte[] valueBytes = value.get();
    return parseJson(valueBytes);
  }

  private Object parseJson(byte[] bytes) throws IOException {
    return objectMapper.readValue(bytes, new TypeReference<Map<String, Object>>() {
    });
  }

  private Object parseStringRecord(ConsumerRecord<Bytes, Bytes> record) {
    String topic = record.topic();
    if (record.value() == null) {
      return Map.of();
    }
    byte[] valueBytes = record.value().get();
    return stringDeserializer.deserialize(topic, valueBytes);
  }

  public enum MessageFormat {
    AVRO,
    JSON,
    STRING,
    PROTOBUF
  }
}

@@ -1,19 +0,0 @@
package com.provectus.kafka.ui.deserialization;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.utils.Bytes;

public class SimpleRecordDeserializer implements RecordDeserializer {

  private final StringDeserializer stringDeserializer = new StringDeserializer();

  @Override
  public Object deserialize(ConsumerRecord<Bytes, Bytes> record) {
    if (record.value() != null) {
      return stringDeserializer.deserialize(record.topic(), record.value().get());
    } else {
      return "empty";
    }
  }
}

@@ -0,0 +1,103 @@
package com.provectus.kafka.ui.emitter;

import com.provectus.kafka.ui.util.OffsetsSeekBackward;
import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;

@RequiredArgsConstructor
@Log4j2
public class BackwardRecordEmitter
    implements java.util.function.Consumer<FluxSink<ConsumerRecord<Bytes, Bytes>>> {

  private static final Duration POLL_TIMEOUT_MS = Duration.ofMillis(1000L);

  private final Function<Map<String, Object>, KafkaConsumer<Bytes, Bytes>> consumerSupplier;
  private final OffsetsSeekBackward offsetsSeek;

  @Override
  public void accept(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
    try (KafkaConsumer<Bytes, Bytes> configConsumer = consumerSupplier.apply(Map.of())) {
      final List<TopicPartition> requestedPartitions =
          offsetsSeek.getRequestedPartitions(configConsumer);
      final int msgsPerPartition = offsetsSeek.msgsPerPartition(requestedPartitions.size());
      try (KafkaConsumer<Bytes, Bytes> consumer =
               consumerSupplier.apply(
                   Map.of(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, msgsPerPartition)
               )
      ) {
        final Map<TopicPartition, Long> partitionsOffsets =
            offsetsSeek.getPartitionsOffsets(consumer);
        log.debug("partition offsets: {}", partitionsOffsets);
        var waitingOffsets =
            offsetsSeek.waitingOffsets(consumer, partitionsOffsets.keySet());
        log.debug("waiting offsets {} {}",
            waitingOffsets.getBeginOffsets(),
            waitingOffsets.getEndOffsets()
        );
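        // Page backwards: for each partition, seek msgsPerPartition back from the current
        // upper bound, poll forward, then emit that page's records in reverse order.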
        while (!sink.isCancelled() && !waitingOffsets.beginReached()) {
          for (Map.Entry<TopicPartition, Long> entry : partitionsOffsets.entrySet()) {
            final Long lowest = waitingOffsets.getBeginOffsets().get(entry.getKey().partition());
            if (lowest != null) {
              consumer.assign(Collections.singleton(entry.getKey()));
              final long offset = Math.max(lowest, entry.getValue() - msgsPerPartition);
              log.debug("Polling {} from {}", entry.getKey(), offset);
              consumer.seek(entry.getKey(), offset);
              ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
              final List<ConsumerRecord<Bytes, Bytes>> partitionRecords =
                  records.records(entry.getKey()).stream()
                      .filter(r -> r.offset() < partitionsOffsets.get(entry.getKey()))
                      .collect(Collectors.toList());
              Collections.reverse(partitionRecords);

              log.debug("{} records polled", records.count());
              log.debug("{} records sent", partitionRecords.size());

              // This is a workaround for the case when the partition begin offset is less than
              // the real minimal offset, which usually appears in compacted topics
              if (records.count() > 0 && partitionRecords.isEmpty()) {
                waitingOffsets.markPolled(entry.getKey().partition());
              }

              for (ConsumerRecord<Bytes, Bytes> msg : partitionRecords) {
                if (!sink.isCancelled() && !waitingOffsets.beginReached()) {
                  sink.next(msg);
                  waitingOffsets.markPolled(msg);
                } else {
                  log.info("Begin reached");
                  break;
                }
              }
              partitionsOffsets.put(
                  entry.getKey(),
                  Math.max(offset, entry.getValue() - msgsPerPartition)
              );
            }
          }
          if (waitingOffsets.beginReached()) {
            log.info("begin reached after partitions");
          } else if (sink.isCancelled()) {
            log.info("sink is cancelled after partitions");
          }
        }
        sink.complete();
        log.info("Polling finished");
      }
    } catch (Exception e) {
      log.error("Error occurred while consuming records", e);
      sink.error(e);
    }
  }
}

@@ -0,0 +1,49 @@
package com.provectus.kafka.ui.emitter;

import com.provectus.kafka.ui.util.OffsetsSeek;
import java.time.Duration;
import java.util.function.Supplier;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.utils.Bytes;
import reactor.core.publisher.FluxSink;

@RequiredArgsConstructor
@Log4j2
public class ForwardRecordEmitter
    implements java.util.function.Consumer<FluxSink<ConsumerRecord<Bytes, Bytes>>> {

  private static final Duration POLL_TIMEOUT_MS = Duration.ofMillis(1000L);

  private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
  private final OffsetsSeek offsetsSeek;

  @Override
  public void accept(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
    try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
      var waitingOffsets = offsetsSeek.assignAndSeek(consumer);
      while (!sink.isCancelled() && !waitingOffsets.endReached()) {
        ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
        log.info("{} records polled", records.count());

        for (ConsumerRecord<Bytes, Bytes> msg : records) {
          if (!sink.isCancelled() && !waitingOffsets.endReached()) {
            sink.next(msg);
            waitingOffsets.markPolled(msg);
          } else {
            break;
          }
        }

      }
      sink.complete();
      log.info("Polling finished");
    } catch (Exception e) {
      log.error("Error occurred while consuming records", e);
      sink.error(e);
    }
  }
}

@@ -9,6 +9,8 @@ public enum ErrorCode {

  UNEXPECTED(5000, HttpStatus.INTERNAL_SERVER_ERROR),
  BINDING_FAIL(4001, HttpStatus.BAD_REQUEST),
  NOT_FOUND(404, HttpStatus.NOT_FOUND),
  INVALID_ENTITY_STATE(4001, HttpStatus.BAD_REQUEST),
  VALIDATION_FAIL(4002, HttpStatus.BAD_REQUEST),
  READ_ONLY_MODE_ENABLE(4003, HttpStatus.METHOD_NOT_ALLOWED),
  REBALANCE_IN_PROGRESS(4004, HttpStatus.CONFLICT),

@@ -0,0 +1,12 @@
package com.provectus.kafka.ui.exception;

public class IllegalEntityStateException extends CustomBaseException {
  public IllegalEntityStateException(String message) {
    super(message);
  }

  @Override
  public ErrorCode getErrorCode() {
    return ErrorCode.INVALID_ENTITY_STATE;
  }
}

@@ -0,0 +1,13 @@
package com.provectus.kafka.ui.exception;

public class NotFoundException extends CustomBaseException {

  public NotFoundException(String message) {
    super(message);
  }

  @Override
  public ErrorCode getErrorCode() {
    return ErrorCode.NOT_FOUND;
  }
}

@@ -0,0 +1,13 @@
package com.provectus.kafka.ui.exception;

public class TopicMetadataException extends CustomBaseException {

  public TopicMetadataException(String message) {
    super(message);
  }

  @Override
  public ErrorCode getErrorCode() {
    return ErrorCode.INVALID_ENTITY_STATE;
  }
}

@@ -15,6 +15,7 @@ import com.provectus.kafka.ui.model.InternalBrokerMetrics;
import com.provectus.kafka.ui.model.InternalClusterMetrics;
import com.provectus.kafka.ui.model.InternalPartition;
import com.provectus.kafka.ui.model.InternalReplica;
import com.provectus.kafka.ui.model.InternalSchemaRegistry;
import com.provectus.kafka.ui.model.InternalTopic;
import com.provectus.kafka.ui.model.InternalTopicConfig;
import com.provectus.kafka.ui.model.KafkaCluster;

@@ -49,6 +50,7 @@ public interface ClusterMapper {

  @Mapping(target = "protobufFile", source = "protobufFile", qualifiedByName = "resolvePath")
  @Mapping(target = "properties", source = "properties", qualifiedByName = "setProperties")
  @Mapping(target = "schemaRegistry", source = ".", qualifiedByName = "setSchemaRegistry")
  KafkaCluster toKafkaCluster(ClustersProperties.Cluster clusterProperties);

  @Mapping(target = "diskUsage", source = "internalBrokerDiskUsage",

@@ -64,6 +66,24 @@ public interface ClusterMapper {

  Partition toPartition(InternalPartition topic);

  default InternalSchemaRegistry setSchemaRegistry(ClustersProperties.Cluster clusterProperties) {
    if (clusterProperties == null) {
      return null;
    }

    InternalSchemaRegistry.InternalSchemaRegistryBuilder internalSchemaRegistry =
        InternalSchemaRegistry.builder();

    internalSchemaRegistry.url(clusterProperties.getSchemaRegistry());

    if (clusterProperties.getSchemaRegistryAuth() != null) {
      internalSchemaRegistry.username(clusterProperties.getSchemaRegistryAuth().getUsername());
      internalSchemaRegistry.password(clusterProperties.getSchemaRegistryAuth().getPassword());
    }

    return internalSchemaRegistry.build();
  }

  TopicDetails toTopicDetails(InternalTopic topic);

  default TopicDetails toTopicDetails(InternalTopic topic, InternalClusterMetrics metrics) {

@@ -0,0 +1,29 @@
package com.provectus.kafka.ui.model;

import com.provectus.kafka.ui.exception.IllegalEntityStateException;
import java.util.Arrays;

public enum CleanupPolicy {
  DELETE("delete"),
  COMPACT("compact"),
  COMPACT_DELETE("compact, delete"),
  UNKNOWN("unknown");

  private final String cleanUpPolicy;

  CleanupPolicy(String cleanUpPolicy) {
    this.cleanUpPolicy = cleanUpPolicy;
  }

  public String getCleanUpPolicy() {
    return cleanUpPolicy;
  }

  public static CleanupPolicy fromString(String string) {
    return Arrays.stream(CleanupPolicy.values())
        .filter(v -> v.cleanUpPolicy.equals(string))
        .findFirst()
        .orElseThrow(() ->
            new IllegalEntityStateException("Unknown cleanup policy value: " + string));
  }
}

@@ -2,11 +2,11 @@ package com.provectus.kafka.ui.model;

import java.util.Map;
import lombok.Value;
import org.apache.kafka.common.TopicPartition;

@Value
public class ConsumerPosition {

  private SeekType seekType;
  private Map<Integer, Long> seekTo;

  SeekType seekType;
  Map<TopicPartition, Long> seekTo;
  SeekDirection seekDirection;
}

@@ -15,6 +15,7 @@ public class ExtendedAdminClient {
  private final Set<SupportedFeature> supportedFeatures;

  public static Mono<ExtendedAdminClient> extendedAdminClient(AdminClient adminClient) {

    return ClusterUtil.getSupportedFeatures(adminClient)
        .map(s -> new ExtendedAdminClient(adminClient, s));
  }

@@ -27,4 +27,5 @@ public class InternalClusterMetrics {
  private final Map<Integer, InternalBrokerMetrics> internalBrokerMetrics;
  private final List<Metric> metrics;
  private final int zooKeeperStatus;
  private final String version;
}

@@ -0,0 +1,34 @@
package com.provectus.kafka.ui.model;

import java.util.Collection;
import java.util.Map;
import java.util.Set;
import lombok.Builder;
import lombok.Data;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.ConsumerGroupState;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;

@Data
@Builder(toBuilder = true)
public class InternalConsumerGroup {
  private final String groupId;
  private final boolean simple;
  private final Collection<InternalMember> members;
  private final Map<TopicPartition, OffsetAndMetadata> offsets;
  private final Map<TopicPartition, Long> endOffsets;
  private final String partitionAssignor;
  private final ConsumerGroupState state;
  private final Node coordinator;

  @Data
  @Builder(toBuilder = true)
  public static class InternalMember {
    private final String consumerId;
    private final String groupInstanceId;
    private final String clientId;
    private final String host;
    private final Set<TopicPartition> assignment;
  }
}

@@ -0,0 +1,12 @@
package com.provectus.kafka.ui.model;

import lombok.Builder;
import lombok.Data;

@Data
@Builder(toBuilder = true)
public class InternalSchemaRegistry {
  private final String username;
  private final String password;
  private final String url;
}

@@ -14,6 +14,7 @@ public class InternalTopic {
  private final Map<Integer, InternalPartition> partitions;
  private final List<InternalTopicConfig> topicConfigs;

  private final CleanupPolicy cleanUpPolicy;
  private final int replicas;
  private final int partitionCount;
  private final int inSyncReplicas;

@@ -11,17 +11,20 @@ import lombok.Data;
@Builder(toBuilder = true)
public class KafkaCluster {
  private final String name;
  private final String version;
  private final Integer jmxPort;
  private final String bootstrapServers;
  private final String zookeeper;
  private final InternalSchemaRegistry schemaRegistry;
  private final String ksqldbServer;
  private final String schemaRegistry;
  private final List<KafkaConnectCluster> kafkaConnect;
  private final String schemaNameTemplate;
  private final String keySchemaNameTemplate;
  private final ServerStatus status;
  private final ServerStatus zookeeperStatus;
  private final InternalClusterMetrics metrics;
  private final Map<String, InternalTopic> topics;
  private final List<Integer> brokers;
  private final Throwable lastKafkaException;
  private final Throwable lastZookeeperException;
  private final Path protobufFile;

@@ -0,0 +1,14 @@
package com.provectus.kafka.ui.model.schemaregistry;

import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;

@Data
public class ErrorResponse {

  @JsonProperty("error_code")
  private int errorCode;

  private String message;

}

@@ -1,21 +1,24 @@
package com.provectus.kafka.ui.deserialization;
package com.provectus.kafka.ui.serde;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.serde.schemaregistry.SchemaRegistryAwareRecordSerDe;
import com.provectus.kafka.ui.service.ClustersStorage;
import java.util.Map;
import java.util.stream.Collectors;
import javax.annotation.PostConstruct;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
import org.springframework.stereotype.Component;

@Log4j2
@Component
@RequiredArgsConstructor
public class DeserializationService {

  private final ClustersStorage clustersStorage;
  private final ObjectMapper objectMapper;
  private Map<String, RecordDeserializer> clusterDeserializers;
  private Map<String, RecordSerDe> clusterDeserializers;

  @PostConstruct

@@ -27,20 +30,22 @@ public class DeserializationService {
        ));
  }

  private RecordDeserializer createRecordDeserializerForCluster(KafkaCluster cluster) {
  private RecordSerDe createRecordDeserializerForCluster(KafkaCluster cluster) {
    try {
      if (cluster.getProtobufFile() != null) {
        return new ProtobufFileRecordDeserializer(cluster.getProtobufFile(),
        log.info("Using ProtobufFileRecordSerDe for cluster '{}'", cluster.getName());
        return new ProtobufFileRecordSerDe(cluster.getProtobufFile(),
            cluster.getProtobufMessageName(), objectMapper);
      } else {
        return new SchemaRegistryRecordDeserializer(cluster, objectMapper);
        log.info("Using SchemaRegistryAwareRecordSerDe for cluster '{}'", cluster.getName());
        return new SchemaRegistryAwareRecordSerDe(cluster);
      }
    } catch (Throwable e) {
      throw new RuntimeException("Can't init deserializer", e);
    }
  }

  public RecordDeserializer getRecordDeserializerForCluster(KafkaCluster cluster) {
  public RecordSerDe getRecordDeserializerForCluster(KafkaCluster cluster) {
    return clusterDeserializers.get(cluster.getName());
  }
}

@@ -0,0 +1,110 @@
package com.provectus.kafka.ui.serde;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.util.JsonFormat;
import com.provectus.kafka.ui.model.MessageSchema;
import com.provectus.kafka.ui.model.TopicMessageSchema;
import com.provectus.kafka.ui.util.jsonschema.JsonSchema;
import com.provectus.kafka.ui.util.jsonschema.ProtobufSchemaConverter;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaUtils;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.annotation.Nullable;
import lombok.SneakyThrows;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.utils.Bytes;

// TODO: currently we assume that keys for this serde are always strings - need to discuss if that is ok
public class ProtobufFileRecordSerDe implements RecordSerDe {
  private final ProtobufSchema protobufSchema;
  private final ObjectMapper objectMapper;
  private final Path protobufSchemaPath;
  private final ProtobufSchemaConverter schemaConverter = new ProtobufSchemaConverter();

  public ProtobufFileRecordSerDe(Path protobufSchemaPath, String messageName,
                                 ObjectMapper objectMapper) throws IOException {
    this.objectMapper = objectMapper;
    this.protobufSchemaPath = protobufSchemaPath;
    try (final Stream<String> lines = Files.lines(protobufSchemaPath)) {
      this.protobufSchema = new ProtobufSchema(
          lines.collect(Collectors.joining())
      ).copy(messageName);
    }
  }

  @Override
  public DeserializedKeyValue deserialize(ConsumerRecord<Bytes, Bytes> msg) {
    try {
      return new DeserializedKeyValue(
          msg.key() != null ? new String(msg.key().get()) : null,
          msg.value() != null ? parse(msg.value().get()) : null
      );
    } catch (Throwable e) {
      throw new RuntimeException("Failed to parse record from topic " + msg.topic(), e);
    }
  }

  @SneakyThrows
  private String parse(byte[] value) {
    DynamicMessage protoMsg = DynamicMessage.parseFrom(
        protobufSchema.toDescriptor(),
        new ByteArrayInputStream(value)
    );
    byte[] jsonFromProto = ProtobufSchemaUtils.toJson(protoMsg);
    return new String(jsonFromProto);
  }

  @Override
  public ProducerRecord<byte[], byte[]> serialize(String topic,
                                                  @Nullable String key,
                                                  @Nullable String data,
                                                  @Nullable Integer partition) {
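    // A null payload produces a tombstone record: the key is serialized, the value stays null.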
    if (data == null) {
      return new ProducerRecord<>(topic, partition, Objects.requireNonNull(key).getBytes(), null);
    }
    DynamicMessage.Builder builder = protobufSchema.newMessageBuilder();
    try {
      JsonFormat.parser().merge(data, builder);
      final DynamicMessage message = builder.build();
      return new ProducerRecord<>(
          topic,
          partition,
          Optional.ofNullable(key).map(String::getBytes).orElse(null),
          message.toByteArray()
      );
    } catch (Throwable e) {
      throw new RuntimeException("Failed to merge record for topic " + topic, e);
    }
  }

  @Override
  public TopicMessageSchema getTopicSchema(String topic) {

    final JsonSchema jsonSchema = schemaConverter.convert(
        protobufSchemaPath.toUri(),
        protobufSchema.toDescriptor()
    );
    final MessageSchema keySchema = new MessageSchema()
        .name(protobufSchema.fullName())
        .source(MessageSchema.SourceEnum.PROTO_FILE)
        .schema(JsonSchema.stringSchema().toJson(objectMapper));

    final MessageSchema valueSchema = new MessageSchema()
        .name(protobufSchema.fullName())
        .source(MessageSchema.SourceEnum.PROTO_FILE)
        .schema(jsonSchema.toJson(objectMapper));

    return new TopicMessageSchema()
        .key(keySchema)
        .value(valueSchema);
  }
}

@@ -0,0 +1,26 @@
package com.provectus.kafka.ui.serde;

import com.provectus.kafka.ui.model.TopicMessageSchema;
import javax.annotation.Nullable;
import lombok.Value;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.utils.Bytes;

public interface RecordSerDe {

  @Value
  class DeserializedKeyValue {
    @Nullable String key;
    @Nullable String value;
  }

  DeserializedKeyValue deserialize(ConsumerRecord<Bytes, Bytes> msg);

  ProducerRecord<byte[], byte[]> serialize(String topic,
                                           @Nullable String key,
                                           @Nullable String data,
                                           @Nullable Integer partition);

  TopicMessageSchema getTopicSchema(String topic);
}

@@ -0,0 +1,45 @@
package com.provectus.kafka.ui.serde;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.model.MessageSchema;
import com.provectus.kafka.ui.model.TopicMessageSchema;
import com.provectus.kafka.ui.util.jsonschema.JsonSchema;
import javax.annotation.Nullable;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.utils.Bytes;

public class SimpleRecordSerDe implements RecordSerDe {

  @Override
  public DeserializedKeyValue deserialize(ConsumerRecord<Bytes, Bytes> msg) {
    return new DeserializedKeyValue(
        msg.key() != null ? new String(msg.key().get()) : null,
        msg.value() != null ? new String(msg.value().get()) : null
    );
  }

  @Override
  public ProducerRecord<byte[], byte[]> serialize(String topic,
                                                  @Nullable String key,
                                                  @Nullable String data,
                                                  @Nullable Integer partition) {
    return new ProducerRecord<>(
        topic,
        partition,
        key != null ? key.getBytes() : null,
        data != null ? data.getBytes() : null
    );
  }

  @Override
  public TopicMessageSchema getTopicSchema(String topic) {
    final MessageSchema schema = new MessageSchema()
        .name("unknown")
        .source(MessageSchema.SourceEnum.UNKNOWN)
        .schema(JsonSchema.stringSchema().toJson(new ObjectMapper()));
    return new TopicMessageSchema()
        .key(schema)
        .value(schema);
  }
}

@@ -0,0 +1,23 @@
package com.provectus.kafka.ui.serde.schemaregistry;

import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.serializers.KafkaAvroDeserializer;
import lombok.SneakyThrows;
import org.apache.avro.generic.GenericRecord;

public class AvroMessageFormatter implements MessageFormatter {
  private final KafkaAvroDeserializer avroDeserializer;

  public AvroMessageFormatter(SchemaRegistryClient client) {
    this.avroDeserializer = new KafkaAvroDeserializer(client);
  }

  @Override
  @SneakyThrows
  public String format(String topic, byte[] value) {
    GenericRecord avroRecord = (GenericRecord) avroDeserializer.deserialize(topic, value);
    byte[] jsonBytes = AvroSchemaUtils.toJson(avroRecord);
    return new String(jsonBytes);
  }
}

@@ -0,0 +1,47 @@
package com.provectus.kafka.ui.serde.schemaregistry;

import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.avro.AvroSchema;
import io.confluent.kafka.schemaregistry.avro.AvroSchemaUtils;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
import io.confluent.kafka.serializers.KafkaAvroSerializer;
import io.confluent.kafka.serializers.KafkaAvroSerializerConfig;
import java.io.IOException;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;

public class AvroMessageReader extends MessageReader<Object> {

  public AvroMessageReader(String topic, boolean isKey,
                           SchemaRegistryClient client,
                           SchemaMetadata schema)
      throws IOException, RestClientException {
    super(topic, isKey, client, schema);
  }

  @Override
  protected Serializer<Object> createSerializer(SchemaRegistryClient client) {
    var serializer = new KafkaAvroSerializer(client);
    serializer.configure(
        Map.of(
            "schema.registry.url", "wontbeused",
            KafkaAvroSerializerConfig.AUTO_REGISTER_SCHEMAS, false,
            KafkaAvroSerializerConfig.USE_LATEST_VERSION, true
        ),
        isKey
    );
    return serializer;
  }

  @Override
  protected Object read(String value, ParsedSchema schema) {
    try {
      return AvroSchemaUtils.toObject(value, (AvroSchema) schema);
    } catch (Throwable e) {
      throw new RuntimeException("Failed to serialize record for topic " + topic, e);
    }

  }
}

@@ -0,0 +1,20 @@
package com.provectus.kafka.ui.serde.schemaregistry;

import com.fasterxml.jackson.databind.JsonNode;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.serializers.json.KafkaJsonSchemaDeserializer;

public class JsonSchemaMessageFormatter implements MessageFormatter {

  private final KafkaJsonSchemaDeserializer<JsonNode> jsonSchemaDeserializer;

  public JsonSchemaMessageFormatter(SchemaRegistryClient client) {
    this.jsonSchemaDeserializer = new KafkaJsonSchemaDeserializer<>(client);
  }

  @Override
  public String format(String topic, byte[] value) {
    JsonNode json = jsonSchemaDeserializer.deserialize(topic, value);
    return json.toString();
  }
}

@@ -0,0 +1,81 @@
package com.provectus.kafka.ui.serde.schemaregistry;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.util.annotations.KafkaClientInternalsDependant;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
import io.confluent.kafka.schemaregistry.json.JsonSchema;
import io.confluent.kafka.serializers.json.KafkaJsonSchemaSerializer;
import io.confluent.kafka.serializers.json.KafkaJsonSchemaSerializerConfig;
import java.io.IOException;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;

public class JsonSchemaMessageReader extends MessageReader<JsonNode> {

  private static final ObjectMapper MAPPER = new ObjectMapper();

  public JsonSchemaMessageReader(String topic,
                                 boolean isKey,
                                 SchemaRegistryClient client,
                                 SchemaMetadata schema) throws IOException, RestClientException {
    super(topic, isKey, client, schema);
  }

  @Override
  protected Serializer<JsonNode> createSerializer(SchemaRegistryClient client) {
    var serializer = new KafkaJsonSchemaSerializerWithoutSchemaInfer(client);
    serializer.configure(
        Map.of(
            "schema.registry.url", "wontbeused",
            KafkaJsonSchemaSerializerConfig.AUTO_REGISTER_SCHEMAS, false,
            KafkaJsonSchemaSerializerConfig.USE_LATEST_VERSION, true
        ),
        isKey
    );
    return serializer;
  }

  @Override
  protected JsonNode read(String value, ParsedSchema schema) {
    try {
      JsonNode json = MAPPER.readTree(value);
      ((JsonSchema) schema).validate(json);
      return json;
    } catch (JsonProcessingException e) {
      throw new ValidationException(String.format("'%s' is not valid json", value));
    } catch (org.everit.json.schema.ValidationException e) {
      throw new ValidationException(
          String.format("'%s' does not fit schema: %s", value, e.getAllMessages()));
    }
  }

  @KafkaClientInternalsDependant
  private class KafkaJsonSchemaSerializerWithoutSchemaInfer
      extends KafkaJsonSchemaSerializer<JsonNode> {

    KafkaJsonSchemaSerializerWithoutSchemaInfer(SchemaRegistryClient client) {
      super(client);
    }

    /**
     * We need to override the original method because it tries to infer the schema from the input
     * by checking the 'schema' json field or the @Schema annotation on the input class, which is
     * not possible in our case. So we skip all the inference logic and pass the schema directly.
     */
    @Override
    public byte[] serialize(String topic, JsonNode record) {
      return super.serializeImpl(
          super.getSubjectName(topic, isKey, record, schema),
          record,
          (JsonSchema) schema
      );
    }
  }

}

@@ -0,0 +1,7 @@
package com.provectus.kafka.ui.serde.schemaregistry;

public enum MessageFormat {
  AVRO,
  JSON,
  PROTOBUF
}

@@ -0,0 +1,5 @@
package com.provectus.kafka.ui.serde.schemaregistry;

public interface MessageFormatter {
  String format(String topic, byte[] value);
}

@@ -0,0 +1,32 @@
package com.provectus.kafka.ui.serde.schemaregistry;

import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
import java.io.IOException;
import org.apache.kafka.common.serialization.Serializer;

public abstract class MessageReader<T> {
  protected final Serializer<T> serializer;
  protected final String topic;
  protected final boolean isKey;
  protected final ParsedSchema schema;

  protected MessageReader(String topic, boolean isKey, SchemaRegistryClient client,
                          SchemaMetadata schema) throws IOException, RestClientException {
    this.topic = topic;
    this.isKey = isKey;
    this.serializer = createSerializer(client);
    this.schema = client.getSchemaById(schema.getId());
  }

  protected abstract Serializer<T> createSerializer(SchemaRegistryClient client);

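  // Parses the string input against the registry schema and serializes it
  // with the schema-registry-aware serializer created above.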
  public byte[] read(String value) {
    final T read = this.read(value, schema);
    return this.serializer.serialize(topic, read);
  }

  protected abstract T read(String value, ParsedSchema schema);
}

@@ -0,0 +1,23 @@
package com.provectus.kafka.ui.serde.schemaregistry;

import com.google.protobuf.Message;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaUtils;
import io.confluent.kafka.serializers.protobuf.KafkaProtobufDeserializer;
import lombok.SneakyThrows;

public class ProtobufMessageFormatter implements MessageFormatter {
  private final KafkaProtobufDeserializer<?> protobufDeserializer;

  public ProtobufMessageFormatter(SchemaRegistryClient client) {
    this.protobufDeserializer = new KafkaProtobufDeserializer<>(client);
  }

  @Override
  @SneakyThrows
  public String format(String topic, byte[] value) {
    final Message message = protobufDeserializer.deserialize(topic, value);
    byte[] jsonBytes = ProtobufSchemaUtils.toJson(message);
    return new String(jsonBytes);
  }
}

@@ -0,0 +1,51 @@
package com.provectus.kafka.ui.serde.schemaregistry;

import com.google.protobuf.DynamicMessage;
import com.google.protobuf.Message;
import com.google.protobuf.util.JsonFormat;
import io.confluent.kafka.schemaregistry.ParsedSchema;
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
import io.confluent.kafka.serializers.protobuf.KafkaProtobufSerializer;
import io.confluent.kafka.serializers.protobuf.KafkaProtobufSerializerConfig;
import java.io.IOException;
import java.util.Map;
import org.apache.kafka.common.serialization.Serializer;

public class ProtobufMessageReader extends MessageReader<Message> {

  public ProtobufMessageReader(String topic, boolean isKey,
                               SchemaRegistryClient client, SchemaMetadata schema)
      throws IOException, RestClientException {
    super(topic, isKey, client, schema);
  }

  @Override
  protected Serializer<Message> createSerializer(SchemaRegistryClient client) {
    var serializer = new KafkaProtobufSerializer<>(client);
    serializer.configure(
        Map.of(
            "schema.registry.url", "wontbeused",
            KafkaProtobufSerializerConfig.AUTO_REGISTER_SCHEMAS, false,
            KafkaProtobufSerializerConfig.USE_LATEST_VERSION, true
        ),
        isKey
    );
    return serializer;
  }

  @Override
  protected Message read(String value, ParsedSchema schema) {
    ProtobufSchema protobufSchema = (ProtobufSchema) schema;
    DynamicMessage.Builder builder = protobufSchema.newMessageBuilder();
    try {
      JsonFormat.parser().merge(value, builder);
      return builder.build();
    } catch (Throwable e) {
      throw new RuntimeException("Failed to serialize record for topic " + topic, e);
    }
  }

}

@ -0,0 +1,322 @@
|
|||
package com.provectus.kafka.ui.serde.schemaregistry;
|
||||
|
||||
|
||||
import static io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig.BASIC_AUTH_CREDENTIALS_SOURCE;
|
||||
import static io.confluent.kafka.serializers.AbstractKafkaSchemaSerDeConfig.USER_INFO_CONFIG;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.provectus.kafka.ui.exception.ValidationException;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.model.MessageSchema;
|
||||
import com.provectus.kafka.ui.model.TopicMessageSchema;
|
||||
import com.provectus.kafka.ui.serde.RecordSerDe;
|
||||
import com.provectus.kafka.ui.util.jsonschema.AvroJsonSchemaConverter;
|
||||
import com.provectus.kafka.ui.util.jsonschema.JsonSchema;
|
||||
import com.provectus.kafka.ui.util.jsonschema.ProtobufSchemaConverter;
|
||||
import io.confluent.kafka.schemaregistry.ParsedSchema;
|
||||
import io.confluent.kafka.schemaregistry.SchemaProvider;
|
||||
import io.confluent.kafka.schemaregistry.avro.AvroSchema;
|
||||
import io.confluent.kafka.schemaregistry.avro.AvroSchemaProvider;
|
||||
import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
|
||||
import io.confluent.kafka.schemaregistry.client.SchemaMetadata;
|
||||
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
|
||||
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
|
||||
import io.confluent.kafka.schemaregistry.json.JsonSchemaProvider;
|
||||
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchema;
|
||||
import io.confluent.kafka.schemaregistry.protobuf.ProtobufSchemaProvider;
|
||||
import java.net.URI;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Optional;
|
||||
import java.util.concurrent.Callable;
|
||||
import java.util.concurrent.ConcurrentHashMap;
|
||||
import javax.annotation.Nullable;
|
||||
import lombok.SneakyThrows;
|
||||
import lombok.extern.log4j.Log4j2;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.producer.ProducerRecord;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
|
||||
@Log4j2
|
||||
public class SchemaRegistryAwareRecordSerDe implements RecordSerDe {
|
||||
|
||||
private static final int CLIENT_IDENTITY_MAP_CAPACITY = 100;
|
||||
|
||||
private final KafkaCluster cluster;
|
||||
private final Map<String, MessageFormatter> valueFormatMap = new ConcurrentHashMap<>();
|
||||
private final Map<String, MessageFormatter> keyFormatMap = new ConcurrentHashMap<>();
|
||||
|
||||
@Nullable
|
||||
private final SchemaRegistryClient schemaRegistryClient;
|
||||
|
||||
@Nullable
|
||||
private final AvroMessageFormatter avroFormatter;
|
||||
|
||||
@Nullable
|
||||
private final ProtobufMessageFormatter protobufFormatter;
|
||||
|
||||
@Nullable
|
||||
private final JsonSchemaMessageFormatter jsonSchemaMessageFormatter;
|
||||
|
||||
private final StringMessageFormatter stringFormatter = new StringMessageFormatter();
|
||||
private final ProtobufSchemaConverter protoSchemaConverter = new ProtobufSchemaConverter();
|
||||
private final AvroJsonSchemaConverter avroSchemaConverter = new AvroJsonSchemaConverter();
|
||||
private final ObjectMapper objectMapper = new ObjectMapper();
|
||||
|
||||
private static SchemaRegistryClient createSchemaRegistryClient(KafkaCluster cluster) {
|
||||
Objects.requireNonNull(cluster.getSchemaRegistry());
|
||||
Objects.requireNonNull(cluster.getSchemaRegistry().getUrl());
|
||||
List<SchemaProvider> schemaProviders =
|
||||
List.of(new AvroSchemaProvider(), new ProtobufSchemaProvider(), new JsonSchemaProvider());
|
||||
|
||||
Map<String, String> configs = new HashMap<>();
|
||||
String username = cluster.getSchemaRegistry().getUsername();
|
||||
String password = cluster.getSchemaRegistry().getPassword();
|
||||
|
||||
if (username != null && password != null) {
|
||||
configs.put(BASIC_AUTH_CREDENTIALS_SOURCE, "USER_INFO");
|
||||
configs.put(USER_INFO_CONFIG, username + ":" + password);
|
||||
} else if (username != null) {
|
||||
throw new ValidationException(
|
||||
"You specified username but do not specified password");
|
||||
} else if (password != null) {
|
||||
throw new ValidationException(
|
||||
"You specified password but do not specified username");
|
||||
}
|
||||
return new CachedSchemaRegistryClient(
|
||||
Collections.singletonList(cluster.getSchemaRegistry().getUrl()),
|
||||
CLIENT_IDENTITY_MAP_CAPACITY,
|
||||
schemaProviders,
|
||||
configs
|
||||
);
|
||||
}
|
||||
|
||||
public SchemaRegistryAwareRecordSerDe(KafkaCluster cluster) {
|
||||
this.cluster = cluster;
|
||||
this.schemaRegistryClient = cluster.getSchemaRegistry() != null
|
||||
? createSchemaRegistryClient(cluster)
|
||||
: null;
|
||||
if (schemaRegistryClient != null) {
|
||||
this.avroFormatter = new AvroMessageFormatter(schemaRegistryClient);
|
||||
this.protobufFormatter = new ProtobufMessageFormatter(schemaRegistryClient);
|
||||
this.jsonSchemaMessageFormatter = new JsonSchemaMessageFormatter(schemaRegistryClient);
|
||||
} else {
|
||||
this.avroFormatter = null;
|
||||
this.protobufFormatter = null;
|
||||
this.jsonSchemaMessageFormatter = null;
|
||||
}
|
||||
}
|
||||
|
||||
public DeserializedKeyValue deserialize(ConsumerRecord<Bytes, Bytes> msg) {
|
||||
try {
|
||||
return new DeserializedKeyValue(
|
||||
msg.key() != null
|
||||
? getMessageFormatter(msg, true).format(msg.topic(), msg.key().get())
|
||||
: null,
|
||||
msg.value() != null
|
||||
? getMessageFormatter(msg, false).format(msg.topic(), msg.value().get())
|
||||
: null
|
||||
);
|
||||
    } catch (Throwable e) {
      throw new RuntimeException("Failed to parse record from topic " + msg.topic(), e);
    }
  }

  @Override
  public ProducerRecord<byte[], byte[]> serialize(String topic,
                                                  @Nullable String key,
                                                  @Nullable String data,
                                                  @Nullable Integer partition) {
    final Optional<SchemaMetadata> maybeValueSchema = getSchemaBySubject(topic, false);
    final Optional<SchemaMetadata> maybeKeySchema = getSchemaBySubject(topic, true);

    final byte[] serializedValue = data != null
        ? serialize(maybeValueSchema, topic, data, false)
        : null;
    final byte[] serializedKey = key != null
        ? serialize(maybeKeySchema, topic, key, true)
        : null;

    return new ProducerRecord<>(topic, partition, serializedKey, serializedValue);
  }

  @SneakyThrows
  private byte[] serialize(
      Optional<SchemaMetadata> maybeSchema, String topic, String value, boolean isKey) {
    if (maybeSchema.isPresent()) {
      final SchemaMetadata schema = maybeSchema.get();

      MessageReader<?> reader;
      if (schema.getSchemaType().equals(MessageFormat.PROTOBUF.name())) {
        reader = new ProtobufMessageReader(topic, isKey, schemaRegistryClient, schema);
      } else if (schema.getSchemaType().equals(MessageFormat.AVRO.name())) {
        reader = new AvroMessageReader(topic, isKey, schemaRegistryClient, schema);
      } else if (schema.getSchemaType().equals(MessageFormat.JSON.name())) {
        reader = new JsonSchemaMessageReader(topic, isKey, schemaRegistryClient, schema);
      } else {
        throw new IllegalStateException("Unsupported schema type: " + schema.getSchemaType());
      }

      return reader.read(value);
    } else {
      // if no schema provided serialize input as raw string
      return value.getBytes();
    }
  }

  @Override
  public TopicMessageSchema getTopicSchema(String topic) {
    final Optional<SchemaMetadata> maybeValueSchema = getSchemaBySubject(topic, false);
    final Optional<SchemaMetadata> maybeKeySchema = getSchemaBySubject(topic, true);

    String sourceValueSchema = maybeValueSchema.map(this::convertSchema)
        .orElseGet(() -> JsonSchema.stringSchema().toJson(objectMapper));

    String sourceKeySchema = maybeKeySchema.map(this::convertSchema)
        .orElseGet(() -> JsonSchema.stringSchema().toJson(objectMapper));

    final MessageSchema keySchema = new MessageSchema()
        .name(maybeKeySchema.map(
            (s) -> schemaSubject(topic, true)
        ).orElse("unknown"))
        .source(MessageSchema.SourceEnum.SCHEMA_REGISTRY)
        .schema(sourceKeySchema);

    final MessageSchema valueSchema = new MessageSchema()
        .name(maybeValueSchema.map(
            (s) -> schemaSubject(topic, false)
        ).orElse("unknown"))
        .source(MessageSchema.SourceEnum.SCHEMA_REGISTRY)
        .schema(sourceValueSchema);

    return new TopicMessageSchema()
        .key(keySchema)
        .value(valueSchema);
  }

  @SneakyThrows
  private String convertSchema(SchemaMetadata schema) {

    String jsonSchema;
    URI basePath = new URI(cluster.getSchemaRegistry().getUrl())
        .resolve(Integer.toString(schema.getId()));
    final ParsedSchema schemaById = Objects.requireNonNull(schemaRegistryClient)
        .getSchemaById(schema.getId());

    if (schema.getSchemaType().equals(MessageFormat.PROTOBUF.name())) {
      final ProtobufSchema protobufSchema = (ProtobufSchema) schemaById;
      jsonSchema = protoSchemaConverter
          .convert(basePath, protobufSchema.toDescriptor())
          .toJson(objectMapper);
    } else if (schema.getSchemaType().equals(MessageFormat.AVRO.name())) {
      final AvroSchema avroSchema = (AvroSchema) schemaById;
      jsonSchema = avroSchemaConverter
          .convert(basePath, avroSchema.rawSchema())
          .toJson(objectMapper);
    } else if (schema.getSchemaType().equals(MessageFormat.JSON.name())) {
      jsonSchema = schema.getSchema();
    } else {
      jsonSchema = JsonSchema.stringSchema().toJson(objectMapper);
    }

    return jsonSchema;
  }

  private MessageFormatter getMessageFormatter(ConsumerRecord<Bytes, Bytes> msg, boolean isKey) {
    if (isKey) {
      return keyFormatMap.computeIfAbsent(msg.topic(), k -> detectFormat(msg, true));
    } else {
      return valueFormatMap.computeIfAbsent(msg.topic(), k -> detectFormat(msg, false));
    }
  }

  private MessageFormatter detectFormat(ConsumerRecord<Bytes, Bytes> msg, boolean isKey) {
    if (schemaRegistryClient != null) {
      try {
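        // Prefer the schema id embedded in the record itself; otherwise fall back to the
        // latest schema registered for the topic's subject in the Schema Registry.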
        final Optional<String> type = getSchemaFromMessage(msg, isKey)
            .or(() -> getSchemaBySubject(msg.topic(), isKey).map(SchemaMetadata::getSchemaType));
        if (type.isPresent()) {
          if (type.get().equals(MessageFormat.PROTOBUF.name())) {
            if (tryFormatter(protobufFormatter, msg, isKey).isPresent()) {
              return protobufFormatter;
            }
          } else if (type.get().equals(MessageFormat.AVRO.name())) {
            if (tryFormatter(avroFormatter, msg, isKey).isPresent()) {
              return avroFormatter;
            }
          } else if (type.get().equals(MessageFormat.JSON.name())) {
            if (tryFormatter(jsonSchemaMessageFormatter, msg, isKey).isPresent()) {
              return jsonSchemaMessageFormatter;
            }
          } else {
            throw new IllegalStateException("Unsupported schema type: " + type.get());
          }
        }
      } catch (Exception e) {
        log.warn("Failed to get Schema for topic {}", msg.topic(), e);
      }
    }
    return stringFormatter;
  }

  private Optional<MessageFormatter> tryFormatter(
      MessageFormatter formatter, ConsumerRecord<Bytes, Bytes> msg, boolean isKey) {
    try {
      formatter.format(msg.topic(), isKey ? msg.key().get() : msg.value().get());
      return Optional.of(formatter);
    } catch (Throwable e) {
      log.warn("Failed to parse by {} from topic {}", formatter.getClass(), msg.topic(), e);
    }

    return Optional.empty();
  }

  @SneakyThrows
  private Optional<String> getSchemaFromMessage(ConsumerRecord<Bytes, Bytes> msg, boolean isKey) {
    Optional<String> result = Optional.empty();
    final Bytes value = isKey ? msg.key() : msg.value();
    if (value != null) {
      ByteBuffer buffer = ByteBuffer.wrap(value.get());
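      // Confluent wire format: a magic byte 0 followed by a 4-byte schema id, then the payload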
      if (buffer.get() == 0) {
        int id = buffer.getInt();
        result =
            Optional.ofNullable(schemaRegistryClient)
                .flatMap(client -> wrapClientCall(() -> client.getSchemaById(id)))
                .map(ParsedSchema::schemaType);
      }
    }
    return result;
  }

  @SneakyThrows
  private Optional<SchemaMetadata> getSchemaBySubject(String topic, boolean isKey) {
    return Optional.ofNullable(schemaRegistryClient)
        .flatMap(client ->
            wrapClientCall(() ->
                client.getLatestSchemaMetadata(schemaSubject(topic, isKey))));
  }

  @SneakyThrows
  private <T> Optional<T> wrapClientCall(Callable<T> call) {
    try {
      return Optional.ofNullable(call.call());
    } catch (RestClientException restClientException) {
      if (restClientException.getStatus() == 404) {
        return Optional.empty();
      } else {
        throw new RuntimeException("Error calling SchemaRegistryClient", restClientException);
      }
    }
  }

  private String schemaSubject(String topic, boolean isKey) {
    return String.format(
        isKey ? cluster.getKeySchemaNameTemplate()
            : cluster.getSchemaNameTemplate(), topic
    );
  }
}

@@ -0,0 +1,11 @@
package com.provectus.kafka.ui.serde.schemaregistry;

import java.nio.charset.StandardCharsets;

public class StringMessageFormatter implements MessageFormatter {

  @Override
  public String format(String topic, byte[] value) {
    return new String(value, StandardCharsets.UTF_8);
  }
}

@@ -1,7 +1,10 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.exception.IllegalEntityStateException;
import com.provectus.kafka.ui.exception.NotFoundException;
import com.provectus.kafka.ui.exception.TopicNotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.mapper.ClusterMapper;
import com.provectus.kafka.ui.model.Broker;
import com.provectus.kafka.ui.model.BrokerMetrics;

@@ -11,44 +14,54 @@ import com.provectus.kafka.ui.model.ClusterStats;
import com.provectus.kafka.ui.model.ConsumerGroup;
import com.provectus.kafka.ui.model.ConsumerGroupDetails;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessage;
import com.provectus.kafka.ui.model.ExtendedAdminClient;
import com.provectus.kafka.ui.model.InternalTopic;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.PartitionsIncrease;
import com.provectus.kafka.ui.model.PartitionsIncreaseResponse;
import com.provectus.kafka.ui.model.ReplicationFactorChange;
import com.provectus.kafka.ui.model.ReplicationFactorChangeResponse;
import com.provectus.kafka.ui.model.Topic;
import com.provectus.kafka.ui.model.TopicColumnsToSort;
import com.provectus.kafka.ui.model.TopicConfig;
import com.provectus.kafka.ui.model.TopicConsumerGroups;
import com.provectus.kafka.ui.model.TopicCreation;
import com.provectus.kafka.ui.model.TopicDetails;
import com.provectus.kafka.ui.model.TopicMessage;
import com.provectus.kafka.ui.model.TopicMessageSchema;
import com.provectus.kafka.ui.model.TopicUpdate;
import com.provectus.kafka.ui.model.TopicsResponse;
import com.provectus.kafka.ui.serde.DeserializationService;
import com.provectus.kafka.ui.util.ClusterUtil;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
import lombok.extern.log4j.Log4j2;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.clients.admin.DeleteConsumerGroupsResult;
import org.apache.kafka.common.errors.GroupIdNotFoundException;
import org.apache.kafka.common.errors.GroupNotEmptyException;
import org.jetbrains.annotations.NotNull;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuples;

@Service
@RequiredArgsConstructor
@Log4j2
public class ClusterService {
  private static final Integer DEFAULT_PAGE_SIZE = 20;
  private static final Integer DEFAULT_PAGE_SIZE = 25;

  private final ClustersStorage clustersStorage;
  private final ClusterMapper clusterMapper;
  private final KafkaService kafkaService;
  private final ConsumingService consumingService;
  private final DeserializationService deserializationService;

  public List<Cluster> getClusters() {
    return clustersStorage.getKafkaClusters()

@@ -91,7 +104,7 @@ public class ClusterService {
    var topicsToSkip = (page.filter(positiveInt).orElse(1) - 1) * perPage;
    var cluster = clustersStorage.getClusterByName(name)
        .orElseThrow(ClusterNotFoundException::new);
    List<Topic> topics = cluster.getTopics().values().stream()
    List<InternalTopic> topics = cluster.getTopics().values().stream()
        .filter(topic -> !topic.isInternal()
            || showInternal
                .map(i -> topic.isInternal() == i)

@@ -101,7 +114,6 @@ public class ClusterService {
            .map(s -> StringUtils.containsIgnoreCase(topic.getName(), s))
            .orElse(true))
        .sorted(getComparatorForTopic(sortBy))
        .map(clusterMapper::toTopic)
        .collect(Collectors.toList());
    var totalPages = (topics.size() / perPage)
        + (topics.size() % perPage == 0 ? 0 : 1);

@@ -111,6 +123,13 @@ public class ClusterService {
        topics.stream()
            .skip(topicsToSkip)
            .limit(perPage)
            .map(t ->
                clusterMapper.toTopic(
                    t.toBuilder().partitions(
                        kafkaService.getTopicPartitions(cluster, t)
                    ).build()
                )
            )
            .collect(Collectors.toList())
    );
  }

@@ -125,6 +144,8 @@ public class ClusterService {
        return Comparator.comparing(InternalTopic::getPartitionCount);
      case OUT_OF_SYNC_REPLICAS:
        return Comparator.comparing(t -> t.getReplicas() - t.getInSyncReplicas());
      case REPLICATION_FACTOR:
        return Comparator.comparing(InternalTopic::getReplicationFactor);
      case NAME:
      default:
        return defaultComparator;

@@ -164,46 +185,26 @@ public class ClusterService {
  public Mono<ConsumerGroupDetails> getConsumerGroupDetail(String clusterName,
                                                           String consumerGroupId) {
    var cluster = clustersStorage.getClusterByName(clusterName).orElseThrow(Throwable::new);

    return kafkaService.getOrCreateAdminClient(cluster).map(ac ->
        ac.getAdminClient().describeConsumerGroups(Collections.singletonList(consumerGroupId)).all()
    ).flatMap(groups ->
        kafkaService.groupMetadata(cluster, consumerGroupId)
            .flatMap(offsets -> {
              Map<TopicPartition, Long> endOffsets =
                  kafkaService.topicPartitionsEndOffsets(cluster, offsets.keySet());
              return ClusterUtil.toMono(groups).map(s ->
                  Tuples.of(
                      s.get(consumerGroupId),
                      s.get(consumerGroupId).members().stream()
                          .flatMap(c ->
                              Stream.of(
                                  ClusterUtil.convertToConsumerTopicPartitionDetails(
                                      c, offsets, endOffsets, consumerGroupId
                                  )
                              )
                          )
                          .collect(Collectors.toList()).stream()
                          .flatMap(t ->
                              t.stream().flatMap(Stream::of)
                          ).collect(Collectors.toList())
                  )
              );
            }).map(c -> ClusterUtil.convertToConsumerGroupDetails(c.getT1(), c.getT2()))
    return kafkaService.getConsumerGroups(
        cluster,
        Optional.empty(),
        Collections.singletonList(consumerGroupId)
    ).filter(groups -> !groups.isEmpty()).map(groups -> groups.get(0)).map(
        ClusterUtil::convertToConsumerGroupDetails
    );
  }

  public Mono<List<ConsumerGroup>> getConsumerGroups(String clusterName) {
    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
        .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
        .flatMap(kafkaService::getConsumerGroups);
    return getConsumerGroups(clusterName, Optional.empty());
  }

  public Mono<TopicConsumerGroups> getTopicConsumerGroupDetail(
      String clusterName, String topicName) {
  public Mono<List<ConsumerGroup>> getConsumerGroups(String clusterName, Optional<String> topic) {
    return Mono.justOrEmpty(clustersStorage.getClusterByName(clusterName))
        .switchIfEmpty(Mono.error(ClusterNotFoundException::new))
        .flatMap(c -> kafkaService.getTopicConsumerGroups(c, topicName));
        .flatMap(c -> kafkaService.getConsumerGroups(c, topic, Collections.emptyList()))
        .map(c ->
            c.stream().map(ClusterUtil::convertToConsumerGroup).collect(Collectors.toList())
        );
  }

  public Flux<Broker> getBrokers(String clusterName) {

@@ -253,6 +254,15 @@ public class ClusterService {
    return updatedCluster;
  }

  public Mono<Cluster> updateCluster(String clusterName) {
    return clustersStorage.getClusterByName(clusterName)
        .map(cluster -> kafkaService.getUpdatedCluster(cluster)
            .doOnNext(updatedCluster -> clustersStorage
                .setKafkaCluster(updatedCluster.getName(), updatedCluster))
            .map(clusterMapper::toCluster))
        .orElse(Mono.error(new ClusterNotFoundException()));
  }

  public Flux<TopicMessage> getMessages(String clusterName, String topicName,
                                        ConsumerPosition consumerPosition, String query,
                                        Integer limit) {

@@ -272,5 +282,83 @@ public class ClusterService {
        .flatMap(offsets -> kafkaService.deleteTopicMessages(cluster, offsets));
  }

  public Mono<PartitionsIncreaseResponse> increaseTopicPartitions(
      String clusterName,
      String topicName,
      PartitionsIncrease partitionsIncrease) {
    return clustersStorage.getClusterByName(clusterName).map(cluster ->
        kafkaService.increaseTopicPartitions(cluster, topicName, partitionsIncrease)
            .doOnNext(t -> updateCluster(t, cluster.getName(), cluster))
            .map(t -> new PartitionsIncreaseResponse()
                .topicName(t.getName())
                .totalPartitionsCount(t.getPartitionCount())))
        .orElse(Mono.error(new ClusterNotFoundException(
            String.format("No cluster for name '%s'", clusterName)
        )));
  }

  public Mono<Void> deleteConsumerGroupById(String clusterName,
                                            String groupId) {
    return clustersStorage.getClusterByName(clusterName)
        .map(cluster -> kafkaService.getOrCreateAdminClient(cluster)
            .map(ExtendedAdminClient::getAdminClient)
            .map(adminClient -> adminClient.deleteConsumerGroups(List.of(groupId)))
            .map(DeleteConsumerGroupsResult::all)
            .flatMap(ClusterUtil::toMono)
            .onErrorResume(this::reThrowCustomException)
        )
        .orElse(Mono.empty());
  }

  public TopicMessageSchema getTopicSchema(String clusterName, String topicName) {
    var cluster = clustersStorage.getClusterByName(clusterName)
        .orElseThrow(ClusterNotFoundException::new);
    if (!cluster.getTopics().containsKey(topicName)) {
      throw new TopicNotFoundException();
    }
    return deserializationService
        .getRecordDeserializerForCluster(cluster)
        .getTopicSchema(topicName);
  }

  public Mono<Void> sendMessage(String clusterName, String topicName, CreateTopicMessage msg) {
    var cluster = clustersStorage.getClusterByName(clusterName)
        .orElseThrow(ClusterNotFoundException::new);
    if (!cluster.getTopics().containsKey(topicName)) {
      throw new TopicNotFoundException();
    }
    if (msg.getKey() == null && msg.getContent() == null) {
      throw new ValidationException("Invalid message: both key and value can't be null");
    }
    if (msg.getPartition() != null
        && msg.getPartition() > cluster.getTopics().get(topicName).getPartitionCount() - 1) {
      throw new ValidationException("Invalid partition");
    }
    return kafkaService.sendMessage(cluster, topicName, msg).then();
  }

  @NotNull
  private Mono<Void> reThrowCustomException(Throwable e) {
    if (e instanceof GroupIdNotFoundException) {
      return Mono.error(new NotFoundException("The group id does not exist"));
    } else if (e instanceof GroupNotEmptyException) {
      return Mono.error(new IllegalEntityStateException("The group is not empty"));
    } else {
      return Mono.error(e);
    }
  }

  public Mono<ReplicationFactorChangeResponse> changeReplicationFactor(
      String clusterName,
      String topicName,
      ReplicationFactorChange replicationFactorChange) {
    return clustersStorage.getClusterByName(clusterName).map(cluster ->
        kafkaService.changeReplicationFactor(cluster, topicName, replicationFactorChange)
            .doOnNext(topic -> updateCluster(topic, cluster.getName(), cluster))
            .map(t -> new ReplicationFactorChangeResponse()
                .topicName(t.getName())
                .totalReplicationFactor(t.getReplicationFactor())))
        .orElse(Mono.error(new ClusterNotFoundException(
            String.format("No cluster for name '%s'", clusterName))));
  }
}

@@ -17,13 +17,15 @@ public class ClustersMetricsScheduler {

  private final MetricsUpdateService metricsUpdateService;

  @Scheduled(fixedRate = 30000)
  @Scheduled(fixedRateString = "${kafka.update-metrics-rate-millis:30000}")
  public void updateMetrics() {
    Flux.fromIterable(clustersStorage.getKafkaClustersMap().entrySet())
        .subscribeOn(Schedulers.parallel())
        .parallel()
        .runOn(Schedulers.parallel())
        .map(Map.Entry::getValue)
        .flatMap(metricsUpdateService::updateMetrics)
        .doOnNext(s -> clustersStorage.setKafkaCluster(s.getName(), s))
        .subscribe();
        .then()
        .block();
  }
}

@@ -1,29 +1,27 @@
package com.provectus.kafka.ui.service;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.deserialization.DeserializationService;
import com.provectus.kafka.ui.deserialization.RecordDeserializer;
import com.provectus.kafka.ui.emitter.BackwardRecordEmitter;
import com.provectus.kafka.ui.emitter.ForwardRecordEmitter;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.SeekType;
import com.provectus.kafka.ui.model.SeekDirection;
import com.provectus.kafka.ui.model.TopicMessage;
import com.provectus.kafka.ui.serde.DeserializationService;
import com.provectus.kafka.ui.serde.RecordSerDe;
import com.provectus.kafka.ui.util.ClusterUtil;
import java.time.Duration;
import com.provectus.kafka.ui.util.OffsetsSeekBackward;
import com.provectus.kafka.ui.util.OffsetsSeekForward;
import java.util.Collection;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;

@@ -51,10 +49,20 @@ public class ConsumingService {
    int recordsLimit = Optional.ofNullable(limit)
        .map(s -> Math.min(s, MAX_RECORD_LIMIT))
        .orElse(DEFAULT_RECORD_LIMIT);
    RecordEmitter emitter = new RecordEmitter(
        () -> kafkaService.createConsumer(cluster),
        new OffsetsSeek(topic, consumerPosition));
    RecordDeserializer recordDeserializer =

    java.util.function.Consumer<? super FluxSink<ConsumerRecord<Bytes, Bytes>>> emitter;
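    // pick the emitter by the requested seek direction: forward reads from the seek position
    // onwards, backward reads the records that precede it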
    if (consumerPosition.getSeekDirection().equals(SeekDirection.FORWARD)) {
      emitter = new ForwardRecordEmitter(
          () -> kafkaService.createConsumer(cluster),
          new OffsetsSeekForward(topic, consumerPosition)
      );
    } else {
      emitter = new BackwardRecordEmitter(
          (Map<String, Object> props) -> kafkaService.createConsumer(cluster, props),
          new OffsetsSeekBackward(topic, consumerPosition, recordsLimit)
      );
    }
    RecordSerDe recordDeserializer =
        deserializationService.getRecordDeserializerForCluster(cluster);
    return Flux.create(emitter)
        .subscribeOn(Schedulers.boundedElastic())

@@ -79,7 +87,7 @@ public class ConsumingService {
   * Returns end offsets for partitions where the start offset != the end offset.
   * This is useful when we need to verify that a partition is not empty.
   */
  private static Map<TopicPartition, Long> significantOffsets(Consumer<?, ?> consumer,
  public static Map<TopicPartition, Long> significantOffsets(Consumer<?, ?> consumer,
                                                              String topicName,
                                                              Collection<Integer>
                                                                  partitionsToInclude) {

@@ -98,159 +106,8 @@ public class ConsumingService {
    if (StringUtils.isEmpty(query)) {
      return true;
    }

    Object content = message.getContent();
    JsonNode tree = objectMapper.valueToTree(content);
    return treeContainsValue(tree, query);
    return (StringUtils.isNotEmpty(message.getKey()) && message.getKey().contains(query))
        || (StringUtils.isNotEmpty(message.getContent()) && message.getContent().contains(query));
  }

  private boolean treeContainsValue(JsonNode tree, String query) {
    LinkedList<JsonNode> nodesForSearch = new LinkedList<>();
    nodesForSearch.add(tree);

    while (!nodesForSearch.isEmpty()) {
      JsonNode node = nodesForSearch.removeFirst();

      if (node.isContainerNode()) {
        node.elements().forEachRemaining(nodesForSearch::add);
        continue;
      }

      String nodeValue = node.asText();
      if (nodeValue.contains(query)) {
        return true;
      }
    }

    return false;
  }

  @RequiredArgsConstructor
  static class RecordEmitter
      implements java.util.function.Consumer<FluxSink<ConsumerRecord<Bytes, Bytes>>> {

    private static final Duration POLL_TIMEOUT_MS = Duration.ofMillis(1000L);

    private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
    private final OffsetsSeek offsetsSeek;

    @Override
    public void accept(FluxSink<ConsumerRecord<Bytes, Bytes>> sink) {
      try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
        var waitingOffsets = offsetsSeek.assignAndSeek(consumer);
        while (!sink.isCancelled() && !waitingOffsets.endReached()) {
          ConsumerRecords<Bytes, Bytes> records = consumer.poll(POLL_TIMEOUT_MS);
          log.info("{} records polled", records.count());
          for (ConsumerRecord<Bytes, Bytes> record : records) {
            if (!sink.isCancelled() && !waitingOffsets.endReached()) {
              sink.next(record);
              waitingOffsets.markPolled(record);
            } else {
              break;
            }
          }
        }
        sink.complete();
        log.info("Polling finished");
      } catch (Exception e) {
        log.error("Error occurred while consuming records", e);
        throw new RuntimeException(e);
      }
    }
  }

  @RequiredArgsConstructor
  static class OffsetsSeek {

    private final String topic;
    private final ConsumerPosition consumerPosition;

    public WaitingOffsets assignAndSeek(Consumer<Bytes, Bytes> consumer) {
      SeekType seekType = consumerPosition.getSeekType();
      log.info("Positioning consumer for topic {} with {}", topic, consumerPosition);
      switch (seekType) {
        case OFFSET:
          assignAndSeekForOffset(consumer);
          break;
        case TIMESTAMP:
          assignAndSeekForTimestamp(consumer);
          break;
        case BEGINNING:
          assignAndSeekFromBeginning(consumer);
          break;
        default:
          throw new IllegalArgumentException("Unknown seekType: " + seekType);
      }
      log.info("Assignment: {}", consumer.assignment());
      return new WaitingOffsets(topic, consumer);
    }

    private List<TopicPartition> getRequestedPartitions(Consumer<Bytes, Bytes> consumer) {
      Map<Integer, Long> partitionPositions = consumerPosition.getSeekTo();
      return consumer.partitionsFor(topic).stream()
          .filter(
              p -> partitionPositions.isEmpty() || partitionPositions.containsKey(p.partition()))
          .map(p -> new TopicPartition(p.topic(), p.partition()))
          .collect(Collectors.toList());
    }

    private void assignAndSeekForOffset(Consumer<Bytes, Bytes> consumer) {
      List<TopicPartition> partitions = getRequestedPartitions(consumer);
      consumer.assign(partitions);
      consumerPosition.getSeekTo().forEach((partition, offset) -> {
        TopicPartition topicPartition = new TopicPartition(topic, partition);
        consumer.seek(topicPartition, offset);
      });
    }

    private void assignAndSeekForTimestamp(Consumer<Bytes, Bytes> consumer) {
      Map<TopicPartition, Long> timestampsToSearch =
          consumerPosition.getSeekTo().entrySet().stream()
              .collect(Collectors.toMap(
                  partitionPosition -> new TopicPartition(topic, partitionPosition.getKey()),
                  Map.Entry::getValue
              ));
      Map<TopicPartition, Long> offsetsForTimestamps = consumer.offsetsForTimes(timestampsToSearch)
          .entrySet().stream()
          .filter(e -> e.getValue() != null)
          .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));

      if (offsetsForTimestamps.isEmpty()) {
        throw new IllegalArgumentException("No offsets were found for requested timestamps");
      }

      consumer.assign(offsetsForTimestamps.keySet());
      offsetsForTimestamps.forEach(consumer::seek);
    }

    private void assignAndSeekFromBeginning(Consumer<Bytes, Bytes> consumer) {
      List<TopicPartition> partitions = getRequestedPartitions(consumer);
      consumer.assign(partitions);
      consumer.seekToBeginning(partitions);
    }

    static class WaitingOffsets {
      final Map<Integer, Long> offsets = new HashMap<>(); // partition number -> offset

      WaitingOffsets(String topic, Consumer<?, ?> consumer) {
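        // the end offset is the position after the last record, so the last record
        // to wait for sits at (end offset - 1)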
        var partitions = consumer.assignment().stream()
            .map(TopicPartition::partition)
            .collect(Collectors.toList());
        significantOffsets(consumer, topic, partitions)
            .forEach((tp, offset) -> offsets.put(tp.partition(), offset - 1));
      }

      void markPolled(ConsumerRecord<?, ?> rec) {
        Long waiting = offsets.get(rec.partition());
        if (waiting != null && waiting <= rec.offset()) {
          offsets.remove(rec.partition());
        }
      }

      boolean endReached() {
        return offsets.isEmpty();
      }
    }

  }
}

@@ -1,37 +1,50 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.model.ConsumerGroup;
import com.provectus.kafka.ui.exception.TopicMetadataException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.CleanupPolicy;
import com.provectus.kafka.ui.model.CreateTopicMessage;
import com.provectus.kafka.ui.model.ExtendedAdminClient;
import com.provectus.kafka.ui.model.InternalBrokerDiskUsage;
import com.provectus.kafka.ui.model.InternalBrokerMetrics;
import com.provectus.kafka.ui.model.InternalClusterMetrics;
import com.provectus.kafka.ui.model.InternalConsumerGroup;
import com.provectus.kafka.ui.model.InternalPartition;
import com.provectus.kafka.ui.model.InternalReplica;
import com.provectus.kafka.ui.model.InternalSegmentSizeDto;
import com.provectus.kafka.ui.model.InternalTopic;
import com.provectus.kafka.ui.model.InternalTopicConfig;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.Metric;
import com.provectus.kafka.ui.model.PartitionsIncrease;
import com.provectus.kafka.ui.model.ReplicationFactorChange;
import com.provectus.kafka.ui.model.ServerStatus;
import com.provectus.kafka.ui.model.TopicConsumerGroups;
import com.provectus.kafka.ui.model.TopicCreation;
import com.provectus.kafka.ui.model.TopicUpdate;
import com.provectus.kafka.ui.serde.DeserializationService;
import com.provectus.kafka.ui.serde.RecordSerDe;
import com.provectus.kafka.ui.util.ClusterUtil;
import com.provectus.kafka.ui.util.JmxClusterUtil;
import com.provectus.kafka.ui.util.JmxMetricsName;
import com.provectus.kafka.ui.util.JmxMetricsValueName;
import java.math.BigDecimal;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.LongSummaryStatistics;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import lombok.RequiredArgsConstructor;
import lombok.Setter;
import lombok.SneakyThrows;
import lombok.extern.log4j.Log4j2;
import org.apache.kafka.clients.admin.AdminClient;

@@ -39,17 +52,23 @@ import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.ConfigEntry;
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.admin.ConsumerGroupListing;
import org.apache.kafka.clients.admin.ListTopicsOptions;
import org.apache.kafka.clients.admin.NewPartitionReassignment;
import org.apache.kafka.clients.admin.NewPartitions;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.RecordsToDelete;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;
import org.springframework.beans.factory.annotation.Value;

@@ -71,6 +90,8 @@ public class KafkaService {
  private final Map<String, ExtendedAdminClient> adminClientCache = new ConcurrentHashMap<>();
  private final JmxClusterUtil jmxClusterUtil;
  private final ClustersStorage clustersStorage;
  private final DeserializationService deserializationService;
  @Setter // used in tests
  @Value("${kafka.admin-client-timeout}")
  private int clientTimeout;

@@ -90,13 +111,16 @@ public class KafkaService {
  public Mono<KafkaCluster> getUpdatedCluster(KafkaCluster cluster) {
    return getOrCreateAdminClient(cluster)
        .flatMap(
            ac -> getClusterMetrics(ac.getAdminClient())
                .flatMap(i -> fillJmxMetrics(i, cluster.getName(), ac.getAdminClient()))
                .flatMap(clusterMetrics ->
                    getTopicsData(ac.getAdminClient()).flatMap(it ->
                        updateSegmentMetrics(ac.getAdminClient(), clusterMetrics, it)
                    ).map(segmentSizeDto -> buildFromData(cluster, segmentSizeDto))
                )
            ac -> ClusterUtil.getClusterVersion(ac.getAdminClient()).flatMap(
                version ->
                    getClusterMetrics(ac.getAdminClient())
                        .flatMap(i -> fillJmxMetrics(i, cluster.getName(), ac.getAdminClient()))
                        .flatMap(clusterMetrics ->
                            getTopicsData(ac.getAdminClient()).flatMap(it ->
                                updateSegmentMetrics(ac.getAdminClient(), clusterMetrics, it)
                            ).map(segmentSizeDto -> buildFromData(cluster, version, segmentSizeDto))
                        )
            )
        ).onErrorResume(
            e -> Mono.just(cluster.toBuilder()
                .status(ServerStatus.OFFLINE)

@@ -106,10 +130,12 @@ public class KafkaService {
  }

  private KafkaCluster buildFromData(KafkaCluster currentCluster,
                                     String version,
                                     InternalSegmentSizeDto segmentSizeDto) {

    var topics = segmentSizeDto.getInternalTopicWithSegmentSize();
    var brokersMetrics = segmentSizeDto.getClusterMetricsWithSegmentSize();
    var brokersIds = new ArrayList<>(brokersMetrics.getInternalBrokerMetrics().keySet());

    InternalClusterMetrics.InternalClusterMetricsBuilder metricsBuilder =
        brokersMetrics.toBuilder();

@@ -135,15 +161,18 @@ public class KafkaService {
        .onlinePartitionCount(topicsMetrics.getOnlinePartitionCount())
        .offlinePartitionCount(topicsMetrics.getOfflinePartitionCount())
        .zooKeeperStatus(ClusterUtil.convertToIntServerStatus(zookeeperStatus))
        .version(version)
        .build();

    return currentCluster.toBuilder()
        .version(version)
        .status(ServerStatus.ONLINE)
        .zookeeperStatus(zookeeperStatus)
        .lastZookeeperException(zookeeperException)
        .lastKafkaException(null)
        .metrics(clusterMetrics)
        .topics(topics)
        .brokers(brokersIds)
        .build();
  }

@@ -179,12 +208,18 @@ public class KafkaService {

  private Map<String, InternalTopic> mergeWithConfigs(
      List<InternalTopic> topics, Map<String, List<InternalTopicConfig>> configs) {
    return topics.stream().map(
        t -> t.toBuilder().topicConfigs(configs.get(t.getName())).build()
    ).collect(Collectors.toMap(
        InternalTopic::getName,
        e -> e
    ));
    return topics.stream()
        .map(t -> t.toBuilder().topicConfigs(configs.get(t.getName())).build())
        .map(t -> t.toBuilder().cleanUpPolicy(
            CleanupPolicy.fromString(t.getTopicConfigs().stream()
                .filter(config -> config.getName().equals("cleanup.policy"))
                .findFirst()
                .orElseGet(() -> InternalTopicConfig.builder().value("unknown").build())
                .getValue())).build())
        .collect(Collectors.toMap(
            InternalTopic::getName,
            e -> e
        ));
  }

  @SneakyThrows

@@ -197,11 +232,12 @@ public class KafkaService {
    final Mono<Map<String, List<InternalTopicConfig>>> configsMono =
        loadTopicsConfig(adminClient, topics);

    return ClusterUtil.toMono(adminClient.describeTopics(topics).all()).map(
        m -> m.values().stream().map(ClusterUtil::mapToInternalTopic).collect(Collectors.toList())
    ).flatMap(internalTopics -> configsMono.map(configs ->
        mergeWithConfigs(internalTopics, configs).values()
    )).flatMapMany(Flux::fromIterable);
    return ClusterUtil.toMono(adminClient.describeTopics(topics).all())
        .map(m -> m.values().stream()
            .map(ClusterUtil::mapToInternalTopic).collect(Collectors.toList()))
        .flatMap(internalTopics -> configsMono
            .map(configs -> mergeWithConfigs(internalTopics, configs).values()))
        .flatMapMany(Flux::fromIterable);
  }

@@ -234,10 +270,12 @@ public class KafkaService {
          topicData.getReplicationFactor().shortValue());
      newTopic.configs(topicData.getConfigs());
      return createTopic(adminClient, newTopic).map(v -> topicData);
    }).flatMap(
        topicData ->
            getTopicsData(adminClient, Collections.singleton(topicData.getName()))
                .next()
    })
        .onErrorResume(t -> Mono.error(new TopicMetadataException(t.getMessage())))
        .flatMap(
            topicData ->
                getTopicsData(adminClient, Collections.singleton(topicData.getName()))
                    .next()
        ).switchIfEmpty(Mono.error(new RuntimeException("Can't find created topic")))
        .flatMap(t ->
            loadTopicsConfig(adminClient, Collections.singletonList(t.getName()))

@@ -299,45 +337,59 @@ public class KafkaService {
    );
  }

  public Mono<Collection<ConsumerGroupDescription>> getConsumerGroupsInternal(
  public Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(
      KafkaCluster cluster) {
    return getOrCreateAdminClient(cluster).flatMap(ac ->
        ClusterUtil.toMono(ac.getAdminClient().listConsumerGroups().all())
            .flatMap(s ->
                ClusterUtil.toMono(
                    ac.getAdminClient().describeConsumerGroups(
                        s.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList())
                    ).all()
                ).map(Map::values)
                getConsumerGroupsInternal(
                    cluster,
                    s.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toList()))
            )
    );
  }

  public Mono<List<InternalConsumerGroup>> getConsumerGroupsInternal(
      KafkaCluster cluster, List<String> groupIds) {

    return getOrCreateAdminClient(cluster).flatMap(ac ->
        ClusterUtil.toMono(
            ac.getAdminClient().describeConsumerGroups(groupIds).all()
        ).map(Map::values)
    ).flatMap(descriptions ->
        Flux.fromIterable(descriptions)
            .parallel()
            .flatMap(d ->
                groupMetadata(cluster, d.groupId())
                    .map(offsets -> ClusterUtil.convertToInternalConsumerGroup(d, offsets))
            )
            .sequential()
            .collectList()
    );
  }

  public Mono<List<ConsumerGroup>> getConsumerGroups(KafkaCluster cluster) {
    return getConsumerGroupsInternal(cluster)
        .map(c -> c.stream().map(ClusterUtil::convertToConsumerGroup).collect(Collectors.toList()));
  }
  public Mono<List<InternalConsumerGroup>> getConsumerGroups(
      KafkaCluster cluster, Optional<String> topic, List<String> groupIds) {
    final Mono<List<InternalConsumerGroup>> consumerGroups;

  public Mono<TopicConsumerGroups> getTopicConsumerGroups(KafkaCluster cluster, String topic) {
    final Map<TopicPartition, Long> endOffsets = topicEndOffsets(cluster, topic);
    if (groupIds.isEmpty()) {
      consumerGroups = getConsumerGroupsInternal(cluster);
    } else {
      consumerGroups = getConsumerGroupsInternal(cluster, groupIds);
    }

    return getConsumerGroupsInternal(cluster)
        .flatMapIterable(c ->
    return consumerGroups.map(c ->
        c.stream()
            .map(d -> ClusterUtil.filterConsumerGroupTopic(d, topic))
            .filter(Optional::isPresent)
            .map(Optional::get)
            .map(d ->
                groupMetadata(cluster, d.groupId())
                    .flatMapIterable(meta ->
                        d.members().stream().flatMap(m ->
                            ClusterUtil.convertToConsumerTopicPartitionDetails(
                                m, meta, endOffsets, d.groupId()
                            ).stream()
                        ).collect(Collectors.toList())
                    )
            ).collect(Collectors.toList())
        ).flatMap(f -> f).collectList().map(l -> new TopicConsumerGroups().consumers(l));
            .map(g ->
                g.toBuilder().endOffsets(
                    topicPartitionsEndOffsets(cluster, g.getOffsets().keySet())
                ).build()
            )
            .collect(Collectors.toList())
    );
  }

  public Mono<Map<TopicPartition, OffsetAndMetadata>> groupMetadata(KafkaCluster cluster,

@@ -349,16 +401,6 @@ public class KafkaService {
    ).flatMap(ClusterUtil::toMono);
  }

  public Map<TopicPartition, Long> topicEndOffsets(
      KafkaCluster cluster, String topic) {
    try (KafkaConsumer<Bytes, Bytes> consumer = createConsumer(cluster)) {
      final List<TopicPartition> topicPartitions = consumer.partitionsFor(topic).stream()
          .map(i -> new TopicPartition(i.topic(), i.partition()))
          .collect(Collectors.toList());
      return consumer.endOffsets(topicPartitions);
    }
  }

  public Map<TopicPartition, Long> topicPartitionsEndOffsets(
      KafkaCluster cluster, Collection<TopicPartition> topicPartitions) {
    try (KafkaConsumer<Bytes, Bytes> consumer = createConsumer(cluster)) {

@@ -367,13 +409,19 @@ public class KafkaService {
  }

  public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster) {
    return createConsumer(cluster, Map.of());
  }

  public KafkaConsumer<Bytes, Bytes> createConsumer(KafkaCluster cluster,
                                                    Map<String, Object> properties) {
    Properties props = new Properties();
    props.putAll(cluster.getProperties());
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui");
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "kafka-ui-" + UUID.randomUUID());
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.putAll(properties);

    return new KafkaConsumer<>(props);
  }

@@ -487,7 +535,7 @@ public class KafkaService {
    final Map<Integer, LongSummaryStatistics> brokerStats =
        topicPartitions.stream().collect(
            Collectors.groupingBy(
                t -> t.getT1(),
                Tuple2::getT1,
                Collectors.summarizingLong(Tuple3::getT3)
            )
        );

@@ -631,5 +679,212 @@ public class KafkaService {
        .map(ac -> ac.deleteRecords(records)).then();
  }

  public Mono<RecordMetadata> sendMessage(KafkaCluster cluster, String topic,
                                          CreateTopicMessage msg) {
    RecordSerDe serde =
        deserializationService.getRecordDeserializerForCluster(cluster);

    Properties properties = new Properties();
    properties.putAll(cluster.getProperties());
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers());
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(properties)) {
      final ProducerRecord<byte[], byte[]> producerRecord = serde.serialize(
          topic,
          msg.getKey(),
          msg.getContent(),
          msg.getPartition()
      );

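      // bridge the callback-based producer API into a CompletableFuture
      // so the send result can be returned as a Mono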
      CompletableFuture<RecordMetadata> cf = new CompletableFuture<>();
      producer.send(producerRecord, (metadata, exception) -> {
        if (exception != null) {
          cf.completeExceptionally(exception);
        } else {
          cf.complete(metadata);
        }
      });
      return Mono.fromFuture(cf);
    }
  }

  private Mono<InternalTopic> increaseTopicPartitions(AdminClient adminClient,
                                                      String topicName,
                                                      Map<String, NewPartitions> newPartitionsMap
  ) {
    return ClusterUtil.toMono(adminClient.createPartitions(newPartitionsMap).all(), topicName)
        .flatMap(topic -> getTopicsData(adminClient, Collections.singleton(topic)).next());
  }

  public Mono<InternalTopic> increaseTopicPartitions(
      KafkaCluster cluster,
      String topicName,
      PartitionsIncrease partitionsIncrease) {
    return getOrCreateAdminClient(cluster)
        .flatMap(ac -> {
          Integer actualCount = cluster.getTopics().get(topicName).getPartitionCount();
          Integer requestedCount = partitionsIncrease.getTotalPartitionsCount();

          if (requestedCount < actualCount) {
            return Mono.error(
                new ValidationException(String.format(
                    "Topic currently has %s partitions, which is higher than the requested %s.",
                    actualCount, requestedCount)));
          }
          if (requestedCount.equals(actualCount)) {
            return Mono.error(
                new ValidationException(
                    String.format("Topic already has %s partitions.", actualCount)));
          }

          Map<String, NewPartitions> newPartitionsMap = Collections.singletonMap(
              topicName,
              NewPartitions.increaseTo(partitionsIncrease.getTotalPartitionsCount())
          );
          return increaseTopicPartitions(ac.getAdminClient(), topicName, newPartitionsMap);
        });
  }

  private Mono<InternalTopic> changeReplicationFactor(
      AdminClient adminClient,
      String topicName,
      Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments
  ) {
    return ClusterUtil.toMono(adminClient
        .alterPartitionReassignments(reassignments).all(), topicName)
        .flatMap(topic -> getTopicsData(adminClient, Collections.singleton(topic)).next());
  }

  /**
   * Changes the topic replication factor; works on broker versions 5.4.x and higher.
   */
  public Mono<InternalTopic> changeReplicationFactor(
      KafkaCluster cluster,
      String topicName,
      ReplicationFactorChange replicationFactorChange) {
    return getOrCreateAdminClient(cluster)
        .flatMap(ac -> {
          Integer actual = cluster.getTopics().get(topicName).getReplicationFactor();
          Integer requested = replicationFactorChange.getTotalReplicationFactor();
          Integer brokersCount = cluster.getMetrics().getBrokerCount();

          if (requested.equals(actual)) {
            return Mono.error(
                new ValidationException(
                    String.format("Topic already has replicationFactor %s.", actual)));
          }
          if (requested > brokersCount) {
            return Mono.error(
                new ValidationException(
                    String.format("Requested replication factor %s more than brokers count %s.",
                        requested, brokersCount)));
          }
          return changeReplicationFactor(ac.getAdminClient(), topicName,
              getPartitionsReassignments(cluster, topicName,
                  replicationFactorChange));
        });
  }

  private Map<TopicPartition, Optional<NewPartitionReassignment>> getPartitionsReassignments(
      KafkaCluster cluster,
      String topicName,
      ReplicationFactorChange replicationFactorChange) {
    // Current assignment map (partition number -> list of broker ids)
    Map<Integer, List<Integer>> currentAssignment = getCurrentAssignment(cluster, topicName);
    // Broker usage map (broker id -> number of replicas it currently holds)
    Map<Integer, Integer> brokersUsage = getBrokersMap(cluster, currentAssignment);
    int currentReplicationFactor = cluster.getTopics().get(topicName).getReplicationFactor();

    // If the replication factor should be increased
    if (replicationFactorChange.getTotalReplicationFactor() > currentReplicationFactor) {
      // For each partition
      for (var assignmentList : currentAssignment.values()) {
        // Get brokers list sorted by usage
        var brokers = brokersUsage.entrySet().stream()
            .sorted(Map.Entry.comparingByValue())
            .map(Map.Entry::getKey)
            .collect(Collectors.toList());

        // Iterate brokers and try to add them to the assignment
        // while (partition replicas count != requested replication factor)
        for (Integer broker : brokers) {
          if (!assignmentList.contains(broker)) {
            assignmentList.add(broker);
            brokersUsage.merge(broker, 1, Integer::sum);
          }
          if (assignmentList.size() == replicationFactorChange.getTotalReplicationFactor()) {
            break;
          }
        }
        if (assignmentList.size() != replicationFactorChange.getTotalReplicationFactor()) {
          throw new ValidationException("Something went wrong during adding replicas");
        }
      }

      // If the replication factor should be decreased
    } else if (replicationFactorChange.getTotalReplicationFactor() < currentReplicationFactor) {
      for (Map.Entry<Integer, List<Integer>> assignmentEntry : currentAssignment.entrySet()) {
        var partition = assignmentEntry.getKey();
        var brokers = assignmentEntry.getValue();

        // Get brokers list sorted by usage in reverse order
        var brokersUsageList = brokersUsage.entrySet().stream()
            .sorted(Map.Entry.comparingByValue(Comparator.reverseOrder()))
            .map(Map.Entry::getKey)
            .collect(Collectors.toList());

        // Iterate brokers and try to remove them from the assignment
        // while (partition replicas count != requested replication factor)
        for (Integer broker : brokersUsageList) {
          // Never remove the broker that is the leader of the partition
          if (!cluster.getTopics().get(topicName).getPartitions().get(partition).getLeader()
              .equals(broker)) {
            brokers.remove(broker);
            brokersUsage.merge(broker, -1, Integer::sum);
          }
          if (brokers.size() == replicationFactorChange.getTotalReplicationFactor()) {
            break;
          }
        }
        if (brokers.size() != replicationFactorChange.getTotalReplicationFactor()) {
          throw new ValidationException("Something went wrong during removing replicas");
        }
      }
    } else {
      throw new ValidationException("Replication factor already equals requested");
    }

    // Return result map
    return currentAssignment.entrySet().stream().collect(Collectors.toMap(
        e -> new TopicPartition(topicName, e.getKey()),
        e -> Optional.of(new NewPartitionReassignment(e.getValue()))
    ));
  }

  private Map<Integer, List<Integer>> getCurrentAssignment(KafkaCluster cluster, String topicName) {
    return cluster.getTopics().get(topicName).getPartitions().values().stream()
        .collect(Collectors.toMap(
            InternalPartition::getPartition,
            p -> p.getReplicas().stream()
                .map(InternalReplica::getBroker)
                .collect(Collectors.toList())
        ));
  }

  private Map<Integer, Integer> getBrokersMap(KafkaCluster cluster,
                                              Map<Integer, List<Integer>> currentAssignment) {
    Map<Integer, Integer> result = cluster.getBrokers().stream()
        .collect(Collectors.toMap(
            c -> c,
            c -> 0
        ));
    currentAssignment.values().forEach(brokers -> brokers
        .forEach(broker -> result.put(broker, result.get(broker) + 1)));

    return result;
  }

}

@@ -0,0 +1,169 @@
package com.provectus.kafka.ui.service;

import static com.google.common.base.Preconditions.checkArgument;
import static java.util.stream.Collectors.toMap;
import static java.util.stream.Collectors.toSet;
import static org.apache.kafka.common.ConsumerGroupState.DEAD;
import static org.apache.kafka.common.ConsumerGroupState.EMPTY;

import com.google.common.collect.Sets;
import com.provectus.kafka.ui.exception.NotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.InternalConsumerGroup;
import com.provectus.kafka.ui.model.KafkaCluster;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import lombok.RequiredArgsConstructor;
import lombok.extern.log4j.Log4j2;
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;
import org.springframework.stereotype.Component;

/**
 * Implementation follows https://cwiki.apache.org/confluence/display/KAFKA/KIP-122%3A+Add+Reset+Consumer+Group+Offsets+tooling
 * to work like the "kafka-consumer-groups --reset-offsets" console command
 * (see kafka.admin.ConsumerGroupCommand).
 */
@Log4j2
@Component
@RequiredArgsConstructor
public class OffsetsResetService {

  private final KafkaService kafkaService;

  public void resetToEarliest(KafkaCluster cluster, String group, String topic,
                              Collection<Integer> partitions) {
    checkGroupCondition(cluster, group);
    try (var consumer = getConsumer(cluster, group)) {
      var targetPartitions = getTargetPartitions(consumer, topic, partitions);
      var offsets = consumer.beginningOffsets(targetPartitions);
      commitOffsets(consumer, offsets);
    }
  }

  public void resetToLatest(KafkaCluster cluster, String group, String topic,
                            Collection<Integer> partitions) {
    checkGroupCondition(cluster, group);
    try (var consumer = getConsumer(cluster, group)) {
      var targetPartitions = getTargetPartitions(consumer, topic, partitions);
      var offsets = consumer.endOffsets(targetPartitions);
      commitOffsets(consumer, offsets);
    }
  }

  public void resetToTimestamp(KafkaCluster cluster, String group, String topic,
                               Collection<Integer> partitions, long targetTimestamp) {
    checkGroupCondition(cluster, group);
    try (var consumer = getConsumer(cluster, group)) {
      var targetPartitions = getTargetPartitions(consumer, topic, partitions);
      var offsets = offsetsByTimestamp(consumer, targetPartitions, targetTimestamp);
      commitOffsets(consumer, offsets);
    }
  }

  public void resetToOffsets(KafkaCluster cluster, String group, String topic,
                             Map<Integer, Long> targetOffsets) {
    checkGroupCondition(cluster, group);
    try (var consumer = getConsumer(cluster, group)) {
      var offsets = targetOffsets.entrySet().stream()
          .collect(toMap(e -> new TopicPartition(topic, e.getKey()), Map.Entry::getValue));
      offsets = editOffsetsIfNeeded(consumer, offsets);
      commitOffsets(consumer, offsets);
    }
  }

  private void checkGroupCondition(KafkaCluster cluster, String groupId) {
    InternalConsumerGroup description =
        kafkaService.getConsumerGroupsInternal(cluster)
            .blockOptional()
            .stream()
            .flatMap(Collection::stream)
            .filter(cgd -> cgd.getGroupId().equals(groupId))
            .findAny()
            .orElseThrow(() -> new NotFoundException("Consumer group not found"));
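
    // offsets can be reset only for an inactive group, i.e. one in DEAD or EMPTY state
    // with no active members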
|
||||
|
||||
if (!Set.of(DEAD, EMPTY).contains(description.getState())) {
|
||||
throw new ValidationException(
|
||||
String.format(
|
||||
"Group's offsets can be reset only if group is inactive, but group is in %s state",
|
||||
description.getState()));
|
||||
}
|
||||
}
|
||||
|
||||
private Map<TopicPartition, Long> offsetsByTimestamp(Consumer<?, ?> consumer,
|
||||
Set<TopicPartition> partitions,
|
||||
long timestamp) {
|
||||
Map<TopicPartition, OffsetAndTimestamp> timestampedOffsets = consumer
|
||||
.offsetsForTimes(partitions.stream().collect(toMap(p -> p, p -> timestamp)));
|
||||
|
||||
var foundOffsets = timestampedOffsets.entrySet().stream()
|
||||
.filter(e -> e.getValue() != null)
|
||||
.collect(toMap(Map.Entry::getKey, e -> e.getValue().offset()));
|
||||
|
||||
// for partitions where we didnt find offset by timestamp, we use end offsets
|
||||
foundOffsets.putAll(consumer.endOffsets(Sets.difference(partitions, foundOffsets.keySet())));
|
||||
return foundOffsets;
|
||||
}
|
||||
|
||||
private Set<TopicPartition> getTargetPartitions(Consumer<?, ?> consumer, String topic,
|
||||
Collection<Integer> partitions) {
|
||||
var allPartitions = allTopicPartitions(consumer, topic);
|
||||
if (partitions == null || partitions.isEmpty()) {
|
||||
return allPartitions;
|
||||
} else {
|
||||
return partitions.stream()
|
||||
.map(idx -> new TopicPartition(topic, idx))
|
||||
.peek(tp -> checkArgument(allPartitions.contains(tp), "Invalid partition %s", tp))
|
||||
.collect(toSet());
|
||||
}
|
||||
}
|
||||
|
||||
private Set<TopicPartition> allTopicPartitions(Consumer<?, ?> consumer, String topic) {
|
||||
return consumer.partitionsFor(topic).stream()
|
||||
.map(info -> new TopicPartition(topic, info.partition()))
|
||||
.collect(toSet());
|
||||
}
|
||||
|
||||
/**
 * Checks that the submitted offsets fall between the earliest and latest offsets of each
 * partition. If an offset is out of that range, it is reset to the earliest or latest offset
 * (following the logic of kafka.admin.ConsumerGroupCommand.scala).
 */
|
||||
private Map<TopicPartition, Long> editOffsetsIfNeeded(Consumer<?, ?> consumer,
|
||||
Map<TopicPartition, Long> offsetsToCheck) {
|
||||
var earliestOffsets = consumer.beginningOffsets(offsetsToCheck.keySet());
|
||||
var latestOffsets = consumer.endOffsets(offsetsToCheck.keySet());
|
||||
var result = new HashMap<TopicPartition, Long>();
|
||||
offsetsToCheck.forEach((tp, offset) -> {
|
||||
if (earliestOffsets.get(tp) > offset) {
|
||||
log.warn("Offset for partition {} is lower than earliest offset, resetting to earliest",
|
||||
tp);
|
||||
result.put(tp, earliestOffsets.get(tp));
|
||||
} else if (latestOffsets.get(tp) < offset) {
|
||||
log.warn("Offset for partition {} is greater than latest offset, resetting to latest", tp);
|
||||
result.put(tp, latestOffsets.get(tp));
|
||||
} else {
|
||||
result.put(tp, offset);
|
||||
}
|
||||
});
|
||||
return result;
|
||||
}
|
||||
|
||||
private void commitOffsets(Consumer<?, ?> consumer, Map<TopicPartition, Long> offsets) {
|
||||
consumer.commitSync(
|
||||
offsets.entrySet().stream()
|
||||
.collect(toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue())))
|
||||
);
|
||||
}
|
||||
|
||||
private Consumer<?, ?> getConsumer(KafkaCluster cluster, String groupId) {
|
||||
return kafkaService.createConsumer(cluster, Map.of(ConsumerConfig.GROUP_ID_CONFIG, groupId));
|
||||
}
|
||||
|
||||
}
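For orientation, the reset methods above all boil down to committing new offsets on behalf of an inactive group. A minimal, standalone sketch of the resetToLatest case using only the plain kafka-clients API (the bootstrap address, group id and topic name are placeholders, not values from this change):

import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;

public class ResetToLatestSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");                // placeholder
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);

    try (KafkaConsumer<Bytes, Bytes> consumer = new KafkaConsumer<>(props)) {
      // all partitions of the topic, like getTargetPartitions() with an empty partition list
      List<TopicPartition> partitions = consumer.partitionsFor("my-topic").stream()
          .map(p -> new TopicPartition(p.topic(), p.partition()))
          .collect(Collectors.toList());
      // end offsets are the "latest" positions; committing them moves the inactive group there
      Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
      consumer.commitSync(endOffsets.entrySet().stream()
          .collect(Collectors.toMap(Map.Entry::getKey, e -> new OffsetAndMetadata(e.getValue()))));
    }
  }
}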
@ -7,13 +7,16 @@ import com.provectus.kafka.ui.exception.ClusterNotFoundException;
|
|||
import com.provectus.kafka.ui.exception.DuplicateEntityException;
|
||||
import com.provectus.kafka.ui.exception.SchemaNotFoundException;
|
||||
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
|
||||
import com.provectus.kafka.ui.exception.ValidationException;
|
||||
import com.provectus.kafka.ui.mapper.ClusterMapper;
|
||||
import com.provectus.kafka.ui.model.CompatibilityCheckResponse;
|
||||
import com.provectus.kafka.ui.model.CompatibilityLevel;
|
||||
import com.provectus.kafka.ui.model.InternalSchemaRegistry;
|
||||
import com.provectus.kafka.ui.model.KafkaCluster;
|
||||
import com.provectus.kafka.ui.model.NewSchemaSubject;
|
||||
import com.provectus.kafka.ui.model.SchemaSubject;
|
||||
import com.provectus.kafka.ui.model.SchemaType;
|
||||
import com.provectus.kafka.ui.model.schemaregistry.ErrorResponse;
|
||||
import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityCheck;
|
||||
import com.provectus.kafka.ui.model.schemaregistry.InternalCompatibilityLevel;
|
||||
import com.provectus.kafka.ui.model.schemaregistry.InternalNewSchema;
|
||||
|
@ -25,6 +28,8 @@ import java.util.function.Function;
|
|||
import lombok.RequiredArgsConstructor;
|
||||
import lombok.extern.log4j.Log4j2;
|
||||
import org.jetbrains.annotations.NotNull;
|
||||
import org.springframework.http.HttpHeaders;
|
||||
import org.springframework.http.HttpMethod;
|
||||
import org.springframework.http.MediaType;
|
||||
import org.springframework.http.ResponseEntity;
|
||||
import org.springframework.stereotype.Service;
|
||||
|
@ -60,8 +65,10 @@ public class SchemaRegistryService {
|
|||
|
||||
public Mono<String[]> getAllSubjectNames(String clusterName) {
|
||||
return clustersStorage.getClusterByName(clusterName)
|
||||
.map(cluster -> webClient.get()
|
||||
.uri(cluster.getSchemaRegistry() + URL_SUBJECTS)
|
||||
.map(cluster -> configuredWebClient(
|
||||
cluster,
|
||||
HttpMethod.GET,
|
||||
URL_SUBJECTS)
|
||||
.retrieve()
|
||||
.bodyToMono(String[].class)
|
||||
.doOnError(log::error)
|
||||
|
@ -76,8 +83,10 @@ public class SchemaRegistryService {
|
|||
|
||||
private Flux<Integer> getSubjectVersions(String clusterName, String schemaName) {
|
||||
return clustersStorage.getClusterByName(clusterName)
|
||||
.map(cluster -> webClient.get()
|
||||
.uri(cluster.getSchemaRegistry() + URL_SUBJECT_VERSIONS, schemaName)
|
||||
.map(cluster -> configuredWebClient(
|
||||
cluster,
|
||||
HttpMethod.GET,
|
||||
URL_SUBJECT_VERSIONS, schemaName)
|
||||
.retrieve()
|
||||
.onStatus(NOT_FOUND::equals,
|
||||
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName))
|
||||
|
@ -98,8 +107,10 @@ public class SchemaRegistryService {
|
|||
private Mono<SchemaSubject> getSchemaSubject(String clusterName, String schemaName,
|
||||
String version) {
|
||||
return clustersStorage.getClusterByName(clusterName)
|
||||
.map(cluster -> webClient.get()
|
||||
.uri(cluster.getSchemaRegistry() + URL_SUBJECT_BY_VERSION, schemaName, version)
|
||||
.map(cluster -> configuredWebClient(
|
||||
cluster,
|
||||
HttpMethod.GET,
|
||||
URL_SUBJECT_BY_VERSION, schemaName, version)
|
||||
.retrieve()
|
||||
.onStatus(NOT_FOUND::equals,
|
||||
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA_VERSION, schemaName, version))
|
||||
|
@ -139,8 +150,10 @@ public class SchemaRegistryService {
|
|||
private Mono<ResponseEntity<Void>> deleteSchemaSubject(String clusterName, String schemaName,
|
||||
String version) {
|
||||
return clustersStorage.getClusterByName(clusterName)
|
||||
.map(cluster -> webClient.delete()
|
||||
.uri(cluster.getSchemaRegistry() + URL_SUBJECT_BY_VERSION, schemaName, version)
|
||||
.map(cluster -> configuredWebClient(
|
||||
cluster,
|
||||
HttpMethod.DELETE,
|
||||
URL_SUBJECT_BY_VERSION, schemaName, version)
|
||||
.retrieve()
|
||||
.onStatus(NOT_FOUND::equals,
|
||||
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA_VERSION, schemaName, version))
|
||||
|
@ -151,8 +164,10 @@ public class SchemaRegistryService {
|
|||
public Mono<ResponseEntity<Void>> deleteSchemaSubjectEntirely(String clusterName,
|
||||
String schemaName) {
|
||||
return clustersStorage.getClusterByName(clusterName)
|
||||
.map(cluster -> webClient.delete()
|
||||
.uri(cluster.getSchemaRegistry() + URL_SUBJECT, schemaName)
|
||||
.map(cluster -> configuredWebClient(
|
||||
cluster,
|
||||
HttpMethod.DELETE,
|
||||
URL_SUBJECT, schemaName)
|
||||
.retrieve()
|
||||
.onStatus(NOT_FOUND::equals,
|
||||
throwIfNotFoundStatus(formatted(NO_SUCH_SCHEMA, schemaName))
|
||||
|
@ -177,8 +192,8 @@ public class SchemaRegistryService {
|
|||
return clustersStorage.getClusterByName(clusterName)
|
||||
.map(KafkaCluster::getSchemaRegistry)
|
||||
.map(
|
||||
schemaRegistryUrl -> checkSchemaOnDuplicate(subject, newSchema, schemaRegistryUrl)
|
||||
.flatMap(s -> submitNewSchema(subject, newSchema, schemaRegistryUrl))
|
||||
schemaRegistry -> checkSchemaOnDuplicate(subject, newSchema, schemaRegistry)
|
||||
.flatMap(s -> submitNewSchema(subject, newSchema, schemaRegistry))
|
||||
.flatMap(resp -> getLatestSchemaVersionBySubject(clusterName, subject))
|
||||
)
|
||||
.orElse(Mono.error(ClusterNotFoundException::new));
|
||||
|
@ -188,29 +203,35 @@ public class SchemaRegistryService {
|
|||
@NotNull
|
||||
private Mono<SubjectIdResponse> submitNewSchema(String subject,
|
||||
Mono<InternalNewSchema> newSchemaSubject,
|
||||
String schemaRegistryUrl) {
|
||||
return webClient.post()
|
||||
.uri(schemaRegistryUrl + URL_SUBJECT_VERSIONS, subject)
|
||||
InternalSchemaRegistry schemaRegistry) {
|
||||
return configuredWebClient(
|
||||
schemaRegistry,
|
||||
HttpMethod.POST,
|
||||
URL_SUBJECT_VERSIONS, subject)
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.body(BodyInserters.fromPublisher(newSchemaSubject, InternalNewSchema.class))
|
||||
.retrieve()
|
||||
.onStatus(UNPROCESSABLE_ENTITY::equals,
|
||||
r -> Mono.error(new UnprocessableEntityException("Invalid params")))
|
||||
r -> r.bodyToMono(ErrorResponse.class)
|
||||
.flatMap(x -> Mono.error(new UnprocessableEntityException(x.getMessage()))))
|
||||
.bodyToMono(SubjectIdResponse.class);
|
||||
}
|
||||
|
||||
@NotNull
|
||||
private Mono<SchemaSubject> checkSchemaOnDuplicate(String subject,
|
||||
Mono<InternalNewSchema> newSchemaSubject,
|
||||
String schemaRegistryUrl) {
|
||||
return webClient.post()
|
||||
.uri(schemaRegistryUrl + URL_SUBJECT, subject)
|
||||
InternalSchemaRegistry schemaRegistry) {
|
||||
return configuredWebClient(
|
||||
schemaRegistry,
|
||||
HttpMethod.POST,
|
||||
URL_SUBJECT, subject)
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.body(BodyInserters.fromPublisher(newSchemaSubject, InternalNewSchema.class))
|
||||
.retrieve()
|
||||
.onStatus(NOT_FOUND::equals, res -> Mono.empty())
|
||||
.onStatus(UNPROCESSABLE_ENTITY::equals,
|
||||
r -> Mono.error(new UnprocessableEntityException("Invalid params")))
|
||||
r -> r.bodyToMono(ErrorResponse.class)
|
||||
.flatMap(x -> Mono.error(new UnprocessableEntityException(x.getMessage()))))
|
||||
.bodyToMono(SchemaSubject.class)
|
||||
.filter(s -> Objects.isNull(s.getId()))
|
||||
.switchIfEmpty(Mono.error(new DuplicateEntityException("Such schema already exists")));
|
||||
|
@ -233,8 +254,10 @@ public class SchemaRegistryService {
|
|||
return clustersStorage.getClusterByName(clusterName)
|
||||
.map(cluster -> {
|
||||
String configEndpoint = Objects.isNull(schemaName) ? "/config" : "/config/{schemaName}";
|
||||
return webClient.put()
|
||||
.uri(cluster.getSchemaRegistry() + configEndpoint, schemaName)
|
||||
return configuredWebClient(
|
||||
cluster,
|
||||
HttpMethod.PUT,
|
||||
configEndpoint, schemaName)
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.body(BodyInserters.fromPublisher(compatibilityLevel, CompatibilityLevel.class))
|
||||
.retrieve()
|
||||
|
@ -254,8 +277,10 @@ public class SchemaRegistryService {
|
|||
return clustersStorage.getClusterByName(clusterName)
|
||||
.map(cluster -> {
|
||||
String configEndpoint = Objects.isNull(schemaName) ? "/config" : "/config/{schemaName}";
|
||||
return webClient.get()
|
||||
.uri(cluster.getSchemaRegistry() + configEndpoint, schemaName)
|
||||
return configuredWebClient(
|
||||
cluster,
|
||||
HttpMethod.GET,
|
||||
configEndpoint, schemaName)
|
||||
.retrieve()
|
||||
.bodyToMono(InternalCompatibilityLevel.class)
|
||||
.map(mapper::toCompatibilityLevel)
|
||||
|
@ -276,9 +301,10 @@ public class SchemaRegistryService {
|
|||
public Mono<CompatibilityCheckResponse> checksSchemaCompatibility(
|
||||
String clusterName, String schemaName, Mono<NewSchemaSubject> newSchemaSubject) {
|
||||
return clustersStorage.getClusterByName(clusterName)
|
||||
.map(cluster -> webClient.post()
|
||||
.uri(cluster.getSchemaRegistry()
|
||||
+ "/compatibility/subjects/{schemaName}/versions/latest", schemaName)
|
||||
.map(cluster -> configuredWebClient(
|
||||
cluster,
|
||||
HttpMethod.POST,
|
||||
"/compatibility/subjects/{schemaName}/versions/latest", schemaName)
|
||||
.contentType(MediaType.APPLICATION_JSON)
|
||||
.body(BodyInserters.fromPublisher(newSchemaSubject, NewSchemaSubject.class))
|
||||
.retrieve()
|
||||
|
@ -293,4 +319,32 @@ public class SchemaRegistryService {
|
|||
public String formatted(String str, Object... args) {
|
||||
return new Formatter().format(str, args).toString();
|
||||
}
|
||||
|
||||
private void setBasicAuthIfEnabled(InternalSchemaRegistry schemaRegistry, HttpHeaders headers) {
|
||||
if (schemaRegistry.getUsername() != null && schemaRegistry.getPassword() != null) {
|
||||
headers.setBasicAuth(
|
||||
schemaRegistry.getUsername(),
|
||||
schemaRegistry.getPassword()
|
||||
);
|
||||
} else if (schemaRegistry.getUsername() != null) {
|
||||
throw new ValidationException(
|
||||
"You specified username but do not specified password");
|
||||
} else if (schemaRegistry.getPassword() != null) {
|
||||
throw new ValidationException(
|
||||
"You specified password but do not specified username");
|
||||
}
|
||||
}
|
||||
|
||||
private WebClient.RequestBodySpec configuredWebClient(KafkaCluster cluster, HttpMethod method,
|
||||
String uri, Object... params) {
|
||||
return configuredWebClient(cluster.getSchemaRegistry(), method, uri, params);
|
||||
}
|
||||
|
||||
private WebClient.RequestBodySpec configuredWebClient(InternalSchemaRegistry schemaRegistry,
|
||||
HttpMethod method, String uri,
|
||||
Object... params) {
|
||||
return webClient.method(method)
|
||||
.uri(schemaRegistry.getUrl() + uri, params)
|
||||
.headers(headers -> setBasicAuthIfEnabled(schemaRegistry, headers));
|
||||
}
|
||||
}
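A condensed, standalone illustration of the WebClient pattern that configuredWebClient(...) encapsulates: HTTP method plus URI template plus optional basic auth. The registry URL, the credentials and the "/subjects" path are assumptions for this sketch (the standard Schema Registry endpoint), not values read from the constants in this file:

import org.springframework.http.HttpMethod;
import org.springframework.web.reactive.function.client.WebClient;
import reactor.core.publisher.Mono;

public class SchemaRegistryClientSketch {
  public static void main(String[] args) {
    WebClient webClient = WebClient.create();
    String registryUrl = "http://localhost:8081"; // placeholder

    Mono<String[]> subjects = webClient.method(HttpMethod.GET)
        .uri(registryUrl + "/subjects")
        // mirrors setBasicAuthIfEnabled: add the header only when both credentials are present
        .headers(headers -> headers.setBasicAuth("registry-user", "registry-password"))
        .retrieve()
        .bodyToMono(String[].class);

    System.out.println(String.join(", ", subjects.block()));
  }
}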
@ -3,20 +3,24 @@ package com.provectus.kafka.ui.util;
|
|||
import static com.provectus.kafka.ui.util.KafkaConstants.TOPIC_DEFAULT_CONFIGS;
|
||||
import static org.apache.kafka.common.config.TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG;
|
||||
|
||||
import com.provectus.kafka.ui.deserialization.RecordDeserializer;
|
||||
import com.provectus.kafka.ui.model.Broker;
|
||||
import com.provectus.kafka.ui.model.ConsumerGroup;
|
||||
import com.provectus.kafka.ui.model.ConsumerGroupDetails;
|
||||
import com.provectus.kafka.ui.model.ConsumerTopicPartitionDetail;
|
||||
import com.provectus.kafka.ui.model.ConsumerGroupState;
|
||||
import com.provectus.kafka.ui.model.ConsumerGroupTopicPartition;
|
||||
import com.provectus.kafka.ui.model.ExtendedAdminClient;
|
||||
import com.provectus.kafka.ui.model.InternalConsumerGroup;
|
||||
import com.provectus.kafka.ui.model.InternalPartition;
|
||||
import com.provectus.kafka.ui.model.InternalReplica;
|
||||
import com.provectus.kafka.ui.model.InternalTopic;
|
||||
import com.provectus.kafka.ui.model.InternalTopicConfig;
|
||||
import com.provectus.kafka.ui.model.ServerStatus;
|
||||
import com.provectus.kafka.ui.model.TopicMessage;
|
||||
import com.provectus.kafka.ui.serde.RecordSerDe;
|
||||
import java.time.Instant;
|
||||
import java.time.OffsetDateTime;
|
||||
import java.time.ZoneId;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
|
@ -31,8 +35,6 @@ import org.apache.kafka.clients.admin.AdminClient;
|
|||
import org.apache.kafka.clients.admin.Config;
|
||||
import org.apache.kafka.clients.admin.ConfigEntry;
|
||||
import org.apache.kafka.clients.admin.ConsumerGroupDescription;
|
||||
import org.apache.kafka.clients.admin.MemberAssignment;
|
||||
import org.apache.kafka.clients.admin.MemberDescription;
|
||||
import org.apache.kafka.clients.admin.TopicDescription;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
|
||||
|
@ -43,6 +45,7 @@ import org.apache.kafka.common.config.ConfigResource;
|
|||
import org.apache.kafka.common.record.TimestampType;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.core.publisher.Mono;
|
||||
import reactor.util.function.Tuple2;
|
||||
|
||||
@Slf4j
|
||||
public class ClusterUtil {
|
||||
|
@ -71,57 +74,120 @@ public class ClusterUtil {
|
|||
}));
|
||||
}
|
||||
|
||||
public static ConsumerGroup convertToConsumerGroup(ConsumerGroupDescription c) {
|
||||
ConsumerGroup consumerGroup = new ConsumerGroup();
|
||||
consumerGroup.setConsumerGroupId(c.groupId());
|
||||
consumerGroup.setNumConsumers(c.members().size());
|
||||
int numTopics = c.members().stream()
|
||||
.flatMap(m -> m.assignment().topicPartitions().stream().flatMap(t -> Stream.of(t.topic())))
|
||||
.collect(Collectors.toSet()).size();
|
||||
consumerGroup.setNumTopics(numTopics);
|
||||
consumerGroup.setSimple(c.isSimpleConsumerGroup());
|
||||
Optional.ofNullable(c.state())
|
||||
.ifPresent(s -> consumerGroup.setState(s.name()));
|
||||
Optional.ofNullable(c.coordinator())
|
||||
.ifPresent(coord -> consumerGroup.setCoordintor(coord.host()));
|
||||
consumerGroup.setPartitionAssignor(c.partitionAssignor());
|
||||
public static InternalConsumerGroup convertToInternalConsumerGroup(
|
||||
ConsumerGroupDescription description, Map<TopicPartition, OffsetAndMetadata> offsets) {
|
||||
|
||||
var builder = InternalConsumerGroup.builder();
|
||||
builder.groupId(description.groupId());
|
||||
builder.simple(description.isSimpleConsumerGroup());
|
||||
builder.state(description.state());
|
||||
builder.partitionAssignor(description.partitionAssignor());
|
||||
builder.members(
|
||||
description.members().stream()
|
||||
.map(m ->
|
||||
InternalConsumerGroup.InternalMember.builder()
|
||||
.assignment(m.assignment().topicPartitions())
|
||||
.clientId(m.clientId())
|
||||
.groupInstanceId(m.groupInstanceId().orElse(""))
|
||||
.consumerId(m.consumerId())
|
||||
.clientId(m.clientId())
|
||||
.host(m.host())
|
||||
.build()
|
||||
).collect(Collectors.toList())
|
||||
);
|
||||
builder.offsets(offsets);
|
||||
Optional.ofNullable(description.coordinator()).ifPresent(builder::coordinator);
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
public static ConsumerGroup convertToConsumerGroup(InternalConsumerGroup c) {
|
||||
return convertToConsumerGroup(c, new ConsumerGroup());
|
||||
}
|
||||
|
||||
public static <T extends ConsumerGroup> T convertToConsumerGroup(
|
||||
InternalConsumerGroup c, T consumerGroup) {
|
||||
consumerGroup.setGroupId(c.getGroupId());
|
||||
consumerGroup.setMembers(c.getMembers().size());
|
||||
|
||||
int numTopics = Stream.concat(
|
||||
c.getOffsets().keySet().stream().map(TopicPartition::topic),
|
||||
c.getMembers().stream()
|
||||
.flatMap(m -> m.getAssignment().stream().map(TopicPartition::topic))
|
||||
).collect(Collectors.toSet()).size();
|
||||
|
||||
long messagesBehind = c.getOffsets().entrySet().stream()
|
||||
.mapToLong(e ->
|
||||
Optional.ofNullable(c.getEndOffsets())
|
||||
.map(o -> o.get(e.getKey()))
|
||||
.map(o -> o - e.getValue().offset())
|
||||
.orElse(0L)
|
||||
).sum();
|
||||
|
||||
consumerGroup.setMessagesBehind(messagesBehind);
|
||||
consumerGroup.setTopics(numTopics);
|
||||
consumerGroup.setSimple(c.isSimple());
|
||||
|
||||
Optional.ofNullable(c.getState())
|
||||
.ifPresent(s -> consumerGroup.setState(mapConsumerGroupState(s)));
|
||||
Optional.ofNullable(c.getCoordinator())
|
||||
.ifPresent(cd -> consumerGroup.setCoordinator(mapCoordinator(cd)));
|
||||
|
||||
consumerGroup.setPartitionAssignor(c.getPartitionAssignor());
|
||||
return consumerGroup;
|
||||
}
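The "messages behind" figure computed here is the end offset minus the committed offset, summed over partitions. Outside this codebase the same number can be derived with plain kafka-clients; a hedged sketch (broker address and group id are placeholders):

import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;

public class MessagesBehindSketch {
  public static void main(String[] args) throws Exception {
    Properties adminProps = new Properties();
    adminProps.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder

    try (AdminClient admin = AdminClient.create(adminProps)) {
      // committed offsets of the group, keyed by partition
      Map<TopicPartition, OffsetAndMetadata> committed =
          admin.listConsumerGroupOffsets("my-group").partitionsToOffsetAndMetadata().get();

      Properties consumerProps = new Properties();
      consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
      consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
      consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);

      try (KafkaConsumer<Bytes, Bytes> consumer = new KafkaConsumer<>(consumerProps)) {
        Map<TopicPartition, Long> endOffsets = consumer.endOffsets(committed.keySet());
        long messagesBehind = committed.entrySet().stream()
            .mapToLong(e -> endOffsets.getOrDefault(e.getKey(), e.getValue().offset())
                - e.getValue().offset())
            .sum();
        System.out.println("messages behind: " + messagesBehind);
      }
    }
  }
}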
|
||||
|
||||
public static ConsumerGroupDetails convertToConsumerGroupDetails(
|
||||
ConsumerGroupDescription desc, List<ConsumerTopicPartitionDetail> consumers
|
||||
) {
|
||||
return new ConsumerGroupDetails()
|
||||
.consumers(consumers)
|
||||
.consumerGroupId(desc.groupId())
|
||||
.simple(desc.isSimpleConsumerGroup())
|
||||
.coordintor(Optional.ofNullable(desc.coordinator()).map(Node::host).orElse(""))
|
||||
.state(Optional.ofNullable(desc.state()).map(Enum::name).orElse(""))
|
||||
.partitionAssignor(desc.partitionAssignor());
|
||||
public static ConsumerGroupDetails convertToConsumerGroupDetails(InternalConsumerGroup g) {
|
||||
final ConsumerGroupDetails details = convertToConsumerGroup(g, new ConsumerGroupDetails());
|
||||
Map<TopicPartition, ConsumerGroupTopicPartition> partitionMap = new HashMap<>();
|
||||
|
||||
for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : g.getOffsets().entrySet()) {
|
||||
ConsumerGroupTopicPartition partition = new ConsumerGroupTopicPartition();
|
||||
partition.setTopic(entry.getKey().topic());
|
||||
partition.setPartition(entry.getKey().partition());
|
||||
partition.setCurrentOffset(entry.getValue().offset());
|
||||
|
||||
final Optional<Long> endOffset = Optional.ofNullable(g.getEndOffsets())
|
||||
.map(o -> o.get(entry.getKey()));
|
||||
|
||||
final Long behind = endOffset.map(o -> o - entry.getValue().offset())
|
||||
.orElse(0L);
|
||||
|
||||
partition.setEndOffset(endOffset.orElse(0L));
|
||||
partition.setMessagesBehind(behind);
|
||||
|
||||
partitionMap.put(entry.getKey(), partition);
|
||||
}
|
||||
|
||||
for (InternalConsumerGroup.InternalMember member : g.getMembers()) {
|
||||
for (TopicPartition topicPartition : member.getAssignment()) {
|
||||
final ConsumerGroupTopicPartition partition = partitionMap.computeIfAbsent(topicPartition,
|
||||
(tp) -> new ConsumerGroupTopicPartition()
|
||||
.topic(tp.topic())
|
||||
.partition(tp.partition())
|
||||
);
|
||||
partition.setHost(member.getHost());
|
||||
partition.setConsumerId(member.getConsumerId());
|
||||
partitionMap.put(topicPartition, partition);
|
||||
}
|
||||
}
|
||||
details.setPartitions(new ArrayList<>(partitionMap.values()));
|
||||
return details;
|
||||
}
|
||||
|
||||
public static List<ConsumerTopicPartitionDetail> convertToConsumerTopicPartitionDetails(
|
||||
MemberDescription consumer,
|
||||
Map<TopicPartition, OffsetAndMetadata> groupOffsets,
|
||||
Map<TopicPartition, Long> endOffsets,
|
||||
String groupId
|
||||
) {
|
||||
return consumer.assignment().topicPartitions().stream()
|
||||
.map(tp -> {
|
||||
long currentOffset = Optional.ofNullable(groupOffsets.get(tp))
|
||||
.map(OffsetAndMetadata::offset).orElse(0L);
|
||||
long endOffset = Optional.ofNullable(endOffsets.get(tp)).orElse(0L);
|
||||
ConsumerTopicPartitionDetail cd = new ConsumerTopicPartitionDetail();
|
||||
cd.setGroupId(groupId);
|
||||
cd.setConsumerId(consumer.consumerId());
|
||||
cd.setHost(consumer.host());
|
||||
cd.setTopic(tp.topic());
|
||||
cd.setPartition(tp.partition());
|
||||
cd.setCurrentOffset(currentOffset);
|
||||
cd.setEndOffset(endOffset);
|
||||
cd.setMessagesBehind(endOffset - currentOffset);
|
||||
return cd;
|
||||
}).collect(Collectors.toList());
|
||||
private static Broker mapCoordinator(Node node) {
|
||||
return new Broker().host(node.host()).id(node.id());
|
||||
}
|
||||
|
||||
private static ConsumerGroupState mapConsumerGroupState(
|
||||
org.apache.kafka.common.ConsumerGroupState state) {
|
||||
switch (state) {
|
||||
case DEAD: return ConsumerGroupState.DEAD;
|
||||
case EMPTY: return ConsumerGroupState.EMPTY;
|
||||
case STABLE: return ConsumerGroupState.STABLE;
|
||||
case PREPARING_REBALANCE: return ConsumerGroupState.PREPARING_REBALANCE;
|
||||
case COMPLETING_REBALANCE: return ConsumerGroupState.COMPLETING_REBALANCE;
|
||||
default: return ConsumerGroupState.UNKNOWN;
|
||||
}
|
||||
}
@ -197,7 +263,7 @@ public class ClusterUtil {
|
|||
}
|
||||
|
||||
public static TopicMessage mapToTopicMessage(ConsumerRecord<Bytes, Bytes> consumerRecord,
|
||||
RecordDeserializer recordDeserializer) {
|
||||
RecordSerDe recordDeserializer) {
|
||||
Map<String, String> headers = new HashMap<>();
|
||||
consumerRecord.headers().iterator()
|
||||
.forEachRemaining(header -> headers.put(header.key(), new String(header.value())));
|
||||
|
@ -212,12 +278,11 @@ public class ClusterUtil {
|
|||
topicMessage.setOffset(consumerRecord.offset());
|
||||
topicMessage.setTimestamp(timestamp);
|
||||
topicMessage.setTimestampType(timestampType);
|
||||
if (consumerRecord.key() != null) {
|
||||
topicMessage.setKey(consumerRecord.key().toString());
|
||||
}
|
||||
|
||||
topicMessage.setHeaders(headers);
|
||||
Object parsedValue = recordDeserializer.deserialize(consumerRecord);
|
||||
topicMessage.setContent(parsedValue);
|
||||
var parsed = recordDeserializer.deserialize(consumerRecord);
|
||||
topicMessage.setKey(parsed.getKey());
|
||||
topicMessage.setContent(parsed.getValue());
|
||||
|
||||
return topicMessage;
|
||||
}
|
||||
|
@ -237,23 +302,12 @@ public class ClusterUtil {
|
|||
|
||||
public static Mono<Set<ExtendedAdminClient.SupportedFeature>> getSupportedFeatures(
|
||||
AdminClient adminClient) {
|
||||
return ClusterUtil.toMono(adminClient.describeCluster().controller())
|
||||
.map(Node::id)
|
||||
.map(id -> Collections
|
||||
.singletonList(new ConfigResource(ConfigResource.Type.BROKER, id.toString())))
|
||||
.map(brokerCR -> adminClient.describeConfigs(brokerCR).all())
|
||||
.flatMap(ClusterUtil::toMono)
|
||||
return getClusterVersion(adminClient)
|
||||
.map(ClusterUtil::getSupportedUpdateFeature)
|
||||
.map(Collections::singleton);
|
||||
}
|
||||
|
||||
private static ExtendedAdminClient.SupportedFeature getSupportedUpdateFeature(
|
||||
Map<ConfigResource, Config> configs) {
|
||||
String version = configs.values().stream()
|
||||
.map(Config::entries)
|
||||
.flatMap(Collection::stream)
|
||||
.filter(entry -> entry.name().contains(CLUSTER_VERSION_PARAM_KEY))
|
||||
.findFirst().orElseThrow().value();
|
||||
private static ExtendedAdminClient.SupportedFeature getSupportedUpdateFeature(String version) {
|
||||
try {
|
||||
final String[] parts = version.split("\\.");
|
||||
if (parts.length > 2) {
|
||||
|
@ -268,48 +322,65 @@ public class ClusterUtil {
|
|||
}
|
||||
}
|
||||
|
||||
public static Mono<String> getClusterVersion(AdminClient adminClient) {
|
||||
return ClusterUtil.toMono(adminClient.describeCluster().controller())
|
||||
.map(Node::id)
|
||||
.map(id -> Collections
|
||||
.singletonList(new ConfigResource(ConfigResource.Type.BROKER, id.toString())))
|
||||
.map(brokerCR -> adminClient.describeConfigs(brokerCR).all())
|
||||
.flatMap(ClusterUtil::toMono)
|
||||
.map(ClusterUtil::getClusterVersion);
|
||||
}
|
||||
|
||||
public static String getClusterVersion(Map<ConfigResource, Config> configs) {
|
||||
return configs.values().stream()
|
||||
.map(Config::entries)
|
||||
.flatMap(Collection::stream)
|
||||
.filter(entry -> entry.name().contains(CLUSTER_VERSION_PARAM_KEY))
|
||||
.findFirst().orElseThrow().value();
|
||||
}
|
||||
|
||||
|
||||
public static <T, R> Map<T, R> toSingleMap(Stream<Map<T, R>> streamOfMaps) {
|
||||
return streamOfMaps
|
||||
.reduce((map1, map2) -> Stream.concat(map1.entrySet().stream(), map2.entrySet().stream())
|
||||
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))).orElseThrow();
|
||||
}
|
||||
|
||||
public static Optional<ConsumerGroupDescription> filterConsumerGroupTopic(
|
||||
ConsumerGroupDescription description, String topic) {
|
||||
final List<MemberDescription> members = description.members().stream()
|
||||
.map(m -> filterConsumerMemberTopic(m, topic))
|
||||
.filter(m -> !m.assignment().topicPartitions().isEmpty())
|
||||
.collect(Collectors.toList());
|
||||
public static Optional<InternalConsumerGroup> filterConsumerGroupTopic(
|
||||
InternalConsumerGroup consumerGroup, Optional<String> topic) {
|
||||
|
||||
if (!members.isEmpty()) {
|
||||
final Map<TopicPartition, OffsetAndMetadata> offsets =
|
||||
consumerGroup.getOffsets().entrySet().stream()
|
||||
.filter(e -> topic.isEmpty() || e.getKey().topic().equals(topic.get()))
|
||||
.collect(Collectors.toMap(
|
||||
Map.Entry::getKey,
|
||||
Map.Entry::getValue
|
||||
));
|
||||
|
||||
final Collection<InternalConsumerGroup.InternalMember> members =
|
||||
consumerGroup.getMembers().stream()
|
||||
.map(m -> filterConsumerMemberTopic(m, topic))
|
||||
.filter(m -> !m.getAssignment().isEmpty())
|
||||
.collect(Collectors.toList());
|
||||
|
||||
if (!members.isEmpty() || !offsets.isEmpty()) {
|
||||
return Optional.of(
|
||||
new ConsumerGroupDescription(
|
||||
description.groupId(),
|
||||
description.isSimpleConsumerGroup(),
|
||||
members,
|
||||
description.partitionAssignor(),
|
||||
description.state(),
|
||||
description.coordinator()
|
||||
)
|
||||
consumerGroup.toBuilder()
|
||||
.offsets(offsets)
|
||||
.members(members)
|
||||
.build()
|
||||
);
|
||||
} else {
|
||||
return Optional.empty();
|
||||
}
|
||||
}
|
||||
|
||||
public static MemberDescription filterConsumerMemberTopic(
|
||||
MemberDescription description, String topic) {
|
||||
final Set<TopicPartition> topicPartitions = description.assignment().topicPartitions()
|
||||
.stream().filter(tp -> tp.topic().equals(topic))
|
||||
public static InternalConsumerGroup.InternalMember filterConsumerMemberTopic(
|
||||
InternalConsumerGroup.InternalMember member, Optional<String> topic) {
|
||||
final Set<TopicPartition> topicPartitions = member.getAssignment()
|
||||
.stream().filter(tp -> topic.isEmpty() || tp.topic().equals(topic.get()))
|
||||
.collect(Collectors.toSet());
|
||||
MemberAssignment assignment = new MemberAssignment(topicPartitions);
|
||||
return new MemberDescription(
|
||||
description.consumerId(),
|
||||
description.groupInstanceId(),
|
||||
description.clientId(),
|
||||
description.host(),
|
||||
assignment
|
||||
);
|
||||
return member.toBuilder().assignment(topicPartitions).build();
|
||||
}
|
||||
|
||||
}
@ -3,6 +3,10 @@ package com.provectus.kafka.ui.util;
|
|||
import org.apache.commons.lang3.math.NumberUtils;
|
||||
|
||||
public class NumberUtil {
|
||||
|
||||
private NumberUtil() {
|
||||
}
|
||||
|
||||
public static boolean isNumeric(Object value) {
|
||||
return value != null && NumberUtils.isCreatable(value.toString());
|
||||
}
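Since isNumeric simply delegates to Commons Lang's NumberUtils.isCreatable, it accepts decimal, scientific and hexadecimal notation but rejects blanks and null; a quick illustration:

import org.apache.commons.lang3.math.NumberUtils;

public class NumberUtilSketch {
  public static void main(String[] args) {
    System.out.println(NumberUtils.isCreatable("42"));      // true
    System.out.println(NumberUtils.isCreatable("3.5e-2"));  // true
    System.out.println(NumberUtils.isCreatable("0x1F"));    // true
    System.out.println(NumberUtils.isCreatable("fourty"));  // false
    System.out.println(NumberUtils.isCreatable(""));        // false
  }
}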
@ -0,0 +1,146 @@
|
|||
package com.provectus.kafka.ui.util;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import com.provectus.kafka.ui.model.SeekType;
|
||||
import java.util.Collection;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
import lombok.extern.log4j.Log4j2;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.clients.consumer.ConsumerRecord;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.util.function.Tuple2;
|
||||
import reactor.util.function.Tuples;
|
||||
|
||||
@Log4j2
|
||||
public abstract class OffsetsSeek {
|
||||
protected final String topic;
|
||||
protected final ConsumerPosition consumerPosition;
|
||||
|
||||
protected OffsetsSeek(String topic, ConsumerPosition consumerPosition) {
|
||||
this.topic = topic;
|
||||
this.consumerPosition = consumerPosition;
|
||||
}
|
||||
|
||||
public ConsumerPosition getConsumerPosition() {
|
||||
return consumerPosition;
|
||||
}
|
||||
|
||||
public Map<TopicPartition, Long> getPartitionsOffsets(Consumer<Bytes, Bytes> consumer) {
|
||||
SeekType seekType = consumerPosition.getSeekType();
|
||||
List<TopicPartition> partitions = getRequestedPartitions(consumer);
|
||||
log.info("Positioning consumer for topic {} with {}", topic, consumerPosition);
|
||||
Map<TopicPartition, Long> offsets;
|
||||
switch (seekType) {
|
||||
case OFFSET:
|
||||
offsets = offsetsFromPositions(consumer, partitions);
|
||||
break;
|
||||
case TIMESTAMP:
|
||||
offsets = offsetsForTimestamp(consumer);
|
||||
break;
|
||||
case BEGINNING:
|
||||
offsets = offsetsFromBeginning(consumer, partitions);
|
||||
break;
|
||||
default:
|
||||
throw new IllegalArgumentException("Unknown seekType: " + seekType);
|
||||
}
|
||||
return offsets;
|
||||
}
|
||||
|
||||
public WaitingOffsets waitingOffsets(Consumer<Bytes, Bytes> consumer,
|
||||
Collection<TopicPartition> partitions) {
|
||||
return new WaitingOffsets(topic, consumer, partitions);
|
||||
}
|
||||
|
||||
public WaitingOffsets assignAndSeek(Consumer<Bytes, Bytes> consumer) {
|
||||
final Map<TopicPartition, Long> partitionsOffsets = getPartitionsOffsets(consumer);
|
||||
consumer.assign(partitionsOffsets.keySet());
|
||||
partitionsOffsets.forEach(consumer::seek);
|
||||
log.info("Assignment: {}", consumer.assignment());
|
||||
return waitingOffsets(consumer, partitionsOffsets.keySet());
|
||||
}
|
||||
|
||||
|
||||
public List<TopicPartition> getRequestedPartitions(Consumer<Bytes, Bytes> consumer) {
|
||||
Map<TopicPartition, Long> partitionPositions = consumerPosition.getSeekTo();
|
||||
return consumer.partitionsFor(topic).stream()
|
||||
.filter(
|
||||
p -> partitionPositions.isEmpty()
|
||||
|| partitionPositions.containsKey(new TopicPartition(p.topic(), p.partition()))
|
||||
).map(p -> new TopicPartition(p.topic(), p.partition()))
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
|
||||
protected abstract Map<TopicPartition, Long> offsetsFromBeginning(
|
||||
Consumer<Bytes, Bytes> consumer, List<TopicPartition> partitions);
|
||||
|
||||
protected abstract Map<TopicPartition, Long> offsetsForTimestamp(
|
||||
Consumer<Bytes, Bytes> consumer);
|
||||
|
||||
protected abstract Map<TopicPartition, Long> offsetsFromPositions(
|
||||
Consumer<Bytes, Bytes> consumer, List<TopicPartition> partitions);
|
||||
|
||||
public static class WaitingOffsets {
|
||||
private final Map<Integer, Long> endOffsets; // partition number -> offset
|
||||
private final Map<Integer, Long> beginOffsets; // partition number -> offset
|
||||
private final String topic;
|
||||
|
||||
public WaitingOffsets(String topic, Consumer<?, ?> consumer,
|
||||
Collection<TopicPartition> partitions) {
|
||||
this.topic = topic;
|
||||
var allBeginningOffsets = consumer.beginningOffsets(partitions);
|
||||
var allEndOffsets = consumer.endOffsets(partitions);
|
||||
|
||||
this.endOffsets = allEndOffsets.entrySet().stream()
|
||||
.filter(entry -> !allBeginningOffsets.get(entry.getKey()).equals(entry.getValue()))
|
||||
.map(e -> Tuples.of(e.getKey().partition(), e.getValue() - 1))
|
||||
.collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));
|
||||
|
||||
this.beginOffsets = this.endOffsets.keySet().stream()
|
||||
.map(p -> Tuples.of(p, allBeginningOffsets.get(new TopicPartition(topic, p))))
|
||||
.collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));
|
||||
}
|
||||
|
||||
public List<TopicPartition> topicPartitions() {
|
||||
return this.endOffsets.keySet().stream()
|
||||
.map(p -> new TopicPartition(topic, p))
|
||||
.collect(Collectors.toList());
|
||||
}
|
||||
|
||||
public void markPolled(int partition) {
|
||||
endOffsets.remove(partition);
|
||||
beginOffsets.remove(partition);
|
||||
}
|
||||
|
||||
public void markPolled(ConsumerRecord<?, ?> rec) {
|
||||
Long endWaiting = endOffsets.get(rec.partition());
|
||||
if (endWaiting != null && endWaiting <= rec.offset()) {
|
||||
endOffsets.remove(rec.partition());
|
||||
}
|
||||
Long beginWaiting = beginOffsets.get(rec.partition());
|
||||
if (beginWaiting != null && beginWaiting >= rec.offset()) {
|
||||
beginOffsets.remove(rec.partition());
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public boolean endReached() {
|
||||
return endOffsets.isEmpty();
|
||||
}
|
||||
|
||||
public boolean beginReached() {
|
||||
return beginOffsets.isEmpty();
|
||||
}
|
||||
|
||||
public Map<Integer, Long> getEndOffsets() {
|
||||
return endOffsets;
|
||||
}
|
||||
|
||||
public Map<Integer, Long> getBeginOffsets() {
|
||||
return beginOffsets;
|
||||
}
|
||||
}
|
||||
}
@ -0,0 +1,120 @@
|
|||
package com.provectus.kafka.ui.util;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
import lombok.extern.log4j.Log4j2;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
import reactor.util.function.Tuple2;
|
||||
import reactor.util.function.Tuples;
|
||||
|
||||
@Log4j2
|
||||
public class OffsetsSeekBackward extends OffsetsSeek {
|
||||
|
||||
private final int maxMessages;
|
||||
|
||||
public OffsetsSeekBackward(String topic,
|
||||
ConsumerPosition consumerPosition, int maxMessages) {
|
||||
super(topic, consumerPosition);
|
||||
this.maxMessages = maxMessages;
|
||||
}
|
||||
|
||||
public int msgsPerPartition(int partitionsSize) {
|
||||
return msgsPerPartition(maxMessages, partitionsSize);
|
||||
}
|
||||
|
||||
public int msgsPerPartition(long awaitingMessages, int partitionsSize) {
|
||||
return (int) Math.ceil((double) awaitingMessages / partitionsSize);
|
||||
}
|
||||
|
||||
|
||||
protected Map<TopicPartition, Long> offsetsFromPositions(Consumer<Bytes, Bytes> consumer,
|
||||
List<TopicPartition> partitions) {
|
||||
|
||||
return findOffsetsInt(consumer, consumerPosition.getSeekTo(), partitions);
|
||||
}
|
||||
|
||||
protected Map<TopicPartition, Long> offsetsFromBeginning(Consumer<Bytes, Bytes> consumer,
|
||||
List<TopicPartition> partitions) {
|
||||
return findOffsets(consumer, Map.of(), partitions);
|
||||
}
|
||||
|
||||
protected Map<TopicPartition, Long> offsetsForTimestamp(Consumer<Bytes, Bytes> consumer) {
|
||||
Map<TopicPartition, Long> timestampsToSearch =
|
||||
consumerPosition.getSeekTo().entrySet().stream()
|
||||
.collect(Collectors.toMap(
|
||||
Map.Entry::getKey,
|
||||
e -> e.getValue()
|
||||
));
|
||||
Map<TopicPartition, Long> offsetsForTimestamps = consumer.offsetsForTimes(timestampsToSearch)
|
||||
.entrySet().stream()
|
||||
.filter(e -> e.getValue() != null)
|
||||
.map(v -> Tuples.of(v.getKey(), v.getValue().offset()))
|
||||
.collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));
|
||||
|
||||
if (offsetsForTimestamps.isEmpty()) {
|
||||
throw new IllegalArgumentException("No offsets were found for requested timestamps");
|
||||
}
|
||||
|
||||
log.info("Timestamps: {} to offsets: {}", timestampsToSearch, offsetsForTimestamps);
|
||||
|
||||
return findOffsets(consumer, offsetsForTimestamps, offsetsForTimestamps.keySet());
|
||||
}
|
||||
|
||||
protected Map<TopicPartition, Long> findOffsetsInt(
|
||||
Consumer<Bytes, Bytes> consumer, Map<TopicPartition, Long> seekTo,
|
||||
List<TopicPartition> partitions) {
|
||||
return findOffsets(consumer, seekTo, partitions);
|
||||
}
|
||||
|
||||
protected Map<TopicPartition, Long> findOffsets(
|
||||
Consumer<Bytes, Bytes> consumer, Map<TopicPartition, Long> seekTo,
|
||||
Collection<TopicPartition> partitions) {
|
||||
|
||||
final Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
|
||||
final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
|
||||
|
||||
final Map<TopicPartition, Long> seekMap = new HashMap<>();
|
||||
final Set<TopicPartition> emptyPartitions = new HashSet<>();
|
||||
|
||||
for (Map.Entry<TopicPartition, Long> entry : seekTo.entrySet()) {
|
||||
final Long endOffset = endOffsets.get(entry.getKey());
|
||||
final Long beginningOffset = beginningOffsets.get(entry.getKey());
|
||||
if (beginningOffset != null
|
||||
&& endOffset != null
|
||||
&& beginningOffset < endOffset
|
||||
&& entry.getValue() > beginningOffset
|
||||
) {
|
||||
final Long value;
|
||||
if (entry.getValue() > endOffset) {
|
||||
value = endOffset;
|
||||
} else {
|
||||
value = entry.getValue();
|
||||
}
|
||||
|
||||
seekMap.put(entry.getKey(), value);
|
||||
} else {
|
||||
emptyPartitions.add(entry.getKey());
|
||||
}
|
||||
}
|
||||
|
||||
Set<TopicPartition> waiting = new HashSet<>(partitions);
|
||||
waiting.removeAll(emptyPartitions);
|
||||
waiting.removeAll(seekMap.keySet());
|
||||
|
||||
for (TopicPartition topicPartition : waiting) {
|
||||
seekMap.put(topicPartition, endOffsets.get(topicPartition));
|
||||
}
|
||||
|
||||
return seekMap;
|
||||
}
|
||||
|
||||
|
||||
}
@ -0,0 +1,61 @@
|
|||
package com.provectus.kafka.ui.util;
|
||||
|
||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.stream.Collectors;
|
||||
import lombok.extern.log4j.Log4j2;
|
||||
import org.apache.kafka.clients.consumer.Consumer;
|
||||
import org.apache.kafka.common.TopicPartition;
|
||||
import org.apache.kafka.common.utils.Bytes;
|
||||
|
||||
@Log4j2
|
||||
public class OffsetsSeekForward extends OffsetsSeek {
|
||||
|
||||
public OffsetsSeekForward(String topic, ConsumerPosition consumerPosition) {
|
||||
super(topic, consumerPosition);
|
||||
}
|
||||
|
||||
protected Map<TopicPartition, Long> offsetsFromPositions(Consumer<Bytes, Bytes> consumer,
|
||||
List<TopicPartition> partitions) {
|
||||
final Map<TopicPartition, Long> offsets =
|
||||
offsetsFromBeginning(consumer, partitions);
|
||||
|
||||
final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(offsets.keySet());
|
||||
final Set<TopicPartition> set = new HashSet<>(consumerPosition.getSeekTo().keySet());
|
||||
final Map<TopicPartition, Long> collect = consumerPosition.getSeekTo().entrySet().stream()
|
||||
.filter(e -> e.getValue() < endOffsets.get(e.getKey()))
|
||||
.filter(e -> endOffsets.get(e.getKey()) > offsets.get(e.getKey()))
|
||||
.collect(Collectors.toMap(
|
||||
Map.Entry::getKey,
|
||||
Map.Entry::getValue
|
||||
));
|
||||
offsets.putAll(collect);
|
||||
set.removeAll(collect.keySet());
|
||||
set.forEach(offsets::remove);
|
||||
|
||||
return offsets;
|
||||
}
|
||||
|
||||
protected Map<TopicPartition, Long> offsetsForTimestamp(Consumer<Bytes, Bytes> consumer) {
|
||||
Map<TopicPartition, Long> offsetsForTimestamps =
|
||||
consumer.offsetsForTimes(consumerPosition.getSeekTo())
|
||||
.entrySet().stream()
|
||||
.filter(e -> e.getValue() != null)
|
||||
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
|
||||
|
||||
if (offsetsForTimestamps.isEmpty()) {
|
||||
throw new IllegalArgumentException("No offsets were found for requested timestamps");
|
||||
}
|
||||
|
||||
return offsetsForTimestamps;
|
||||
}
|
||||
|
||||
protected Map<TopicPartition, Long> offsetsFromBeginning(Consumer<Bytes, Bytes> consumer,
|
||||
List<TopicPartition> partitions) {
|
||||
return consumer.beginningOffsets(partitions);
|
||||
}
|
||||
|
||||
}
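A stripped-down, standalone sketch of the forward TIMESTAMP seek implemented above, using the raw consumer API directly (bootstrap address, topic name and the one-hour lookback are placeholders):

import java.time.Duration;
import java.time.Instant;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.BytesDeserializer;
import org.apache.kafka.common.utils.Bytes;

public class SeekToTimestampSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class);

    long timestamp = Instant.now().minus(Duration.ofHours(1)).toEpochMilli(); // placeholder lookback

    try (KafkaConsumer<Bytes, Bytes> consumer = new KafkaConsumer<>(props)) {
      Map<TopicPartition, Long> timestamps = consumer.partitionsFor("my-topic").stream()
          .collect(Collectors.toMap(
              p -> new TopicPartition(p.topic(), p.partition()),
              p -> timestamp));
      // partitions with no record at or after the timestamp come back as null and are skipped,
      // mirroring the filter(e -> e.getValue() != null) in offsetsForTimestamp above
      Map<TopicPartition, Long> offsets = consumer.offsetsForTimes(timestamps).entrySet().stream()
          .filter(e -> e.getValue() != null)
          .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
      consumer.assign(offsets.keySet());
      offsets.forEach(consumer::seek);
      System.out.println("polled " + consumer.poll(Duration.ofSeconds(1)).count() + " records");
    }
  }
}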
@ -0,0 +1,20 @@
|
|||
package com.provectus.kafka.ui.util;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.io.InputStreamReader;
|
||||
import java.io.Reader;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import org.springframework.core.io.Resource;
|
||||
import org.springframework.util.FileCopyUtils;
|
||||
|
||||
public class ResourceUtil {
|
||||
|
||||
private ResourceUtil() {
|
||||
}
|
||||
|
||||
public static String readAsString(Resource resource) throws IOException {
|
||||
try (Reader reader = new InputStreamReader(resource.getInputStream(), StandardCharsets.UTF_8)) {
|
||||
return FileCopyUtils.copyToString(reader);
|
||||
}
|
||||
}
|
||||
}
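readAsString is normally handed a classpath resource; a minimal usage sketch (the resource name here is a placeholder, not a file added by this change):

import com.provectus.kafka.ui.util.ResourceUtil;
import java.io.IOException;
import org.springframework.core.io.ClassPathResource;

public class ResourceUtilSketch {
  public static void main(String[] args) throws IOException {
    String json = ResourceUtil.readAsString(new ClassPathResource("example-schema.json"));
    System.out.println(json.length());
  }
}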
@ -0,0 +1,8 @@
|
|||
package com.provectus.kafka.ui.util.annotations;
|
||||
|
||||
/**
|
||||
 * All places in the code that depend on kafka-client internals or implementation-specific
 * logic should be marked with this annotation to make future updates easier.
|
||||
*/
|
||||
public @interface KafkaClientInternalsDependant {
|
||||
}
@ -0,0 +1,21 @@
|
|||
package com.provectus.kafka.ui.util.jsonschema;
|
||||
|
||||
import com.fasterxml.jackson.databind.JsonNode;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.fasterxml.jackson.databind.node.ObjectNode;
|
||||
|
||||
public class ArrayFieldSchema implements FieldSchema {
|
||||
private final FieldSchema itemsSchema;
|
||||
|
||||
public ArrayFieldSchema(FieldSchema itemsSchema) {
|
||||
this.itemsSchema = itemsSchema;
|
||||
}
|
||||
|
||||
@Override
|
||||
public JsonNode toJsonNode(ObjectMapper mapper) {
|
||||
final ObjectNode objectNode = mapper.createObjectNode();
|
||||
objectNode.setAll(new SimpleJsonType(JsonType.Type.ARRAY).toJsonNode(mapper));
|
||||
objectNode.set("items", itemsSchema.toJsonNode(mapper));
|
||||
return objectNode;
|
||||
}
|
||||
}
@ -0,0 +1,137 @@
|
|||
package com.provectus.kafka.ui.util.jsonschema;
|
||||
|
||||
import java.net.URI;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Optional;
|
||||
import java.util.UUID;
|
||||
import java.util.stream.Collectors;
|
||||
import org.apache.avro.Schema;
|
||||
import reactor.util.function.Tuple2;
|
||||
import reactor.util.function.Tuples;
|
||||
|
||||
public class AvroJsonSchemaConverter implements JsonSchemaConverter<Schema> {
|
||||
|
||||
@Override
|
||||
public JsonSchema convert(URI basePath, Schema schema) {
|
||||
final JsonSchema.JsonSchemaBuilder builder = JsonSchema.builder();
|
||||
|
||||
builder.id(basePath.resolve(schema.getName()));
|
||||
JsonType type = convertType(schema);
|
||||
builder.type(type);
|
||||
|
||||
Map<String, FieldSchema> definitions = new HashMap<>();
|
||||
final FieldSchema root = convertSchema("root", schema, definitions, false);
|
||||
builder.definitions(definitions);
|
||||
|
||||
if (type.getType().equals(JsonType.Type.OBJECT)) {
|
||||
final ObjectFieldSchema objectRoot = (ObjectFieldSchema) root;
|
||||
builder.properties(objectRoot.getProperties());
|
||||
builder.required(objectRoot.getRequired());
|
||||
}
|
||||
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
|
||||
private FieldSchema convertField(Schema.Field field, Map<String, FieldSchema> definitions) {
|
||||
return convertSchema(field.name(), field.schema(), definitions, true);
|
||||
}
|
||||
|
||||
private FieldSchema convertSchema(String name, Schema schema,
|
||||
Map<String, FieldSchema> definitions, boolean ref) {
|
||||
if (!schema.isUnion() || (schema.getTypes().size() == 2 && schema.isNullable())) {
|
||||
if (schema.isUnion()) {
|
||||
final Optional<Schema> firstType =
|
||||
schema.getTypes().stream().filter(t -> !t.getType().equals(Schema.Type.NULL))
|
||||
.findFirst();
|
||||
schema = firstType.orElseThrow();
|
||||
}
|
||||
JsonType type = convertType(schema);
|
||||
switch (type.getType()) {
|
||||
case BOOLEAN:
|
||||
case NULL:
|
||||
case STRING:
|
||||
case ENUM:
|
||||
case NUMBER:
|
||||
case INTEGER:
|
||||
return new SimpleFieldSchema(type);
|
||||
case OBJECT:
|
||||
if (schema.getType().equals(Schema.Type.MAP)) {
|
||||
return new MapFieldSchema(convertSchema(name, schema.getValueType(), definitions, ref));
|
||||
} else {
|
||||
return createObjectSchema(name, schema, definitions, ref);
|
||||
}
|
||||
case ARRAY:
|
||||
return createArraySchema(name, schema, definitions);
|
||||
default: throw new RuntimeException("Unknown type");
|
||||
}
|
||||
} else {
|
||||
return new OneOfFieldSchema(
|
||||
schema.getTypes().stream()
|
||||
.map(typeSchema ->
|
||||
convertSchema(
|
||||
name + UUID.randomUUID().toString(),
|
||||
typeSchema,
|
||||
definitions,
|
||||
true
|
||||
)
|
||||
).collect(Collectors.toList())
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
private FieldSchema createObjectSchema(String name, Schema schema,
|
||||
Map<String, FieldSchema> definitions, boolean ref) {
|
||||
final Map<String, FieldSchema> fields = schema.getFields().stream()
|
||||
.map(f -> Tuples.of(f.name(), convertField(f, definitions)))
|
||||
.collect(Collectors.toMap(
|
||||
Tuple2::getT1,
|
||||
Tuple2::getT2
|
||||
));
|
||||
|
||||
final List<String> required = schema.getFields().stream()
|
||||
.filter(f -> !f.schema().isNullable())
|
||||
.map(Schema.Field::name).collect(Collectors.toList());
|
||||
|
||||
if (ref) {
|
||||
String definitionName = String.format("Record%s", schema.getName());
|
||||
definitions.put(definitionName, new ObjectFieldSchema(fields, required));
|
||||
return new RefFieldSchema(String.format("#/definitions/%s", definitionName));
|
||||
} else {
|
||||
return new ObjectFieldSchema(fields, required);
|
||||
}
|
||||
}
|
||||
|
||||
private ArrayFieldSchema createArraySchema(String name, Schema schema,
|
||||
Map<String, FieldSchema> definitions) {
|
||||
return new ArrayFieldSchema(
|
||||
convertSchema(name, schema.getElementType(), definitions, true)
|
||||
);
|
||||
}
|
||||
|
||||
private JsonType convertType(Schema schema) {
|
||||
switch (schema.getType()) {
|
||||
case INT:
|
||||
case LONG:
|
||||
return new SimpleJsonType(JsonType.Type.INTEGER);
|
||||
case MAP:
|
||||
case RECORD:
|
||||
return new SimpleJsonType(JsonType.Type.OBJECT);
|
||||
case ENUM:
|
||||
return new EnumJsonType(schema.getEnumSymbols());
|
||||
case BYTES:
|
||||
case STRING:
|
||||
return new SimpleJsonType(JsonType.Type.STRING);
|
||||
case NULL: return new SimpleJsonType(JsonType.Type.NULL);
|
||||
case ARRAY: return new SimpleJsonType(JsonType.Type.ARRAY);
|
||||
case FIXED:
|
||||
case FLOAT:
|
||||
case DOUBLE:
|
||||
return new SimpleJsonType(JsonType.Type.NUMBER);
|
||||
case BOOLEAN: return new SimpleJsonType(JsonType.Type.BOOLEAN);
|
||||
default: return new SimpleJsonType(JsonType.Type.STRING);
|
||||
}
|
||||
}
|
||||
}
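A hedged usage sketch of the converter above; the Avro schema literal and the base URI are made up for illustration and only standard Avro/Jackson APIs are used alongside the classes from this change:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.util.jsonschema.AvroJsonSchemaConverter;
import com.provectus.kafka.ui.util.jsonschema.JsonSchema;
import java.net.URI;
import org.apache.avro.Schema;

public class AvroConverterSketch {
  public static void main(String[] args) {
    String avro = "{\"type\":\"record\",\"name\":\"User\",\"fields\":["
        + "{\"name\":\"id\",\"type\":\"long\"},"
        + "{\"name\":\"email\",\"type\":[\"null\",\"string\"],\"default\":null}]}";
    Schema schema = new Schema.Parser().parse(avro);

    JsonSchema jsonSchema = new AvroJsonSchemaConverter()
        .convert(URI.create("http://example.com/schemas/"), schema);
    // "id" (non-nullable) ends up in "required", the nullable "email" union does not
    System.out.println(jsonSchema.toJson(new ObjectMapper()));
  }
}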
@ -0,0 +1,24 @@
|
|||
package com.provectus.kafka.ui.util.jsonschema;
|
||||
|
||||
import com.fasterxml.jackson.databind.JsonNode;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
|
||||
public class EnumJsonType extends JsonType {
|
||||
private final List<String> values;
|
||||
|
||||
public EnumJsonType(List<String> values) {
|
||||
super(Type.ENUM);
|
||||
this.values = values;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Map<String, JsonNode> toJsonNode(ObjectMapper mapper) {
|
||||
return Map.of(
|
||||
this.type.getName(),
|
||||
mapper.valueToTree(values)
|
||||
);
|
||||
}
|
||||
}
@ -0,0 +1,8 @@
|
|||
package com.provectus.kafka.ui.util.jsonschema;
|
||||
|
||||
import com.fasterxml.jackson.databind.JsonNode;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
|
||||
public interface FieldSchema {
|
||||
JsonNode toJsonNode(ObjectMapper mapper);
|
||||
}
@ -0,0 +1,65 @@
|
|||
package com.provectus.kafka.ui.util.jsonschema;
|
||||
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.fasterxml.jackson.databind.node.ObjectNode;
|
||||
import com.fasterxml.jackson.databind.node.TextNode;
|
||||
import java.net.URI;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
import lombok.Builder;
|
||||
import lombok.Data;
|
||||
import lombok.SneakyThrows;
|
||||
import reactor.util.function.Tuple2;
|
||||
import reactor.util.function.Tuples;
|
||||
|
||||
@Data
|
||||
@Builder
|
||||
public class JsonSchema {
|
||||
private final URI id;
|
||||
private final URI schema = URI.create("https://json-schema.org/draft/2020-12/schema");
|
||||
private final String title;
|
||||
private final JsonType type;
|
||||
private final Map<String, FieldSchema> properties;
|
||||
private final Map<String, FieldSchema> definitions;
|
||||
private final List<String> required;
|
||||
|
||||
public String toJson(ObjectMapper mapper) {
|
||||
final ObjectNode objectNode = mapper.createObjectNode();
|
||||
objectNode.set("$id", new TextNode(id.toString()));
|
||||
objectNode.set("$schema", new TextNode(schema.toString()));
|
||||
objectNode.setAll(type.toJsonNode(mapper));
|
||||
if (properties != null && !properties.isEmpty()) {
|
||||
objectNode.set("properties", mapper.valueToTree(
|
||||
properties.entrySet().stream()
|
||||
.map(e -> Tuples.of(e.getKey(), e.getValue().toJsonNode(mapper)))
|
||||
.collect(Collectors.toMap(
|
||||
Tuple2::getT1,
|
||||
Tuple2::getT2
|
||||
))
|
||||
));
|
||||
if (!required.isEmpty()) {
|
||||
objectNode.set("required", mapper.valueToTree(required));
|
||||
}
|
||||
}
|
||||
if (definitions != null && !definitions.isEmpty()) {
|
||||
objectNode.set("definitions", mapper.valueToTree(
|
||||
definitions.entrySet().stream()
|
||||
.map(e -> Tuples.of(e.getKey(), e.getValue().toJsonNode(mapper)))
|
||||
.collect(Collectors.toMap(
|
||||
Tuple2::getT1,
|
||||
Tuple2::getT2
|
||||
))
|
||||
));
|
||||
}
|
||||
return objectNode.toString();
|
||||
}
|
||||
|
||||
@SneakyThrows
|
||||
public static JsonSchema stringSchema() {
|
||||
return JsonSchema.builder()
|
||||
.id(new URI("http://unknown.unknown"))
|
||||
.type(new SimpleJsonType(JsonType.Type.STRING))
|
||||
.build();
|
||||
}
|
||||
}
@ -0,0 +1,7 @@
|
|||
package com.provectus.kafka.ui.util.jsonschema;
|
||||
|
||||
import java.net.URI;
|
||||
|
||||
public interface JsonSchemaConverter<T> {
|
||||
JsonSchema convert(URI basePath, T schema);
|
||||
}
@ -0,0 +1,41 @@
|
|||
package com.provectus.kafka.ui.util.jsonschema;
|
||||
|
||||
import com.fasterxml.jackson.databind.JsonNode;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import java.util.Map;
|
||||
|
||||
public abstract class JsonType {
|
||||
|
||||
protected final Type type;
|
||||
|
||||
public JsonType(Type type) {
|
||||
this.type = type;
|
||||
}
|
||||
|
||||
public Type getType() {
|
||||
return type;
|
||||
}
|
||||
|
||||
public abstract Map<String, JsonNode> toJsonNode(ObjectMapper mapper);
|
||||
|
||||
public enum Type {
|
||||
NULL,
|
||||
BOOLEAN,
|
||||
OBJECT,
|
||||
ARRAY,
|
||||
NUMBER,
|
||||
INTEGER,
|
||||
ENUM,
|
||||
STRING;
|
||||
|
||||
private final String name;
|
||||
|
||||
Type() {
|
||||
this.name = this.name().toLowerCase();
|
||||
}
|
||||
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
}
|
||||
}
@ -0,0 +1,22 @@
|
|||
package com.provectus.kafka.ui.util.jsonschema;
|
||||
|
||||
import com.fasterxml.jackson.databind.JsonNode;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.fasterxml.jackson.databind.node.ObjectNode;
|
||||
import com.fasterxml.jackson.databind.node.TextNode;
|
||||
|
||||
public class MapFieldSchema implements FieldSchema {
|
||||
private final FieldSchema itemSchema;
|
||||
|
||||
public MapFieldSchema(FieldSchema itemSchema) {
|
||||
this.itemSchema = itemSchema;
|
||||
}
|
||||
|
||||
@Override
|
||||
public JsonNode toJsonNode(ObjectMapper mapper) {
|
||||
final ObjectNode objectNode = mapper.createObjectNode();
|
||||
objectNode.set("type", new TextNode(JsonType.Type.OBJECT.getName()));
|
||||
objectNode.set("additionalProperties", itemSchema.toJsonNode(mapper));
|
||||
return objectNode;
|
||||
}
|
||||
}
@ -0,0 +1,46 @@
|
|||
package com.provectus.kafka.ui.util.jsonschema;
|
||||
|
||||
import com.fasterxml.jackson.databind.JsonNode;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import com.fasterxml.jackson.databind.node.ObjectNode;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.stream.Collectors;
|
||||
import reactor.util.function.Tuple2;
|
||||
import reactor.util.function.Tuples;
|
||||
|
||||
public class ObjectFieldSchema implements FieldSchema {
|
||||
private final Map<String, FieldSchema> properties;
|
||||
private final List<String> required;
|
||||
|
||||
public ObjectFieldSchema(Map<String, FieldSchema> properties,
|
||||
List<String> required) {
|
||||
this.properties = properties;
|
||||
this.required = required;
|
||||
}
|
||||
|
||||
public Map<String, FieldSchema> getProperties() {
|
||||
return properties;
|
||||
}
|
||||
|
||||
public List<String> getRequired() {
|
||||
return required;
|
||||
}
|
||||
|
||||
@Override
|
||||
public JsonNode toJsonNode(ObjectMapper mapper) {
|
||||
final Map<String, JsonNode> nodes = properties.entrySet().stream()
|
||||
.map(e -> Tuples.of(e.getKey(), e.getValue().toJsonNode(mapper)))
|
||||
.collect(Collectors.toMap(
|
||||
Tuple2::getT1,
|
||||
Tuple2::getT2
|
||||
));
|
||||
final ObjectNode objectNode = mapper.createObjectNode();
|
||||
objectNode.setAll(new SimpleJsonType(JsonType.Type.OBJECT).toJsonNode(mapper));
|
||||
objectNode.set("properties", mapper.valueToTree(nodes));
|
||||
if (!required.isEmpty()) {
|
||||
objectNode.set("required", mapper.valueToTree(required));
|
||||
}
|
||||
return objectNode;
|
||||
}
|
||||
}
@ -0,0 +1,27 @@
|
|||
package com.provectus.kafka.ui.util.jsonschema;
|
||||
|
||||
import com.fasterxml.jackson.databind.JsonNode;
|
||||
import com.fasterxml.jackson.databind.ObjectMapper;
|
||||
import java.util.List;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
public class OneOfFieldSchema implements FieldSchema {
|
||||
private final List<FieldSchema> schemaList;
|
||||
|
||||
public OneOfFieldSchema(
|
||||
List<FieldSchema> schemaList) {
|
||||
this.schemaList = schemaList;
|
||||
}
|
||||
|
||||
@Override
|
||||
public JsonNode toJsonNode(ObjectMapper mapper) {
|
||||
return mapper.createObjectNode()
|
||||
.set("oneOf",
|
||||
mapper.createArrayNode().addAll(
|
||||
schemaList.stream()
|
||||
.map(s -> s.toJsonNode(mapper))
|
||||
.collect(Collectors.toList())
|
||||
)
|
||||
);
|
||||
}
|
||||
}
@ -0,0 +1,134 @@
package com.provectus.kafka.ui.util.jsonschema;

import com.google.protobuf.Descriptors;
import java.net.URI;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;

public class ProtobufSchemaConverter implements JsonSchemaConverter<Descriptors.Descriptor> {
  @Override
  public JsonSchema convert(URI basePath, Descriptors.Descriptor schema) {
    final JsonSchema.JsonSchemaBuilder builder = JsonSchema.builder();

    builder.id(basePath.resolve(schema.getFullName()));
    builder.type(new SimpleJsonType(JsonType.Type.OBJECT));

    Map<String, FieldSchema> definitions = new HashMap<>();
    final ObjectFieldSchema root =
        (ObjectFieldSchema) convertObjectSchema(schema, definitions, false);
    builder.definitions(definitions);

    builder.properties(root.getProperties());
    builder.required(root.getRequired());

    return builder.build();
  }

  private FieldSchema convertObjectSchema(Descriptors.Descriptor schema,
                                          Map<String, FieldSchema> definitions, boolean ref) {
    final Map<String, FieldSchema> fields = schema.getFields().stream()
        .map(f -> Tuples.of(f.getName(), convertField(f, definitions)))
        .collect(Collectors.toMap(
            Tuple2::getT1,
            Tuple2::getT2
        ));

    final Map<String, OneOfFieldSchema> oneOfFields = schema.getOneofs().stream().map(o ->
        Tuples.of(
            o.getName(),
            new OneOfFieldSchema(
                o.getFields().stream().map(
                    Descriptors.FieldDescriptor::getName
                ).map(fields::get).collect(Collectors.toList())
            )
        )
    ).collect(Collectors.toMap(
        Tuple2::getT1,
        Tuple2::getT2
    ));

    final List<String> allOneOfFields = schema.getOneofs().stream().flatMap(o ->
        o.getFields().stream().map(Descriptors.FieldDescriptor::getName)
    ).collect(Collectors.toList());

    final Map<String, FieldSchema> excludedOneOf = fields.entrySet().stream()
        .filter(f -> !allOneOfFields.contains(f.getKey()))
        .collect(Collectors.toMap(
            Map.Entry::getKey,
            Map.Entry::getValue
        ));

    Map<String, FieldSchema> finalFields = new HashMap<>(excludedOneOf);
    finalFields.putAll(oneOfFields);

    final List<String> required = schema.getFields().stream()
        .filter(f -> !f.isOptional())
        .map(Descriptors.FieldDescriptor::getName).collect(Collectors.toList());

    if (ref) {
      String definitionName = String.format("record.%s", schema.getFullName());
      definitions.put(definitionName, new ObjectFieldSchema(finalFields, required));
      return new RefFieldSchema(String.format("#/definitions/%s", definitionName));
    } else {
      return new ObjectFieldSchema(fields, required);
    }
  }

  private FieldSchema convertField(Descriptors.FieldDescriptor field,
                                   Map<String, FieldSchema> definitions) {
    final JsonType jsonType = convertType(field);

    FieldSchema fieldSchema;
    if (jsonType.getType().equals(JsonType.Type.OBJECT)) {
      fieldSchema = convertObjectSchema(field.getMessageType(), definitions, true);
    } else {
      fieldSchema = new SimpleFieldSchema(jsonType);
    }

    if (field.isRepeated()) {
      return new ArrayFieldSchema(fieldSchema);
    } else {
      return fieldSchema;
    }
  }


  private JsonType convertType(Descriptors.FieldDescriptor field) {
    switch (field.getType()) {
      case INT32:
      case INT64:
      case SINT32:
      case SINT64:
      case UINT32:
      case UINT64:
      case FIXED32:
      case FIXED64:
      case SFIXED32:
      case SFIXED64:
        return new SimpleJsonType(JsonType.Type.INTEGER);
      case MESSAGE:
      case GROUP:
        return new SimpleJsonType(JsonType.Type.OBJECT);
      case ENUM:
        return new EnumJsonType(
            field.getEnumType().getValues().stream()
                .map(Descriptors.EnumValueDescriptor::getName)
                .collect(Collectors.toList())
        );
      case BYTES:
      case STRING:
        return new SimpleJsonType(JsonType.Type.STRING);
      case FLOAT:
      case DOUBLE:
        return new SimpleJsonType(JsonType.Type.NUMBER);
      case BOOL:
        return new SimpleJsonType(JsonType.Type.BOOLEAN);
      default:
        return new SimpleJsonType(JsonType.Type.STRING);
    }
  }
}
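Downstream callers drive the converter from a compiled protobuf descriptor. A minimal, hypothetical sketch (not part of this diff; MyMessage is a placeholder for any generated protobuf class, the base URI is illustrative, and imports are omitted):

    // Generated protobuf classes expose their descriptor via getDescriptor().
    Descriptors.Descriptor descriptor = MyMessage.getDescriptor();
    JsonSchema jsonSchema = new ProtobufSchemaConverter()
        .convert(URI.create("http://example.com/schemas/"), descriptor);
    // jsonSchema carries the root message's properties and required lists, plus a
    // definitions entry named "record.<fullName>" for each nested message type it references.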
@ -0,0 +1,18 @@
package com.provectus.kafka.ui.util.jsonschema;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.TextNode;

public class RefFieldSchema implements FieldSchema {
  private final String ref;

  public RefFieldSchema(String ref) {
    this.ref = ref;
  }

  @Override
  public JsonNode toJsonNode(ObjectMapper mapper) {
    return mapper.createObjectNode().set("$ref", new TextNode(ref));
  }
}
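When a field points at another message type, convertObjectSchema registers a definition and returns a reference to it. A tiny hypothetical sketch of what such a reference renders to (the definition name is made up, following the record.<fullName> convention above; imports omitted):

    ObjectMapper mapper = new ObjectMapper();
    JsonNode node = new RefFieldSchema("#/definitions/record.test.Address").toJsonNode(mapper);
    // node: {"$ref":"#/definitions/record.test.Address"}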
@ -0,0 +1,17 @@
package com.provectus.kafka.ui.util.jsonschema;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class SimpleFieldSchema implements FieldSchema {
  private final JsonType type;

  public SimpleFieldSchema(JsonType type) {
    this.type = type;
  }

  @Override
  public JsonNode toJsonNode(ObjectMapper mapper) {
    return mapper.createObjectNode().setAll(type.toJsonNode(mapper));
  }
}
@ -0,0 +1,21 @@
package com.provectus.kafka.ui.util.jsonschema;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.Map;

public class SimpleJsonType extends JsonType {

  public SimpleJsonType(Type type) {
    super(type);
  }

  @Override
  public Map<String, JsonNode> toJsonNode(ObjectMapper mapper) {
    return Map.of(
        "type",
        new TextNode(type.getName())
    );
  }
}
Some files were not shown because too many files have changed in this diff.