diff --git a/.github/workflows/frontend.yaml b/.github/workflows/frontend.yaml
index 7db1337a0f..baa2551d1c 100644
--- a/.github/workflows/frontend.yaml
+++ b/.github/workflows/frontend.yaml
@@ -20,11 +20,11 @@ jobs:
# Disabling shallow clone is recommended for improving relevancy of reporting
fetch-depth: 0
ref: ${{ github.event.pull_request.head.sha }}
- - uses: pnpm/action-setup@v2.2.3
+ - uses: pnpm/action-setup@v2.2.4
with:
version: 7.4.0
- name: Install node
- uses: actions/setup-node@v3.4.1
+ uses: actions/setup-node@v3.5.1
with:
node-version: "16.15.0"
cache: "pnpm"
diff --git a/.github/workflows/helm.yaml b/.github/workflows/helm.yaml
index 255f92260f..b8c88a4305 100644
--- a/.github/workflows/helm.yaml
+++ b/.github/workflows/helm.yaml
@@ -12,9 +12,18 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Helm tool installer
- uses: Azure/setup-helm@v1
+ uses: Azure/setup-helm@v3
- name: Setup Kubeval
uses: lra/setup-kubeval@v1.0.1
+      # Check that the helm chart version in Chart.yaml was increased relative to master
+ - name: Check version
+ shell: bash
+ run: |
+          helm_version_new=$(cat charts/kafka-ui/Chart.yaml | grep '^version:' | awk '{print $2}')
+          helm_version_old=$(curl -s https://raw.githubusercontent.com/provectus/kafka-ui/master/charts/kafka-ui/Chart.yaml | grep '^version:' | awk '{print $2}' )
+ echo $helm_version_old
+ echo $helm_version_new
+          if [ "$helm_version_new" != "$helm_version_old" ] && [ "$(printf '%s\n%s' "$helm_version_old" "$helm_version_new" | sort -V | tail -n1)" = "$helm_version_new" ]; then exit 0; else exit 1; fi
- name: Run kubeval
shell: bash
run: |
@@ -27,17 +36,3 @@ jobs:
echo $version;
helm template --kube-version $version --set ingress.enabled=true charts/kafka-ui -f charts/kafka-ui/values.yaml | kubeval --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master --strict -v $version;
done
- #check, was helm version increased in Chart.yaml?
- - name: Check version
- shell: bash
- run: |
- git fetch
- git checkout master
- helm_version_old=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
- git checkout $GITHUB_HEAD_REF
- helm_version_new=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
- echo $helm_version_old
- echo $helm_version_new
- if [[ "$helm_version_new" > "$helm_version_old" ]]; then exit 0 ; else exit 1 ; fi
-
-
diff --git a/.github/workflows/release-helm.yaml b/.github/workflows/release-helm.yaml
index 640c0acc1c..2e9cdb280d 100644
--- a/.github/workflows/release-helm.yaml
+++ b/.github/workflows/release-helm.yaml
@@ -19,19 +19,20 @@ jobs:
git config user.name github-actions
git config user.email github-actions@github.com
- - uses: azure/setup-helm@v1
+ - uses: azure/setup-helm@v3
- name: add chart #realse helm with new version
run: |
- echo "VERSION=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')" >> $GITHUB_ENV
- MSG=$(helm package charts/kafka-ui)
+          VERSION=$(cat charts/kafka-ui/Chart.yaml | grep '^version:' | awk '{print $2}')
+          echo "HELM_VERSION=${VERSION}" >> $GITHUB_ENV
+ MSG=$(helm package charts/kafka-ui)
git fetch origin
git stash
git checkout -b gh-pages origin/gh-pages
helm repo index .
git add -f ${MSG##*/} index.yaml
- git commit -m "release ${{ env.VERSION }}"
+ git commit -m "release ${VERSION}"
git push
- uses: rickstaa/action-create-tag@v1 #create new tag
with:
- tag: "charts/kafka-ui-${{ env.VERSION }}"
+ tag: "charts/kafka-ui-${{ env.HELM_VERSION }}"
diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml
index 5e9ac844fb..aafb50ceda 100644
--- a/.github/workflows/stale.yaml
+++ b/.github/workflows/stale.yaml
@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- - uses: actions/stale@v5
+ - uses: actions/stale@v6
with:
days-before-issue-stale: 7
days-before-issue-close: 3
diff --git a/charts/kafka-ui/Chart.yaml b/charts/kafka-ui/Chart.yaml
index e0d9b0b161..2da8388a44 100644
--- a/charts/kafka-ui/Chart.yaml
+++ b/charts/kafka-ui/Chart.yaml
@@ -2,6 +2,6 @@ apiVersion: v2
name: kafka-ui
description: A Helm chart for kafka-UI
type: application
-version: 0.4.3
-appVersion: latest
+version: 0.4.4
+appVersion: v0.4.0
icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png
diff --git a/charts/kafka-ui/templates/deployment.yaml b/charts/kafka-ui/templates/deployment.yaml
index 1f7f6c92ad..51703c7ed3 100644
--- a/charts/kafka-ui/templates/deployment.yaml
+++ b/charts/kafka-ui/templates/deployment.yaml
@@ -18,6 +18,7 @@ spec:
{{- toYaml . | nindent 8 }}
{{- end }}
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+ checksum/configFromValues: {{ include (print $.Template.BasePath "/configmap_fromValues.yaml") . | sha256sum }}
checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
labels:
{{- include "kafka-ui.selectorLabels" . | nindent 8 }}
@@ -136,4 +137,4 @@ spec:
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
- {{- end }}
\ No newline at end of file
+ {{- end }}
diff --git a/docker-compose.md b/docker-compose.md
index d3912c6715..154882351c 100644
--- a/docker-compose.md
+++ b/docker-compose.md
@@ -17,7 +17,6 @@ services:
environment:
- KAFKA_CLUSTERS_0_NAME=local
- KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
- - KAFKA_CLUSTERS_0_ZOOKEEPER=localhost:2181
```
* If you prefer UI for Apache Kafka in read only mode
@@ -34,7 +33,6 @@ services:
environment:
- KAFKA_CLUSTERS_0_NAME=local
- KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
- - KAFKA_CLUSTERS_0_ZOOKEEPER=localhost:2181
- KAFKA_CLUSTERS_0_READONLY=true
```
diff --git a/documentation/compose/DOCKER_COMPOSE.md b/documentation/compose/DOCKER_COMPOSE.md
index c380c99173..00f54101bd 100644
--- a/documentation/compose/DOCKER_COMPOSE.md
+++ b/documentation/compose/DOCKER_COMPOSE.md
@@ -13,3 +13,4 @@
11. [kafka-ui-traefik-proxy.yaml](./kafka-ui-traefik-proxy.yaml) - Traefik specific proxy configuration.
12. [oauth-cognito.yaml](./oauth-cognito.yaml) - OAuth2 with Cognito
13. [kafka-ui-with-jmx-exporter.yaml](./kafka-ui-with-jmx-exporter.yaml) - A configuration with 2 kafka clusters with enabled prometheus jmx exporters instead of jmx.
+14. [kafka-with-zookeeper.yaml](./kafka-with-zookeeper.yaml) - An example of using Kafka with ZooKeeper
\ No newline at end of file
diff --git a/documentation/compose/auth-ldap.yaml b/documentation/compose/auth-ldap.yaml
index 4d296927c9..0e2f6337d8 100644
--- a/documentation/compose/auth-ldap.yaml
+++ b/documentation/compose/auth-ldap.yaml
@@ -8,24 +8,13 @@ services:
ports:
- 8080:8080
depends_on:
- - zookeeper0
- kafka0
- schemaregistry0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
- KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
- KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
- KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
- KAFKA_CLUSTERS_1_NAME: secondLocal
- KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
- KAFKA_CLUSTERS_1_ZOOKEEPER: zookeeper1:2181
- KAFKA_CLUSTERS_1_METRICS_PORT: 9998
- KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
- KAFKA_CLUSTERS_1_KAFKACONNECT_0_NAME: first
- KAFKA_CLUSTERS_1_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
AUTH_TYPE: "LDAP"
SPRING_LDAP_URLS: "ldap://ldap:10389"
SPRING_LDAP_DN_PATTERN: "cn={0},ou=people,dc=planetexpress,dc=com"
@@ -47,41 +36,43 @@ services:
image: rroemhild/test-openldap:latest
hostname: "ldap"
- zookeeper0:
- image: confluentinc/cp-zookeeper:5.2.4
- environment:
- ZOOKEEPER_CLIENT_PORT: 2181
- ZOOKEEPER_TICK_TIME: 2000
- ports:
- - 2181:2181
-
kafka0:
- image: confluentinc/cp-kafka:5.3.1
- depends_on:
- - zookeeper0
+ image: confluentinc/cp-kafka:7.2.1
+ hostname: kafka0
+ container_name: kafka0
ports:
- - 9092:9092
- - 9997:9997
+ - "9092:9092"
+ - "9997:9997"
environment:
KAFKA_BROKER_ID: 1
- KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+ KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
- JMX_PORT: 9997
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+ KAFKA_JMX_PORT: 9997
+ KAFKA_JMX_HOSTNAME: localhost
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+ KAFKA_PROCESS_ROLES: 'broker,controller'
+ KAFKA_NODE_ID: 1
+ KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+ KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+ KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+ KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+ KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+ volumes:
+ - ./scripts/update_run.sh:/tmp/update_run.sh
+ command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
schemaregistry0:
- image: confluentinc/cp-schema-registry:5.5.0
+ image: confluentinc/cp-schema-registry:7.2.1
ports:
- 8085:8085
depends_on:
- - zookeeper0
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
- SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
diff --git a/documentation/compose/e2e-tests.yaml b/documentation/compose/e2e-tests.yaml
index e09ec517d2..205e6ec1d8 100644
--- a/documentation/compose/e2e-tests.yaml
+++ b/documentation/compose/e2e-tests.yaml
@@ -8,57 +8,55 @@ services:
ports:
- 8080:8080
depends_on:
- - zookeeper0
- kafka0
- schemaregistry0
- kafka-connect0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
- KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
KAFKA_CLUSTERS_0_KSQLDBSERVER: http://ksqldb:8088
- zookeeper0:
- image: confluentinc/cp-zookeeper:5.2.4
- environment:
- ZOOKEEPER_CLIENT_PORT: 2181
- ZOOKEEPER_TICK_TIME: 2000
- ports:
- - 2181:2181
-
kafka0:
- image: confluentinc/cp-kafka:5.3.1
- depends_on:
- - zookeeper0
+ image: confluentinc/cp-kafka:7.2.1
+ hostname: kafka0
+ container_name: kafka0
ports:
- - 9092:9092
- - 9997:9997
+ - "9092:9092"
+ - "9997:9997"
environment:
KAFKA_BROKER_ID: 1
- KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+ KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
- JMX_PORT: 9997
+ KAFKA_JMX_PORT: 9997
+ KAFKA_JMX_HOSTNAME: localhost
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+ KAFKA_PROCESS_ROLES: 'broker,controller'
+ KAFKA_NODE_ID: 1
+ KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+ KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+ KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+ KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+ KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+ volumes:
+ - ./scripts/update_run.sh:/tmp/update_run.sh
+ command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
schemaregistry0:
- image: confluentinc/cp-schema-registry:5.5.0
+ image: confluentinc/cp-schema-registry:7.2.1
ports:
- 8085:8085
depends_on:
- - zookeeper0
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
- SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@@ -98,17 +96,16 @@ services:
# AWS_SECRET_ACCESS_KEY: ""
kafka-init-topics:
- image: confluentinc/cp-kafka:5.3.1
+ image: confluentinc/cp-kafka:7.2.1
volumes:
- ./message.json:/data/message.json
depends_on:
- kafka0
command: "bash -c 'echo Waiting for Kafka to be ready... && \
- cub kafka-ready -b kafka1:29092 1 30 && \
- kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
- kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
- kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
- kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
+ cub kafka-ready -b kafka0:29092 1 30 && \
+ kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+ kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+ kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"
postgres-db:
build:
diff --git a/documentation/compose/kafka-cluster-sr-auth.yaml b/documentation/compose/kafka-cluster-sr-auth.yaml
index 6dbcb12e36..bf52f33d88 100644
--- a/documentation/compose/kafka-cluster-sr-auth.yaml
+++ b/documentation/compose/kafka-cluster-sr-auth.yaml
@@ -2,43 +2,44 @@
version: '2'
services:
- zookeeper1:
- image: confluentinc/cp-zookeeper:5.2.4
- environment:
- ZOOKEEPER_CLIENT_PORT: 2181
- ZOOKEEPER_TICK_TIME: 2000
- ports:
- - 2182:2181
-
kafka1:
- image: confluentinc/cp-kafka:5.3.1
- depends_on:
- - zookeeper1
+ image: confluentinc/cp-kafka:7.2.1
+ hostname: kafka1
+ container_name: kafka1
+ ports:
+ - "9092:9092"
+ - "9997:9997"
environment:
KAFKA_BROKER_ID: 1
- KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+ KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
- JMX_PORT: 9998
- KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9998
- ports:
- - 9093:9093
- - 9998:9998
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+ KAFKA_JMX_PORT: 9997
+ KAFKA_JMX_HOSTNAME: localhost
+ KAFKA_PROCESS_ROLES: 'broker,controller'
+ KAFKA_NODE_ID: 1
+ KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka1:29093'
+ KAFKA_LISTENERS: 'PLAINTEXT://kafka1:29092,CONTROLLER://kafka1:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+ KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+ KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+ KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+ volumes:
+ - ./scripts/update_run.sh:/tmp/update_run.sh
+ command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
schemaregistry1:
- image: confluentinc/cp-schema-registry:5.5.0
+ image: confluentinc/cp-schema-registry:7.2.1
ports:
- 18085:8085
depends_on:
- - zookeeper1
- kafka1
volumes:
- ./jaas:/conf
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
- SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
@@ -54,13 +55,29 @@ services:
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
kafka-init-topics:
- image: confluentinc/cp-kafka:5.3.1
+ image: confluentinc/cp-kafka:7.2.1
volumes:
- ./message.json:/data/message.json
depends_on:
- kafka1
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka1:29092 1 30 && \
- kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
- kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
- kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
+ kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
+ kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
+ kafka-console-producer --bootstrap-server kafka1:29092 --topic users < /data/message.json'"
+
+ kafka-ui:
+ container_name: kafka-ui
+ image: provectuslabs/kafka-ui:latest
+ ports:
+ - 8080:8080
+ depends_on:
+ - kafka1
+ - schemaregistry1
+ environment:
+ KAFKA_CLUSTERS_0_NAME: local
+ KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka1:29092
+ KAFKA_CLUSTERS_0_METRICS_PORT: 9997
+ KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry1:8085
+ KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME: admin
+ KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD: letmein
\ No newline at end of file
diff --git a/documentation/compose/kafka-clusters-only.yaml b/documentation/compose/kafka-clusters-only.yaml
index 1e51dd5a4c..b6a84ee92b 100644
--- a/documentation/compose/kafka-clusters-only.yaml
+++ b/documentation/compose/kafka-clusters-only.yaml
@@ -1,83 +1,41 @@
---
-version: '2'
+version: "2"
services:
-
- zookeeper0:
- image: confluentinc/cp-zookeeper:5.2.4
- environment:
- ZOOKEEPER_CLIENT_PORT: 2181
- ZOOKEEPER_TICK_TIME: 2000
- ports:
- - 2181:2181
-
kafka0:
- image: confluentinc/cp-kafka:5.3.1
- depends_on:
- - zookeeper0
+ image: confluentinc/cp-kafka:7.2.1
+ hostname: kafka0
+ container_name: kafka0
+ ports:
+ - "9092:9092"
+ - "9997:9997"
environment:
KAFKA_BROKER_ID: 1
- KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
- KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
- JMX_PORT: 9997
- KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9997
- ports:
- - 9092:9092
- - 9997:9997
-
- kafka01:
- image: confluentinc/cp-kafka:5.3.1
- depends_on:
- - zookeeper0
- environment:
- KAFKA_BROKER_ID: 2
- KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka01:29092,PLAINTEXT_HOST://localhost:9094
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,PLAIN:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
- KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
- JMX_PORT: 9999
- KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9999
- ports:
- - 9094:9094
- - 9999:9999
-
- zookeeper1:
- image: confluentinc/cp-zookeeper:5.2.4
- environment:
- ZOOKEEPER_CLIENT_PORT: 2181
- ZOOKEEPER_TICK_TIME: 2000
- ports:
- - 2182:2181
-
- kafka1:
- image: confluentinc/cp-kafka:5.3.1
- depends_on:
- - zookeeper1
- environment:
- KAFKA_BROKER_ID: 1
- KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
+ KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
- JMX_PORT: 9998
- KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9998
- ports:
- - 9093:9093
- - 9998:9998
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+ KAFKA_JMX_PORT: 9997
+ KAFKA_JMX_HOSTNAME: localhost
+ KAFKA_PROCESS_ROLES: "broker,controller"
+ KAFKA_NODE_ID: 1
+ KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka0:29093"
+ KAFKA_LISTENERS: "PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092"
+ KAFKA_INTER_BROKER_LISTENER_NAME: "PLAINTEXT"
+ KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+ KAFKA_LOG_DIRS: "/tmp/kraft-combined-logs"
+ volumes:
+ - ./scripts/update_run_cluster.sh:/tmp/update_run.sh
+ - ./scripts/clusterID:/tmp/clusterID
+ command: 'bash -c ''if [ ! -f /tmp/update_run.sh ]; then echo "ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'''
schemaregistry0:
- image: confluentinc/cp-schema-registry:5.5.0
+ image: confluentinc/cp-schema-registry:7.2.1
depends_on:
- - zookeeper0
- kafka0
- - kafka01
environment:
- SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092,PLAINTEXT://kafka01:29092
- SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
+ SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@@ -86,28 +44,10 @@ services:
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
ports:
- - 8085:8085
-
- schemaregistry1:
- image: confluentinc/cp-schema-registry:5.5.0
- ports:
- - 18085:8085
- depends_on:
- - zookeeper1
- - kafka1
- environment:
- SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
- SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
- SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
- SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
- SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
-
- SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
- SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
- SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
+ - 8085:8085
kafka-connect0:
- image: confluentinc/cp-kafka-connect:6.0.1
+ image: confluentinc/cp-kafka-connect:7.2.1
ports:
- 8083:8083
depends_on:
@@ -131,16 +71,14 @@ services:
CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
-
kafka-init-topics:
- image: confluentinc/cp-kafka:5.3.1
+ image: confluentinc/cp-kafka:7.2.1
volumes:
- - ./message.json:/data/message.json
+ - ./message.json:/data/message.json
depends_on:
- - kafka1
+ - kafka0
command: "bash -c 'echo Waiting for Kafka to be ready... && \
- cub kafka-ready -b kafka1:29092 1 30 && \
- kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
- kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
- kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
- kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
+ cub kafka-ready -b kafka0:29092 1 30 && \
+ kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+ kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+ kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"
diff --git a/documentation/compose/kafka-ssl.yml b/documentation/compose/kafka-ssl.yml
index 2b6dae855a..4fc7daebff 100644
--- a/documentation/compose/kafka-ssl.yml
+++ b/documentation/compose/kafka-ssl.yml
@@ -7,13 +7,11 @@ services:
ports:
- 8080:8080
depends_on:
- - zookeeper0
- - kafka0
+ - kafka
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
- KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092 # SSL LISTENER!
- KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
+ KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092 # SSL LISTENER!
KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_PASSWORD: secret
KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
@@ -23,28 +21,30 @@ services:
- ./ssl/kafka.truststore.jks:/kafka.truststore.jks
- ./ssl/kafka.keystore.jks:/kafka.keystore.jks
- zookeeper0:
- image: confluentinc/cp-zookeeper:6.0.1
- environment:
- ZOOKEEPER_CLIENT_PORT: 2181
- ZOOKEEPER_TICK_TIME: 2000
+ kafka:
+ image: confluentinc/cp-kafka:7.2.1
+ hostname: kafka
+ container_name: kafka
ports:
- - 2181:2181
-
- kafka0:
- image: confluentinc/cp-kafka:6.0.1
- hostname: kafka0
- depends_on:
- - zookeeper0
- ports:
- - '9092:9092'
+ - "9092:9092"
+ - "9997:9997"
environment:
KAFKA_BROKER_ID: 1
- KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,SSL:SSL,PLAINTEXT_HOST:PLAINTEXT'
+ KAFKA_ADVERTISED_LISTENERS: 'SSL://kafka:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
- KAFKA_ADVERTISED_LISTENERS: SSL://kafka0:29092,PLAINTEXT_HOST://localhost:9092
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: SSL:SSL,PLAINTEXT_HOST:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME: SSL
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+ KAFKA_JMX_PORT: 9997
+ KAFKA_JMX_HOSTNAME: localhost
+ KAFKA_PROCESS_ROLES: 'broker,controller'
+ KAFKA_NODE_ID: 1
+ KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
+ KAFKA_LISTENERS: 'SSL://kafka:29092,CONTROLLER://kafka:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+ KAFKA_INTER_BROKER_LISTENER_NAME: 'SSL'
+ KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+ KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
KAFKA_SECURITY_PROTOCOL: SSL
KAFKA_SSL_ENABLED_MECHANISMS: PLAIN,SSL
KAFKA_SSL_KEYSTORE_FILENAME: kafka.keystore.jks
@@ -56,6 +56,8 @@ services:
KAFKA_SSL_CLIENT_AUTH: 'requested'
KAFKA_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: '' # COMMON NAME VERIFICATION IS DISABLED SERVER-SIDE
volumes:
+ - ./scripts/update_run.sh:/tmp/update_run.sh
- ./ssl/creds:/etc/kafka/secrets/creds
- ./ssl/kafka.truststore.jks:/etc/kafka/secrets/kafka.truststore.jks
- ./ssl/kafka.keystore.jks:/etc/kafka/secrets/kafka.keystore.jks
+ command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
\ No newline at end of file
diff --git a/documentation/compose/kafka-ui-arm64.yaml b/documentation/compose/kafka-ui-arm64.yaml
index 70134c6b52..bbcefecbf4 100644
--- a/documentation/compose/kafka-ui-arm64.yaml
+++ b/documentation/compose/kafka-ui-arm64.yaml
@@ -1,5 +1,3 @@
-# This compose file uses kafka cluster without zookeeper
-# Kafka without zookeeper is supported after image tag 6.2.0
# ARM64 supported images for kafka can be found here
# https://hub.docker.com/r/confluentinc/cp-kafka/tags?page=1&name=arm64
---
@@ -12,18 +10,18 @@ services:
- 8080:8080
depends_on:
- kafka0
- - schemaregistry0
+ - schema-registry0
- kafka-connect0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
- KAFKA_CLUSTERS_0_JMXPORT: 9997
- KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
+ KAFKA_CLUSTERS_0_METRICS_PORT: 9997
+ KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schema-registry0:8085
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
kafka0:
- image: confluentinc/cp-kafka:7.0.5.arm64
+ image: confluentinc/cp-kafka:7.2.1.arm64
hostname: kafka0
container_name: kafka0
ports:
@@ -44,14 +42,14 @@ services:
KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
- JMX_PORT: 9997
+ KAFKA_JMX_PORT: 9997
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
- schemaregistry0:
- image: confluentinc/cp-schema-registry:7.0.5.arm64
+ schema-registry0:
+ image: confluentinc/cp-schema-registry:7.2.1.arm64
ports:
- 8085:8085
depends_on:
@@ -59,20 +57,20 @@ services:
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
- SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
- SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
+ SCHEMA_REGISTRY_HOST_NAME: schema-registry0
+ SCHEMA_REGISTRY_LISTENERS: http://schema-registry0:8085
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
kafka-connect0:
- image: confluentinc/cp-kafka-connect:7.0.5.arm64
+ image: confluentinc/cp-kafka-connect:7.2.1.arm64
ports:
- 8083:8083
depends_on:
- kafka0
- - schemaregistry0
+ - schema-registry0
environment:
CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
CONNECT_GROUP_ID: compose-connect-group
@@ -83,16 +81,16 @@ services:
CONNECT_STATUS_STORAGE_TOPIC: _connect_status
CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
- CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
+ CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry0:8085
CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
- CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
+ CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry0:8085
CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
kafka-init-topics:
- image: confluentinc/cp-kafka:7.0.5.arm64
+ image: confluentinc/cp-kafka:7.2.1.arm64
volumes:
- ./message.json:/data/message.json
depends_on:
@@ -102,4 +100,4 @@ services:
kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
- kafka-console-producer --broker-list kafka0:29092 -topic second.users < /data/message.json'"
+ kafka-console-producer --bootstrap-server kafka0:29092 --topic second.users < /data/message.json'"
diff --git a/documentation/compose/kafka-ui-auth-context.yaml b/documentation/compose/kafka-ui-auth-context.yaml
index fac1af0aa2..69eebbfeeb 100644
--- a/documentation/compose/kafka-ui-auth-context.yaml
+++ b/documentation/compose/kafka-ui-auth-context.yaml
@@ -8,52 +8,40 @@ services:
ports:
- 8080:8080
depends_on:
- - zookeeper0
- - kafka0
+ - kafka
environment:
KAFKA_CLUSTERS_0_NAME: local
- KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
- KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
+ KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
SERVER_SERVLET_CONTEXT_PATH: /kafkaui
AUTH_TYPE: "LOGIN_FORM"
SPRING_SECURITY_USER_NAME: admin
SPRING_SECURITY_USER_PASSWORD: pass
- zookeeper0:
- image: confluentinc/cp-zookeeper:5.2.4
- environment:
- ZOOKEEPER_CLIENT_PORT: 2181
- ZOOKEEPER_TICK_TIME: 2000
+ kafka:
+ image: confluentinc/cp-kafka:7.2.1
+ hostname: kafka
+ container_name: kafka
ports:
- - 2181:2181
-
- kafka0:
- image: confluentinc/cp-kafka:5.3.1
- depends_on:
- - zookeeper0
- ports:
- - 9092:9092
- - 9997:9997
+ - "9092:9092"
+ - "9997:9997"
environment:
KAFKA_BROKER_ID: 1
- KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+ KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
- JMX_PORT: 9997
- KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
-
- kafka-init-topics:
- image: confluentinc/cp-kafka:5.3.1
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+ KAFKA_JMX_PORT: 9997
+ KAFKA_JMX_HOSTNAME: localhost
+ KAFKA_PROCESS_ROLES: 'broker,controller'
+ KAFKA_NODE_ID: 1
+ KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
+ KAFKA_LISTENERS: 'PLAINTEXT://kafka:29092,CONTROLLER://kafka:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+ KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+ KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+ KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
volumes:
- - ./message.json:/data/message.json
- depends_on:
- - kafka0
- command: "bash -c 'echo Waiting for Kafka to be ready... && \
- cub kafka-ready -b kafka0:29092 1 30 && \
- kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
- kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
- kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
- kafka-console-producer --broker-list kafka0:29092 -topic second.users < /data/message.json'"
+ - ./scripts/update_run.sh:/tmp/update_run.sh
+ command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
\ No newline at end of file
diff --git a/documentation/compose/kafka-ui-connectors-auth.yaml b/documentation/compose/kafka-ui-connectors-auth.yaml
index f6a6199802..b34c9d8086 100644
--- a/documentation/compose/kafka-ui-connectors-auth.yaml
+++ b/documentation/compose/kafka-ui-connectors-auth.yaml
@@ -1,68 +1,62 @@
---
-version: '2'
+version: "2"
services:
-
kafka-ui:
container_name: kafka-ui
image: provectuslabs/kafka-ui:latest
ports:
- 8080:8080
depends_on:
- - zookeeper0
- - zookeeper1
- kafka0
- - kafka1
- schemaregistry0
- kafka-connect0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
- KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
KAFKA_CLUSTERS_0_KAFKACONNECT_0_USERNAME: admin
KAFKA_CLUSTERS_0_KAFKACONNECT_0_PASSWORD: admin-secret
- KAFKA_CLUSTERS_0_KSQLDBSERVER: http://ksqldb:8088
-
- zookeeper0:
- image: confluentinc/cp-zookeeper:5.2.4
- environment:
- ZOOKEEPER_CLIENT_PORT: 2181
- ZOOKEEPER_TICK_TIME: 2000
- ports:
- - 2181:2181
kafka0:
- image: confluentinc/cp-kafka:5.3.1
- depends_on:
- - zookeeper0
+ image: confluentinc/cp-kafka:7.2.1
+ hostname: kafka0
+ container_name: kafka0
ports:
- - 9092:9092
- - 9997:9997
+ - "9092:9092"
+ - "9997:9997"
environment:
KAFKA_BROKER_ID: 1
- KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
+ KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092"
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
- JMX_PORT: 9997
+ KAFKA_JMX_PORT: 9997
+ KAFKA_JMX_HOSTNAME: localhost
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+ KAFKA_PROCESS_ROLES: "broker,controller"
+ KAFKA_NODE_ID: 1
+ KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka0:29093"
+ KAFKA_LISTENERS: "PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092"
+ KAFKA_INTER_BROKER_LISTENER_NAME: "PLAINTEXT"
+ KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+ KAFKA_LOG_DIRS: "/tmp/kraft-combined-logs"
+ volumes:
+ - ./scripts/update_run.sh:/tmp/update_run.sh
+ command: 'bash -c ''if [ ! -f /tmp/update_run.sh ]; then echo "ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'''
schemaregistry0:
- image: confluentinc/cp-schema-registry:5.5.0
+ image: confluentinc/cp-schema-registry:7.2.1
ports:
- 8085:8085
depends_on:
- - zookeeper0
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
- SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@@ -71,7 +65,6 @@ services:
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
-
kafka-connect0:
build:
context: ./kafka-connect
@@ -105,47 +98,17 @@ services:
CONNECT_REST_EXTENSION_CLASSES: "org.apache.kafka.connect.rest.basic.auth.extension.BasicAuthSecurityRestExtension"
KAFKA_OPTS: "-Djava.security.auth.login.config=/conf/kafka_connect.jaas"
-# AWS_ACCESS_KEY_ID: ""
-# AWS_SECRET_ACCESS_KEY: ""
+ # AWS_ACCESS_KEY_ID: ""
+ # AWS_SECRET_ACCESS_KEY: ""
kafka-init-topics:
- image: confluentinc/cp-kafka:5.3.1
+ image: confluentinc/cp-kafka:7.2.1
volumes:
- ./message.json:/data/message.json
- depends_on:
- - kafka1
- command: "bash -c 'echo Waiting for Kafka to be ready... && \
- cub kafka-ready -b kafka1:29092 1 30 && \
- kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
- kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
- kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
- kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
-
- create-connectors:
- image: ellerbrock/alpine-bash-curl-ssl
- depends_on:
- - postgres-db
- - kafka-connect0
- volumes:
- - ./connectors:/connectors
- command: bash -c '/connectors/start.sh'
-
- ksqldb:
- image: confluentinc/ksqldb-server:0.18.0
depends_on:
- kafka0
- - kafka-connect0
- - schemaregistry0
- ports:
- - 8088:8088
- environment:
- KSQL_CUB_KAFKA_TIMEOUT: 120
- KSQL_LISTENERS: http://0.0.0.0:8088
- KSQL_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
- KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true"
- KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: "true"
- KSQL_KSQL_CONNECT_URL: http://kafka-connect0:8083
- KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
- KSQL_KSQL_SERVICE_ID: my_ksql_1
- KSQL_KSQL_HIDDEN_TOPICS: '^_.*'
- KSQL_CACHE_MAX_BYTES_BUFFERING: 0
+ command: "bash -c 'echo Waiting for Kafka to be ready... && \
+ cub kafka-ready -b kafka0:29092 1 30 && \
+ kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+ kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+ kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"
diff --git a/documentation/compose/kafka-ui-jmx-secured.yml b/documentation/compose/kafka-ui-jmx-secured.yml
index 71f61b1e55..de56a7e2c6 100644
--- a/documentation/compose/kafka-ui-jmx-secured.yml
+++ b/documentation/compose/kafka-ui-jmx-secured.yml
@@ -9,14 +9,12 @@ services:
- 8080:8080
- 5005:5005
depends_on:
- - zookeeper0
- kafka0
- schemaregistry0
- kafka-connect0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
- KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
@@ -34,29 +32,29 @@ services:
- ./jmx/clienttruststore:/jmx/clienttruststore
- ./jmx/clientkeystore:/jmx/clientkeystore
- zookeeper0:
- image: confluentinc/cp-zookeeper:5.2.4
- environment:
- ZOOKEEPER_CLIENT_PORT: 2181
- ZOOKEEPER_TICK_TIME: 2000
- ports:
- - 2181:2181
-
kafka0:
- image: confluentinc/cp-kafka:5.3.1
- depends_on:
- - zookeeper0
+ image: confluentinc/cp-kafka:7.2.1
+ hostname: kafka0
+ container_name: kafka0
ports:
- 9092:9092
- 9997:9997
environment:
KAFKA_BROKER_ID: 1
- KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+ KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
- JMX_PORT: 9997
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+ KAFKA_JMX_PORT: 9997
+ KAFKA_PROCESS_ROLES: 'broker,controller'
+ KAFKA_NODE_ID: 1
+ KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+ KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+ KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+ KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+ KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
# CHMOD 700 FOR JMXREMOTE.* FILES
KAFKA_JMX_OPTS: >-
-Dcom.sun.management.jmxremote
@@ -75,21 +73,21 @@ services:
-Djava.rmi.server.logCalls=true
# -Djavax.net.debug=ssl:handshake
volumes:
- - ./jmx/serverkeystore:/jmx/serverkeystore
- - ./jmx/servertruststore:/jmx/servertruststore
- - ./jmx/jmxremote.password:/jmx/jmxremote.password
- - ./jmx/jmxremote.access:/jmx/jmxremote.access
+ - ./jmx/serverkeystore:/jmx/serverkeystore
+ - ./jmx/servertruststore:/jmx/servertruststore
+ - ./jmx/jmxremote.password:/jmx/jmxremote.password
+ - ./jmx/jmxremote.access:/jmx/jmxremote.access
+ - ./scripts/update_run.sh:/tmp/update_run.sh
+ command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
schemaregistry0:
- image: confluentinc/cp-schema-registry:5.5.0
+ image: confluentinc/cp-schema-registry:7.2.1
ports:
- 8085:8085
depends_on:
- - zookeeper0
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
- SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@@ -99,7 +97,7 @@ services:
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
kafka-connect0:
- image: confluentinc/cp-kafka-connect:6.0.1
+ image: confluentinc/cp-kafka-connect:7.2.1
ports:
- 8083:8083
depends_on:
@@ -124,13 +122,13 @@ services:
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
kafka-init-topics:
- image: confluentinc/cp-kafka:5.3.1
+ image: confluentinc/cp-kafka:7.2.1
volumes:
- ./message.json:/data/message.json
depends_on:
- kafka0
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka0:29092 1 30 && \
- kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
- kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
- kafka-console-producer --broker-list kafka0:29092 -topic second.users < /data/message.json'"
+ kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+ kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+ kafka-console-producer --bootstrap-server kafka0:29092 --topic second.users < /data/message.json'"
\ No newline at end of file
diff --git a/documentation/compose/kafka-ui-with-jmx-exporter.yaml b/documentation/compose/kafka-ui-with-jmx-exporter.yaml
index e6d21584a7..b0d940694b 100644
--- a/documentation/compose/kafka-ui-with-jmx-exporter.yaml
+++ b/documentation/compose/kafka-ui-with-jmx-exporter.yaml
@@ -2,33 +2,33 @@
version: '2'
services:
- zookeeper0:
- image: confluentinc/cp-zookeeper:5.2.4
- environment:
- ZOOKEEPER_CLIENT_PORT: 2181
- ZOOKEEPER_TICK_TIME: 2000
- ports:
- - 2181:2181
-
kafka0:
- image: confluentinc/cp-kafka:5.3.1
- # downloading jmx_exporter javaagent and starting kafka
- command: "/usr/share/jmx_exporter/kafka-prepare-and-run"
- depends_on:
- - zookeeper0
+ image: confluentinc/cp-kafka:7.2.1
+ hostname: kafka0
+ container_name: kafka0
+ ports:
+ - "9092:9092"
+ - "11001:11001"
environment:
KAFKA_BROKER_ID: 1
- KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+ KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+ KAFKA_PROCESS_ROLES: 'broker,controller'
+ KAFKA_NODE_ID: 1
+ KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+ KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+ KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+ KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+ KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
KAFKA_OPTS: -javaagent:/usr/share/jmx_exporter/jmx_prometheus_javaagent.jar=11001:/usr/share/jmx_exporter/kafka-broker.yml
- ports:
- - 9092:9092
- - 11001:11001
volumes:
- ./jmx-exporter:/usr/share/jmx_exporter/
+ - ./scripts/update_run.sh:/tmp/update_run.sh
+ command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /usr/share/jmx_exporter/kafka-prepare-and-run ; fi'"
kafka-ui:
container_name: kafka-ui
@@ -36,7 +36,6 @@ services:
ports:
- 8080:8080
depends_on:
- - zookeeper0
- kafka0
environment:
KAFKA_CLUSTERS_0_NAME: local
diff --git a/documentation/compose/kafka-ui.yaml b/documentation/compose/kafka-ui.yaml
index ff808ae1ab..790cbfc020 100644
--- a/documentation/compose/kafka-ui.yaml
+++ b/documentation/compose/kafka-ui.yaml
@@ -8,86 +8,89 @@ services:
ports:
- 8080:8080
depends_on:
- - zookeeper0
- - zookeeper1
- kafka0
- kafka1
- schemaregistry0
+ - schemaregistry1
- kafka-connect0
environment:
KAFKA_CLUSTERS_0_NAME: local
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
- KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
KAFKA_CLUSTERS_1_NAME: secondLocal
KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
- KAFKA_CLUSTERS_1_ZOOKEEPER: zookeeper1:2181
- KAFKA_CLUSTERS_1_METRICS_PORT: 9998
+ KAFKA_CLUSTERS_1_METRICS_PORT: 9998
KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
- KAFKA_CLUSTERS_1_KAFKACONNECT_0_NAME: first
- KAFKA_CLUSTERS_1_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
-
- zookeeper0:
- image: confluentinc/cp-zookeeper:5.2.4
- environment:
- ZOOKEEPER_CLIENT_PORT: 2181
- ZOOKEEPER_TICK_TIME: 2000
- ports:
- - 2181:2181
kafka0:
- image: confluentinc/cp-kafka:5.3.1
- depends_on:
- - zookeeper0
+ image: confluentinc/cp-kafka:7.2.1
+ hostname: kafka0
+ container_name: kafka0
ports:
- - 9092:9092
- - 9997:9997
+ - "9092:9092"
+ - "9997:9997"
environment:
KAFKA_BROKER_ID: 1
- KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+ KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
- JMX_PORT: 9997
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+ KAFKA_JMX_PORT: 9997
+ KAFKA_JMX_HOSTNAME: localhost
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
-
- zookeeper1:
- image: confluentinc/cp-zookeeper:5.2.4
- environment:
- ZOOKEEPER_CLIENT_PORT: 2181
- ZOOKEEPER_TICK_TIME: 2000
+ KAFKA_PROCESS_ROLES: 'broker,controller'
+ KAFKA_NODE_ID: 1
+ KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+ KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+ KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+ KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+ KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+ volumes:
+ - ./scripts/update_run.sh:/tmp/update_run.sh
+ command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
kafka1:
- image: confluentinc/cp-kafka:5.3.1
- depends_on:
- - zookeeper1
+ image: confluentinc/cp-kafka:7.2.1
+ hostname: kafka1
+ container_name: kafka1
ports:
- - 9093:9093
- - 9998:9998
+ - "9093:9092"
+ - "9998:9998"
environment:
KAFKA_BROKER_ID: 1
- KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
- KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
- KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+ KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
- JMX_PORT: 9998
- KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka1 -Dcom.sun.management.jmxremote.rmi.port=9998
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+ KAFKA_JMX_PORT: 9998
+ KAFKA_JMX_HOSTNAME: localhost
+ KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka1 -Dcom.sun.management.jmxremote.rmi.port=9998
+ KAFKA_PROCESS_ROLES: 'broker,controller'
+ KAFKA_NODE_ID: 1
+ KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka1:29093'
+ KAFKA_LISTENERS: 'PLAINTEXT://kafka1:29092,CONTROLLER://kafka1:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+ KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+ KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+ KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+ volumes:
+ - ./scripts/update_run.sh:/tmp/update_run.sh
+ command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
schemaregistry0:
- image: confluentinc/cp-schema-registry:5.5.0
+ image: confluentinc/cp-schema-registry:7.2.1
ports:
- 8085:8085
depends_on:
- - zookeeper0
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
- SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@@ -97,15 +100,13 @@ services:
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
schemaregistry1:
- image: confluentinc/cp-schema-registry:5.5.0
+ image: confluentinc/cp-schema-registry:7.2.1
ports:
- 18085:8085
depends_on:
- - zookeeper1
- kafka1
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
- SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
@@ -140,14 +141,14 @@ services:
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
kafka-init-topics:
- image: confluentinc/cp-kafka:5.3.1
+ image: confluentinc/cp-kafka:7.2.1
volumes:
- ./message.json:/data/message.json
depends_on:
- kafka1
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka1:29092 1 30 && \
- kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
- kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
- kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
- kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
+ kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
+ kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
+ kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+ kafka-console-producer --bootstrap-server kafka1:29092 --topic second.users < /data/message.json'"
diff --git a/documentation/compose/kafka-with-zookeeper.yaml b/documentation/compose/kafka-with-zookeeper.yaml
new file mode 100644
index 0000000000..30e6f17eaf
--- /dev/null
+++ b/documentation/compose/kafka-with-zookeeper.yaml
@@ -0,0 +1,48 @@
+---
+version: '2'
+services:
+
+ zookeeper:
+ image: confluentinc/cp-zookeeper:7.2.1
+ hostname: zookeeper
+ container_name: zookeeper
+ ports:
+ - "2181:2181"
+ environment:
+ ZOOKEEPER_CLIENT_PORT: 2181
+ ZOOKEEPER_TICK_TIME: 2000
+
+ kafka:
+ image: confluentinc/cp-server:7.2.1
+ hostname: kafka
+ container_name: kafka
+ depends_on:
+ - zookeeper
+ ports:
+ - "9092:9092"
+ - "9997:9997"
+ environment:
+ KAFKA_BROKER_ID: 1
+ KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
+ KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
+ KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
+ KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+ KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+ KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
+ KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
+ KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+ KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+ KAFKA_JMX_PORT: 9997
+ KAFKA_JMX_HOSTNAME: kafka
+
+ kafka-init-topics:
+ image: confluentinc/cp-kafka:7.2.1
+ volumes:
+ - ./message.json:/data/message.json
+ depends_on:
+ - kafka
+ command: "bash -c 'echo Waiting for Kafka to be ready... && \
+ cub kafka-ready -b kafka:29092 1 30 && \
+ kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka:29092 && \
+ kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka:29092 && \
+ kafka-console-producer --bootstrap-server kafka:29092 --topic users < /data/message.json'"
diff --git a/documentation/compose/scripts/clusterID b/documentation/compose/scripts/clusterID
new file mode 100644
index 0000000000..4417a5a68d
--- /dev/null
+++ b/documentation/compose/scripts/clusterID
@@ -0,0 +1 @@
+zlFiTJelTOuhnklFwLWixw
\ No newline at end of file
diff --git a/documentation/compose/scripts/create_cluster_id.sh b/documentation/compose/scripts/create_cluster_id.sh
new file mode 100644
index 0000000000..d946fbc4af
--- /dev/null
+++ b/documentation/compose/scripts/create_cluster_id.sh
@@ -0,0 +1 @@
+kafka-storage random-uuid > /workspace/kafka-ui/documentation/compose/clusterID
\ No newline at end of file
diff --git a/documentation/compose/scripts/update_run_cluster.sh b/documentation/compose/scripts/update_run_cluster.sh
new file mode 100644
index 0000000000..31da333aae
--- /dev/null
+++ b/documentation/compose/scripts/update_run_cluster.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+# This script is required to run kafka cluster (without zookeeper)
+
+# Docker workaround: Remove check for KAFKA_ZOOKEEPER_CONNECT parameter
+sed -i '/KAFKA_ZOOKEEPER_CONNECT/d' /etc/confluent/docker/configure
+
+# Docker workaround: Ignore cub zk-ready
+sed -i 's/cub zk-ready/echo ignore zk-ready/' /etc/confluent/docker/ensure
+
+# KRaft required step: Format the storage directory with a new cluster ID
+echo "kafka-storage format --ignore-formatted -t $(cat /tmp/clusterID) -c /etc/kafka/kafka.properties" >> /etc/confluent/docker/ensure
\ No newline at end of file
diff --git a/documentation/guides/SSO.md b/documentation/guides/SSO.md
index 1ddfab2c7f..f0fae959bd 100644
--- a/documentation/guides/SSO.md
+++ b/documentation/guides/SSO.md
@@ -58,7 +58,6 @@ For Azure AD (Office365) OAUTH2 you'll want to add additional environment variab
docker run -p 8080:8080 \
-e KAFKA_CLUSTERS_0_NAME="${cluster_name}"\
-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS="${kafka_listeners}" \
- -e KAFKA_CLUSTERS_0_ZOOKEEPER="${zookeeper_servers}" \
-e KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS="${kafka_connect_servers}"
-e AUTH_TYPE=OAUTH2 \
-e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
diff --git a/kafka-ui-api/pom.xml b/kafka-ui-api/pom.xml
index ab5fa62bd1..65ab22682f 100644
--- a/kafka-ui-api/pom.xml
+++ b/kafka-ui-api/pom.xml
@@ -173,6 +173,12 @@
${mockito.version}
test
+
+ net.bytebuddy
+ byte-buddy
+ ${byte-buddy.version}
+ test
+
org.assertj
assertj-core
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/KsqlClient.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/KsqlClient.java
deleted file mode 100644
index 8d051234ab..0000000000
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/client/KsqlClient.java
+++ /dev/null
@@ -1,53 +0,0 @@
-package com.provectus.kafka.ui.client;
-
-import com.fasterxml.jackson.databind.JsonNode;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import com.provectus.kafka.ui.exception.UnprocessableEntityException;
-import com.provectus.kafka.ui.model.KafkaCluster;
-import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
-import com.provectus.kafka.ui.service.ksql.KsqlApiClient;
-import com.provectus.kafka.ui.strategy.ksql.statement.BaseStrategy;
-import lombok.RequiredArgsConstructor;
-import lombok.SneakyThrows;
-import lombok.extern.slf4j.Slf4j;
-import org.springframework.http.HttpStatus;
-import org.springframework.http.MediaType;
-import org.springframework.stereotype.Service;
-import org.springframework.web.reactive.function.BodyInserters;
-import org.springframework.web.reactive.function.client.ClientResponse;
-import org.springframework.web.reactive.function.client.WebClient;
-import reactor.core.publisher.Mono;
-
-@Service
-@RequiredArgsConstructor
-@Slf4j
-public class KsqlClient {
- private final WebClient webClient;
- private final ObjectMapper mapper;
-
- public Mono execute(BaseStrategy ksqlStatement, KafkaCluster cluster) {
- return webClient.post()
- .uri(ksqlStatement.getUri())
- .headers(httpHeaders -> KsqlApiClient.setBasicAuthIfEnabled(httpHeaders, cluster))
- .accept(new MediaType("application", "vnd.ksql.v1+json"))
- .body(BodyInserters.fromValue(ksqlStatement.getKsqlCommand()))
- .retrieve()
- .onStatus(HttpStatus::isError, this::getErrorMessage)
- .bodyToMono(byte[].class)
- .map(this::toJson)
- .map(ksqlStatement::serializeResponse);
- }
-
- private Mono getErrorMessage(ClientResponse response) {
- return response
- .bodyToMono(byte[].class)
- .map(this::toJson)
- .map(jsonNode -> jsonNode.get("message").asText())
- .flatMap(error -> Mono.error(new UnprocessableEntityException(error)));
- }
-
- @SneakyThrows
- private JsonNode toJson(byte[] content) {
- return this.mapper.readTree(content);
- }
-}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
index c0d4d0296c..4bba879b01 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/ConsumerGroupsController.java
@@ -53,17 +53,6 @@ public class ConsumerGroupsController extends AbstractController implements Cons
.map(ResponseEntity::ok);
}
-
- @Override
- public Mono>> getConsumerGroups(String clusterName,
- ServerWebExchange exchange) {
- return consumerGroupService.getAllConsumerGroups(getCluster(clusterName))
- .map(Flux::fromIterable)
- .map(f -> f.map(ConsumerGroupMapper::toDto))
- .map(ResponseEntity::ok)
- .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
- }
-
@Override
public Mono>> getTopicConsumerGroups(
String clusterName, String topicName, ServerWebExchange exchange) {
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/InfoController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/InfoController.java
index 66e5d70bd3..cdda3d0953 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/InfoController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/InfoController.java
@@ -17,9 +17,16 @@ public class InfoController extends AbstractController implements TimeStampForma
@Value("${timestamp.format:dd.MM.YYYY HH:mm:ss}")
private String timeStampFormat;
+ @Value("${timestamp.format:DD.MM.YYYY HH:mm:ss}")
+ private String timeStampFormatIso;
@Override
public Mono> getTimeStampFormat(ServerWebExchange exchange) {
return Mono.just(ResponseEntity.ok(new TimeStampFormatDTO().timeStampFormat(timeStampFormat)));
}
+
+ @Override
+ public Mono> getTimeStampFormatISO(ServerWebExchange exchange) {
+ return Mono.just(ResponseEntity.ok(new TimeStampFormatDTO().timeStampFormat(timeStampFormatIso)));
+ }
}
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KsqlController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KsqlController.java
index 62dc24fab2..c3d833e2b8 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KsqlController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/KsqlController.java
@@ -1,15 +1,12 @@
package com.provectus.kafka.ui.controller;
import com.provectus.kafka.ui.api.KsqlApi;
-import com.provectus.kafka.ui.model.KsqlCommandDTO;
-import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.KsqlCommandV2DTO;
import com.provectus.kafka.ui.model.KsqlCommandV2ResponseDTO;
import com.provectus.kafka.ui.model.KsqlResponseDTO;
import com.provectus.kafka.ui.model.KsqlStreamDescriptionDTO;
import com.provectus.kafka.ui.model.KsqlTableDescriptionDTO;
import com.provectus.kafka.ui.model.KsqlTableResponseDTO;
-import com.provectus.kafka.ui.service.KsqlService;
import com.provectus.kafka.ui.service.ksql.KsqlServiceV2;
import java.util.List;
import java.util.Map;
@@ -27,17 +24,8 @@ import reactor.core.publisher.Mono;
@RequiredArgsConstructor
@Slf4j
public class KsqlController extends AbstractController implements KsqlApi {
- private final KsqlService ksqlService;
- private final KsqlServiceV2 ksqlServiceV2;
- @Override
- public Mono> executeKsqlCommand(String clusterName,
- Mono
- ksqlCommand,
- ServerWebExchange exchange) {
- return ksqlService.executeKsqlCommand(getCluster(clusterName), ksqlCommand)
- .map(ResponseEntity::ok);
- }
+ private final KsqlServiceV2 ksqlServiceV2;
@Override
public Mono> executeKsql(String clusterName,
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
index fab4bcfd5c..79ae59c3b3 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/controller/MessagesController.java
@@ -5,6 +5,7 @@ import static com.provectus.kafka.ui.serde.api.Serde.Target.VALUE;
import static java.util.stream.Collectors.toMap;
import com.provectus.kafka.ui.api.MessagesApi;
+import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
@@ -18,6 +19,7 @@ import com.provectus.kafka.ui.service.MessagesService;
import java.util.List;
import java.util.Map;
import java.util.Optional;
+import javax.annotation.Nullable;
import javax.validation.Valid;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
@@ -63,18 +65,22 @@ public class MessagesController extends AbstractController implements MessagesAp
String keySerde,
String valueSerde,
ServerWebExchange exchange) {
+ seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING;
+ seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD;
+ filterQueryType = filterQueryType != null ? filterQueryType : MessageFilterTypeDTO.STRING_CONTAINS;
+ int recordsLimit =
+ Optional.ofNullable(limit).map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT)).orElse(DEFAULT_LOAD_RECORD_LIMIT);
+
var positions = new ConsumerPosition(
- seekType != null ? seekType : SeekTypeDTO.BEGINNING,
- parseSeekTo(topicName, seekTo),
- seekDirection
+ seekType,
+ topicName,
+ parseSeekTo(topicName, seekType, seekTo)
);
- int recordsLimit = Optional.ofNullable(limit)
- .map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT))
- .orElse(DEFAULT_LOAD_RECORD_LIMIT);
return Mono.just(
ResponseEntity.ok(
messagesService.loadMessages(
- getCluster(clusterName), topicName, positions, q, filterQueryType, recordsLimit, keySerde, valueSerde)
+ getCluster(clusterName), topicName, positions, q, filterQueryType,
+ recordsLimit, seekDirection, keySerde, valueSerde)
)
);
}
@@ -92,9 +98,13 @@ public class MessagesController extends AbstractController implements MessagesAp
* The format is [partition]::[offset] for specifying offsets
* or [partition]::[timestamp in millis] for specifying timestamps.
*/
- private Map parseSeekTo(String topic, List seekTo) {
+ @Nullable
+ private Map parseSeekTo(String topic, SeekTypeDTO seekType, List seekTo) {
if (seekTo == null || seekTo.isEmpty()) {
- return Map.of();
+ if (seekType == SeekTypeDTO.LATEST || seekType == SeekTypeDTO.BEGINNING) {
+ return null;
+ }
+ throw new ValidationException("seekTo should be set if seekType is " + seekType);
}
return seekTo.stream()
.map(p -> {
diff --git a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
index 59db425b33..d2012355db 100644
--- a/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
+++ b/kafka-ui-api/src/main/java/com/provectus/kafka/ui/emitter/BackwardRecordEmitter.java
@@ -1,21 +1,18 @@
package com.provectus.kafka.ui.emitter;
+import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
-import com.provectus.kafka.ui.util.OffsetsSeekBackward;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
-import java.util.Map;
-import java.util.SortedMap;
import java.util.TreeMap;
-import java.util.function.Function;
+import java.util.function.Supplier;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
-import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
@@ -29,80 +26,68 @@ public class BackwardRecordEmitter
private static final Duration POLL_TIMEOUT = Duration.ofMillis(200);
- private final Function