merged with master

commit fcfdc69b45

146 changed files with 1756 additions and 3240 deletions
.github/workflows/frontend.yaml (vendored), 4 changes

@@ -20,11 +20,11 @@ jobs:
           # Disabling shallow clone is recommended for improving relevancy of reporting
           fetch-depth: 0
           ref: ${{ github.event.pull_request.head.sha }}
-      - uses: pnpm/action-setup@v2.2.3
+      - uses: pnpm/action-setup@v2.2.4
         with:
           version: 7.4.0
       - name: Install node
-        uses: actions/setup-node@v3.4.1
+        uses: actions/setup-node@v3.5.1
         with:
           node-version: "16.15.0"
           cache: "pnpm"
.github/workflows/helm.yaml (vendored), 25 changes

@@ -12,9 +12,18 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Helm tool installer
-        uses: Azure/setup-helm@v1
+        uses: Azure/setup-helm@v3
       - name: Setup Kubeval
         uses: lra/setup-kubeval@v1.0.1
+      #check, was helm version increased in Chart.yaml?
+      - name: Check version
+        shell: bash
+        run: |
+          helm_version_new=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
+          helm_version_old=$(curl -s https://raw.githubusercontent.com/provectus/kafka-ui/master/charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}' )
+          echo $helm_version_old
+          echo $helm_version_new
+          if [[ "$helm_version_new" > "$helm_version_old" ]]; then exit 0 ; else exit 1 ; fi
       - name: Run kubeval
         shell: bash
         run: |
@@ -27,17 +36,3 @@ jobs:
           echo $version;
           helm template --kube-version $version --set ingress.enabled=true charts/kafka-ui -f charts/kafka-ui/values.yaml | kubeval --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master --strict -v $version;
           done
-      #check, was helm version increased in Chart.yaml?
-      - name: Check version
-        shell: bash
-        run: |
-          git fetch
-          git checkout master
-          helm_version_old=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
-          git checkout $GITHUB_HEAD_REF
-          helm_version_new=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
-          echo $helm_version_old
-          echo $helm_version_new
-          if [[ "$helm_version_new" > "$helm_version_old" ]]; then exit 0 ; else exit 1 ; fi
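A note on the version check added above: `[[ "$helm_version_new" > "$helm_version_old" ]]` is a lexicographic string comparison, not a semantic-version comparison, so a bump from 0.4.9 to 0.4.10 would be reported as a decrease. A minimal sketch of a version-aware check, assuming GNU sort's -V flag is available:

    # Compare chart versions with version sort instead of string comparison.
    old="0.4.9"
    new="0.4.10"
    highest=$(printf '%s\n%s\n' "$old" "$new" | sort -V | tail -n1)
    if [[ "$new" == "$highest" && "$new" != "$old" ]]; then
      echo "version was increased"
    else
      echo "version was NOT increased" >&2
      exit 1
    fi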
.github/workflows/release-helm.yaml (vendored), 11 changes

@@ -19,19 +19,20 @@ jobs:
           git config user.name github-actions
           git config user.email github-actions@github.com

-      - uses: azure/setup-helm@v1
+      - uses: azure/setup-helm@v3

       - name: add chart #realse helm with new version
         run: |
-          echo "VERSION=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')" >> $GITHUB_ENV
-          MSG=$(helm package charts/kafka-ui)
+          VERSION=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
+          echo "HELM_VERSION=$(echo ${VERSION})" >> $GITHUB_ENV
+          MSG=$(helm package charts/kafka-ui)
           git fetch origin
           git stash
           git checkout -b gh-pages origin/gh-pages
           helm repo index .
           git add -f ${MSG##*/} index.yaml
-          git commit -m "release ${{ env.VERSION }}"
+          git commit -m "release ${VERSION}"
           git push
       - uses: rickstaa/action-create-tag@v1 #create new tag
         with:
-          tag: "charts/kafka-ui-${{ env.VERSION }}"
+          tag: "charts/kafka-ui-${{ env.HELM_VERSION }}"
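For context on the `${MSG##*/}` used in the `git add` line: `helm package` prints a message that ends with the path of the generated archive, and the `##*/` expansion strips everything through the last slash, leaving just the .tgz file name. A small sketch (the sample path is illustrative):

    # helm package prints a line like:
    #   Successfully packaged chart and saved it to: /workspace/kafka-ui-0.4.4.tgz
    MSG="Successfully packaged chart and saved it to: /workspace/kafka-ui-0.4.4.tgz"
    # Longest-prefix removal through the final '/' leaves only the file name:
    echo "${MSG##*/}"    # prints: kafka-ui-0.4.4.tgz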
.github/workflows/stale.yaml (vendored), 2 changes

@@ -7,7 +7,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v5
+      - uses: actions/stale@v6
        with:
          days-before-issue-stale: 7
          days-before-issue-close: 3
charts/kafka-ui/Chart.yaml

@@ -2,6 +2,6 @@ apiVersion: v2
 name: kafka-ui
 description: A Helm chart for kafka-UI
 type: application
-version: 0.4.3
-appVersion: latest
+version: 0.4.4
+appVersion: v0.4.0
 icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png
charts/kafka-ui/templates/deployment.yaml

@@ -18,6 +18,7 @@ spec:
         {{- toYaml . | nindent 8 }}
         {{- end }}
         checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+        checksum/configFromValues: {{ include (print $.Template.BasePath "/configmap_fromValues.yaml") . | sha256sum }}
         checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
       labels:
         {{- include "kafka-ui.selectorLabels" . | nindent 8 }}
@@ -136,4 +137,4 @@ spec:
       {{- with .Values.tolerations }}
       tolerations:
         {{- toYaml . | nindent 8 }}
       {{- end }}
     {{- end }}
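The checksum annotations in the deployment template are the usual Helm trick for restarting pods when config changes: hashing the rendered configmap/secret into the pod-template annotations means any config edit alters the pod spec and triggers a rolling update on upgrade. A quick way to watch the hash move (sketch; the value override is illustrative):

    # Render twice and compare the checksum annotation; a config change
    # changes the hash, which in turn forces a Deployment rollout.
    helm template charts/kafka-ui | grep 'checksum/config:'
    helm template charts/kafka-ui --set envs.config.LOGGING_LEVEL_ROOT=debug | grep 'checksum/config:'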
@@ -17,7 +17,6 @@ services:
     environment:
       - KAFKA_CLUSTERS_0_NAME=local
       - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
-      - KAFKA_CLUSTERS_0_ZOOKEEPER=localhost:2181
 ```

 * If you prefer UI for Apache Kafka in read only mode
@@ -34,7 +33,6 @@ services:
     environment:
       - KAFKA_CLUSTERS_0_NAME=local
       - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
-      - KAFKA_CLUSTERS_0_ZOOKEEPER=localhost:2181
       - KAFKA_CLUSTERS_0_READONLY=true
 ```
@@ -13,3 +13,4 @@
 11. [kafka-ui-traefik-proxy.yaml](./kafka-ui-traefik-proxy.yaml) - Traefik specific proxy configuration.
 12. [oauth-cognito.yaml](./oauth-cognito.yaml) - OAuth2 with Cognito
 13. [kafka-ui-with-jmx-exporter.yaml](./kafka-ui-with-jmx-exporter.yaml) - A configuration with 2 kafka clusters with enabled prometheus jmx exporters instead of jmx.
+14. [kafka-with-zookeeper.yaml](./kafka-with-zookeeper.yaml) - An example for using kafka with zookeeper
@@ -8,24 +8,13 @@ services:
     ports:
       - 8080:8080
     depends_on:
-      - zookeeper0
       - kafka0
       - schemaregistry0
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
-      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
       KAFKA_CLUSTERS_0_METRICS_PORT: 9997
       KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
-      KAFKA_CLUSTERS_1_NAME: secondLocal
-      KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
-      KAFKA_CLUSTERS_1_ZOOKEEPER: zookeeper1:2181
-      KAFKA_CLUSTERS_1_METRICS_PORT: 9998
-      KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
-      KAFKA_CLUSTERS_1_KAFKACONNECT_0_NAME: first
-      KAFKA_CLUSTERS_1_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
       AUTH_TYPE: "LDAP"
       SPRING_LDAP_URLS: "ldap://ldap:10389"
       SPRING_LDAP_DN_PATTERN: "cn={0},ou=people,dc=planetexpress,dc=com"
@@ -47,41 +36,43 @@ services:
     image: rroemhild/test-openldap:latest
     hostname: "ldap"

-  zookeeper0:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2181:2181
-
   kafka0:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper0
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka0
+    container_name: kafka0
     ports:
-      - 9092:9092
-      - 9997:9997
+      - "9092:9092"
+      - "9997:9997"
     environment:
       KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      JMX_PORT: 9997
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
       KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+      KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+    volumes:
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

   schemaregistry0:
-    image: confluentinc/cp-schema-registry:5.5.0
+    image: confluentinc/cp-schema-registry:7.2.1
     ports:
       - 8085:8085
     depends_on:
-      - zookeeper0
       - kafka0
     environment:
       SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
-      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
       SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
       SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
       SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
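Every KRaft-mode broker in these compose files mounts ./scripts/update_run.sh and runs it before the stock Confluent entrypoint. The script itself is not part of this diff; in Confluent's cp-all-in-one KRaft demos the equivalent helper patches the image's ZooKeeper checks and formats the storage directory. A hedged sketch of that pattern (not necessarily this repository's actual script):

    #!/bin/sh
    # Typical update_run.sh for running cp-kafka in KRaft mode (sketch).
    # Remove the ZooKeeper requirement baked into the image's configure step.
    sed -i '/KAFKA_ZOOKEEPER_CONNECT/d' /etc/confluent/docker/configure
    # Skip the zookeeper-ready check in the launch sequence.
    sed -i 's/cub zk-ready/echo ignore zk-ready/' /etc/confluent/docker/ensure
    # KRaft requires formatted storage: format it with a (random) cluster ID.
    echo "kafka-storage format --ignore-formatted -t $(kafka-storage random-uuid) -c /etc/kafka/kafka.properties" >> /etc/confluent/docker/ensure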
@@ -8,57 +8,55 @@ services:
     ports:
       - 8080:8080
     depends_on:
-      - zookeeper0
       - kafka0
       - schemaregistry0
       - kafka-connect0
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
-      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
       KAFKA_CLUSTERS_0_METRICS_PORT: 9997
       KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
       KAFKA_CLUSTERS_0_KSQLDBSERVER: http://ksqldb:8088

-  zookeeper0:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2181:2181
-
   kafka0:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper0
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka0
+    container_name: kafka0
     ports:
-      - 9092:9092
-      - 9997:9997
+      - "9092:9092"
+      - "9997:9997"
     environment:
       KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      JMX_PORT: 9997
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
       KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+      KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+    volumes:
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

   schemaregistry0:
-    image: confluentinc/cp-schema-registry:5.5.0
+    image: confluentinc/cp-schema-registry:7.2.1
     ports:
       - 8085:8085
     depends_on:
-      - zookeeper0
       - kafka0
     environment:
       SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
-      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
       SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
       SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
       SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@@ -98,17 +96,16 @@ services:
       # AWS_SECRET_ACCESS_KEY: ""

   kafka-init-topics:
-    image: confluentinc/cp-kafka:5.3.1
+    image: confluentinc/cp-kafka:7.2.1
     volumes:
       - ./message.json:/data/message.json
     depends_on:
       - kafka0
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
-      cub kafka-ready -b kafka1:29092 1 30 && \
-      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
-      kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
-      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
-      kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
+      cub kafka-ready -b kafka0:29092 1 30 && \
+      kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+      kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+      kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"

   postgres-db:
     build:
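The init-container rewrite above also documents the CLI migration: Kafka 3.x removed the --zookeeper flag from kafka-topics, and --broker-list was superseded by --bootstrap-server in kafka-console-producer. The same sequence as a standalone script (broker address is an assumed locally exposed one):

    # Wait for the broker, create topics idempotently, then seed one topic.
    BROKER=localhost:9092
    # cub (Confluent utility belt) blocks until 1 broker is up, 30s timeout;
    # outside cp images, a retry loop around 'kafka-topics --list' works too.
    cub kafka-ready -b "$BROKER" 1 30
    kafka-topics --create --topic users --partitions 3 --replication-factor 1 \
      --if-not-exists --bootstrap-server "$BROKER"
    kafka-topics --create --topic messages --partitions 2 --replication-factor 1 \
      --if-not-exists --bootstrap-server "$BROKER"
    # One JSON message per line in message.json:
    kafka-console-producer --bootstrap-server "$BROKER" --topic users < message.json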
@@ -2,43 +2,44 @@
 version: '2'
 services:

-  zookeeper1:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2182:2181
-
   kafka1:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper1
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka1
+    container_name: kafka1
+    ports:
+      - "9092:9092"
+      - "9997:9997"
     environment:
       KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9092'
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      JMX_PORT: 9998
-      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9998
-    ports:
-      - 9093:9093
-      - 9998:9998
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka1:29093'
+      KAFKA_LISTENERS: 'PLAINTEXT://kafka1:29092,CONTROLLER://kafka1:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+    volumes:
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

   schemaregistry1:
-    image: confluentinc/cp-schema-registry:5.5.0
+    image: confluentinc/cp-schema-registry:7.2.1
     ports:
       - 18085:8085
     depends_on:
-      - zookeeper1
       - kafka1
     volumes:
       - ./jaas:/conf
     environment:
       SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
-      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
       SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
       SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
       SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
@@ -54,13 +55,29 @@ services:
       SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

   kafka-init-topics:
-    image: confluentinc/cp-kafka:5.3.1
+    image: confluentinc/cp-kafka:7.2.1
     volumes:
       - ./message.json:/data/message.json
     depends_on:
       - kafka1
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
       cub kafka-ready -b kafka1:29092 1 30 && \
-      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
-      kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
-      kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
+      kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
+      kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
+      kafka-console-producer --bootstrap-server kafka1:29092 --topic users < /data/message.json'"
+
+  kafka-ui:
+    container_name: kafka-ui
+    image: provectuslabs/kafka-ui:latest
+    ports:
+      - 8080:8080
+    depends_on:
+      - kafka1
+      - schemaregistry1
+    environment:
+      KAFKA_CLUSTERS_0_NAME: local
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka1:29092
+      KAFKA_CLUSTERS_0_METRICS_PORT: 9997
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry1:8085
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME: admin
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD: letmein
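With schemaregistry1 guarded by the mounted JAAS config, a quick host-side check of the basic-auth setup looks like this (sketch; admin/letmein are the credentials kafka-ui is given above, and 18085 is the published port):

    # Without credentials this should be rejected once auth is enforced:
    curl -i http://localhost:18085/subjects
    # With the credentials from the compose file:
    curl -i -u admin:letmein http://localhost:18085/subjects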
@@ -1,83 +1,41 @@
 ---
-version: '2'
+version: "2"
 services:

-  zookeeper0:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2181:2181
-
   kafka0:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper0
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka0
+    container_name: kafka0
+    ports:
+      - "9092:9092"
+      - "9997:9997"
     environment:
       KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
-      JMX_PORT: 9997
-      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9997
-    ports:
-      - 9092:9092
-      - 9997:9997
-
-  kafka01:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper0
-    environment:
-      KAFKA_BROKER_ID: 2
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka01:29092,PLAINTEXT_HOST://localhost:9094
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,PLAIN:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
-      JMX_PORT: 9999
-      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9999
-    ports:
-      - 9094:9094
-      - 9999:9999
-
-  zookeeper1:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2182:2181
-
-  kafka1:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper1
-    environment:
-      KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
-      JMX_PORT: 9998
-      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9998
-    ports:
-      - 9093:9093
-      - 9998:9998
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
+      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092"
+      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
+      KAFKA_PROCESS_ROLES: "broker,controller"
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka0:29093"
+      KAFKA_LISTENERS: "PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092"
+      KAFKA_INTER_BROKER_LISTENER_NAME: "PLAINTEXT"
+      KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+      KAFKA_LOG_DIRS: "/tmp/kraft-combined-logs"
+    volumes:
+      - ./scripts/update_run_cluster.sh:/tmp/update_run.sh
+      - ./scripts/clusterID:/tmp/clusterID
+    command: 'bash -c ''if [ ! -f /tmp/update_run.sh ]; then echo "ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'''

   schemaregistry0:
-    image: confluentinc/cp-schema-registry:5.5.0
+    image: confluentinc/cp-schema-registry:7.2.1
     depends_on:
-      - zookeeper0
       - kafka0
-      - kafka01
     environment:
-      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092,PLAINTEXT://kafka01:29092
-      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
+      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
       SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
       SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
       SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@@ -86,28 +44,10 @@ services:
       SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
       SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
       SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
     ports:
       - 8085:8085

-  schemaregistry1:
-    image: confluentinc/cp-schema-registry:5.5.0
-    ports:
-      - 18085:8085
-    depends_on:
-      - zookeeper1
-      - kafka1
-    environment:
-      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
-      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
-      SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
-      SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
-      SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
-      SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
-      SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
-      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

   kafka-connect0:
-    image: confluentinc/cp-kafka-connect:6.0.1
+    image: confluentinc/cp-kafka-connect:7.2.1
     ports:
       - 8083:8083
     depends_on:
@@ -131,16 +71,14 @@ services:
       CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
       CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"

   kafka-init-topics:
-    image: confluentinc/cp-kafka:5.3.1
+    image: confluentinc/cp-kafka:7.2.1
     volumes:
       - ./message.json:/data/message.json
     depends_on:
-      - kafka1
+      - kafka0
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
-      cub kafka-ready -b kafka1:29092 1 30 && \
-      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
-      kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
-      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
-      kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
+      cub kafka-ready -b kafka0:29092 1 30 && \
+      kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+      kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+      kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"
@@ -7,13 +7,11 @@ services:
     ports:
       - 8080:8080
     depends_on:
-      - zookeeper0
-      - kafka0
+      - kafka
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
-      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092 # SSL LISTENER!
-      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092 # SSL LISTENER!
       KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
       KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_PASSWORD: secret
       KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
@@ -23,28 +21,30 @@ services:
       - ./ssl/kafka.truststore.jks:/kafka.truststore.jks
       - ./ssl/kafka.keystore.jks:/kafka.keystore.jks

-  zookeeper0:
-    image: confluentinc/cp-zookeeper:6.0.1
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2181:2181
-
-  kafka0:
-    image: confluentinc/cp-kafka:6.0.1
-    hostname: kafka0
-    depends_on:
-      - zookeeper0
+  kafka:
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka
+    container_name: kafka
     ports:
-      - '9092:9092'
+      - "9092:9092"
+      - "9997:9997"
     environment:
       KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
-      KAFKA_ADVERTISED_LISTENERS: SSL://kafka0:29092,PLAINTEXT_HOST://localhost:9092
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: SSL:SSL,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: SSL
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,SSL:SSL,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'SSL://kafka:29092,PLAINTEXT_HOST://localhost:9092'
+      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
+      KAFKA_LISTENERS: 'SSL://kafka:29092,CONTROLLER://kafka:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'SSL'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
       KAFKA_SECURITY_PROTOCOL: SSL
       KAFKA_SSL_ENABLED_MECHANISMS: PLAIN,SSL
       KAFKA_SSL_KEYSTORE_FILENAME: kafka.keystore.jks
@@ -56,6 +56,8 @@ services:
       KAFKA_SSL_CLIENT_AUTH: 'requested'
       KAFKA_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: '' # COMMON NAME VERIFICATION IS DISABLED SERVER-SIDE
     volumes:
+      - ./scripts/update_run.sh:/tmp/update_run.sh
       - ./ssl/creds:/etc/kafka/secrets/creds
       - ./ssl/kafka.truststore.jks:/etc/kafka/secrets/kafka.truststore.jks
       - ./ssl/kafka.keystore.jks:/etc/kafka/secrets/kafka.keystore.jks
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
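To talk to the SSL listener above with a console client, the client needs the same truststore/keystore material that kafka-ui mounts. A hedged sketch (paths mirror the compose mounts; the keystore password is assumed to match the truststore's "secret"):

    # Minimal client SSL config; run where the ssl/ files are available.
    cat > client-ssl.properties <<'EOF'
    security.protocol=SSL
    ssl.truststore.location=./ssl/kafka.truststore.jks
    ssl.truststore.password=secret
    ssl.keystore.location=./ssl/kafka.keystore.jks
    ssl.keystore.password=secret
    ssl.endpoint.identification.algorithm=
    EOF
    # List topics over the internal SSL listener (kafka:29092):
    kafka-topics --bootstrap-server kafka:29092 \
      --command-config client-ssl.properties --list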
@@ -1,5 +1,3 @@
-# This compose file uses kafka cluster without zookeeper
-# Kafka without zookeeper is supported after image tag 6.2.0
 # ARM64 supported images for kafka can be found here
 # https://hub.docker.com/r/confluentinc/cp-kafka/tags?page=1&name=arm64
 ---
@@ -12,18 +10,18 @@ services:
       - 8080:8080
     depends_on:
       - kafka0
-      - schemaregistry0
+      - schema-registry0
       - kafka-connect0
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
-      KAFKA_CLUSTERS_0_JMXPORT: 9997
-      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
+      KAFKA_CLUSTERS_0_METRICS_PORT: 9997
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schema-registry0:8085
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083

   kafka0:
-    image: confluentinc/cp-kafka:7.0.5.arm64
+    image: confluentinc/cp-kafka:7.2.1.arm64
     hostname: kafka0
     container_name: kafka0
     ports:
@@ -44,14 +42,14 @@ services:
       KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
       KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
       KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
-      JMX_PORT: 9997
+      KAFKA_JMX_PORT: 9997
       KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
     volumes:
       - ./scripts/update_run.sh:/tmp/update_run.sh
     command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

-  schemaregistry0:
-    image: confluentinc/cp-schema-registry:7.0.5.arm64
+  schema-registry0:
+    image: confluentinc/cp-schema-registry:7.2.1.arm64
     ports:
       - 8085:8085
     depends_on:
@@ -59,20 +57,20 @@ services:
     environment:
       SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
       SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
-      SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
-      SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry0
+      SCHEMA_REGISTRY_LISTENERS: http://schema-registry0:8085

       SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
       SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
       SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

   kafka-connect0:
-    image: confluentinc/cp-kafka-connect:7.0.5.arm64
+    image: confluentinc/cp-kafka-connect:7.2.1.arm64
     ports:
       - 8083:8083
     depends_on:
       - kafka0
-      - schemaregistry0
+      - schema-registry0
     environment:
       CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
       CONNECT_GROUP_ID: compose-connect-group
@@ -83,16 +81,16 @@ services:
       CONNECT_STATUS_STORAGE_TOPIC: _connect_status
       CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
       CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
-      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
+      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry0:8085
       CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
-      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
+      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry0:8085
       CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
       CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
       CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
       CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"

   kafka-init-topics:
-    image: confluentinc/cp-kafka:7.0.5.arm64
+    image: confluentinc/cp-kafka:7.2.1.arm64
     volumes:
       - ./message.json:/data/message.json
     depends_on:
@@ -102,4 +100,4 @@ services:
       kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
       kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
       kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
-      kafka-console-producer --broker-list kafka0:29092 -topic second.users < /data/message.json'"
+      kafka-console-producer --bootstrap-server kafka0:29092 --topic second.users < /data/message.json'"
@@ -8,52 +8,40 @@ services:
     ports:
       - 8080:8080
     depends_on:
-      - zookeeper0
-      - kafka0
+      - kafka
     environment:
       KAFKA_CLUSTERS_0_NAME: local
-      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
-      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
       KAFKA_CLUSTERS_0_METRICS_PORT: 9997
       SERVER_SERVLET_CONTEXT_PATH: /kafkaui
       AUTH_TYPE: "LOGIN_FORM"
       SPRING_SECURITY_USER_NAME: admin
       SPRING_SECURITY_USER_PASSWORD: pass

-  zookeeper0:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2181:2181
-
-  kafka0:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper0
+  kafka:
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka
+    container_name: kafka
     ports:
-      - 9092:9092
-      - 9997:9997
+      - "9092:9092"
+      - "9997:9997"
     environment:
       KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092'
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      JMX_PORT: 9997
-      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
+      KAFKA_LISTENERS: 'PLAINTEXT://kafka:29092,CONTROLLER://kafka:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+    volumes:
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

-  kafka-init-topics:
-    image: confluentinc/cp-kafka:5.3.1
-    volumes:
-      - ./message.json:/data/message.json
-    depends_on:
-      - kafka0
-    command: "bash -c 'echo Waiting for Kafka to be ready... && \
-      cub kafka-ready -b kafka0:29092 1 30 && \
-      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
-      kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
-      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
-      kafka-console-producer --broker-list kafka0:29092 -topic second.users < /data/message.json'"
@@ -1,68 +1,62 @@
 ---
-version: '2'
+version: "2"
 services:

   kafka-ui:
     container_name: kafka-ui
     image: provectuslabs/kafka-ui:latest
     ports:
       - 8080:8080
     depends_on:
-      - zookeeper0
-      - zookeeper1
       - kafka0
-      - kafka1
       - schemaregistry0
       - kafka-connect0
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
-      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
       KAFKA_CLUSTERS_0_METRICS_PORT: 9997
       KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_USERNAME: admin
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_PASSWORD: admin-secret
       KAFKA_CLUSTERS_0_KSQLDBSERVER: http://ksqldb:8088

-  zookeeper0:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2181:2181
-
   kafka0:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper0
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka0
+    container_name: kafka0
     ports:
-      - 9092:9092
-      - 9997:9997
+      - "9092:9092"
+      - "9997:9997"
     environment:
       KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
+      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092"
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      JMX_PORT: 9997
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
       KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+      KAFKA_PROCESS_ROLES: "broker,controller"
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka0:29093"
+      KAFKA_LISTENERS: "PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092"
+      KAFKA_INTER_BROKER_LISTENER_NAME: "PLAINTEXT"
+      KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+      KAFKA_LOG_DIRS: "/tmp/kraft-combined-logs"
+    volumes:
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+    command: 'bash -c ''if [ ! -f /tmp/update_run.sh ]; then echo "ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'''

   schemaregistry0:
-    image: confluentinc/cp-schema-registry:5.5.0
+    image: confluentinc/cp-schema-registry:7.2.1
     ports:
       - 8085:8085
     depends_on:
-      - zookeeper0
       - kafka0
     environment:
       SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
-      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
       SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
       SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
       SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@@ -71,7 +65,6 @@ services:
       SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
       SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

-
   kafka-connect0:
     build:
       context: ./kafka-connect
@@ -105,47 +98,17 @@ services:
       CONNECT_REST_EXTENSION_CLASSES: "org.apache.kafka.connect.rest.basic.auth.extension.BasicAuthSecurityRestExtension"
       KAFKA_OPTS: "-Djava.security.auth.login.config=/conf/kafka_connect.jaas"

-       # AWS_ACCESS_KEY_ID: ""
-       # AWS_SECRET_ACCESS_KEY: ""
+      # AWS_ACCESS_KEY_ID: ""
+      # AWS_SECRET_ACCESS_KEY: ""

   kafka-init-topics:
-    image: confluentinc/cp-kafka:5.3.1
+    image: confluentinc/cp-kafka:7.2.1
     volumes:
       - ./message.json:/data/message.json
     depends_on:
       - kafka1
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
-      cub kafka-ready -b kafka1:29092 1 30 && \
-      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
-      kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
-      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
-      kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
+      cub kafka-ready -b kafka0:29092 1 30 && \
+      kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+      kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+      kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"

   create-connectors:
     image: ellerbrock/alpine-bash-curl-ssl
     depends_on:
       - postgres-db
       - kafka-connect0
     volumes:
       - ./connectors:/connectors
     command: bash -c '/connectors/start.sh'

   ksqldb:
     image: confluentinc/ksqldb-server:0.18.0
     depends_on:
       - kafka0
       - kafka-connect0
       - schemaregistry0
     ports:
       - 8088:8088
     environment:
       KSQL_CUB_KAFKA_TIMEOUT: 120
       KSQL_LISTENERS: http://0.0.0.0:8088
       KSQL_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
       KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true"
       KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: "true"
       KSQL_KSQL_CONNECT_URL: http://kafka-connect0:8083
       KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
       KSQL_KSQL_SERVICE_ID: my_ksql_1
       KSQL_KSQL_HIDDEN_TOPICS: '^_.*'
       KSQL_CACHE_MAX_BYTES_BUFFERING: 0
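Since kafka-connect0 exposes its REST API on 8083 secured by BasicAuthSecurityRestExtension plus the mounted JAAS file, connectors are managed over HTTP. A hedged sketch; the admin/admin-secret credentials match the compose file, while the connector payload is purely illustrative (real configs live in ./connectors):

    # Inspect available plugins and running connectors:
    curl -s -u admin:admin-secret http://localhost:8083/connector-plugins
    curl -s -u admin:admin-secret http://localhost:8083/connectors
    # Register a connector from a JSON definition (placeholder config):
    curl -s -u admin:admin-secret -X POST http://localhost:8083/connectors \
      -H 'Content-Type: application/json' \
      -d '{"name":"example-sink","config":{"connector.class":"FileStreamSink","topics":"users","file":"/tmp/users.out","tasks.max":"1"}}'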
@@ -9,14 +9,12 @@ services:
       - 8080:8080
       - 5005:5005
     depends_on:
-      - zookeeper0
       - kafka0
       - schemaregistry0
       - kafka-connect0
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
-      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
       KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
@@ -34,29 +32,29 @@ services:
       - ./jmx/clienttruststore:/jmx/clienttruststore
       - ./jmx/clientkeystore:/jmx/clientkeystore

-  zookeeper0:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2181:2181
-
   kafka0:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper0
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka0
+    container_name: kafka0
     ports:
       - 9092:9092
       - 9997:9997
     environment:
       KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      JMX_PORT: 9997
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+      KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
       # CHMOD 700 FOR JMXREMOTE.* FILES
       KAFKA_JMX_OPTS: >-
         -Dcom.sun.management.jmxremote
@@ -75,21 +73,21 @@ services:
         -Djava.rmi.server.logCalls=true
       # -Djavax.net.debug=ssl:handshake
     volumes:
       - ./jmx/serverkeystore:/jmx/serverkeystore
       - ./jmx/servertruststore:/jmx/servertruststore
       - ./jmx/jmxremote.password:/jmx/jmxremote.password
       - ./jmx/jmxremote.access:/jmx/jmxremote.access
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

   schemaregistry0:
-    image: confluentinc/cp-schema-registry:5.5.0
+    image: confluentinc/cp-schema-registry:7.2.1
     ports:
       - 8085:8085
     depends_on:
-      - zookeeper0
       - kafka0
     environment:
       SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
-      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
       SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
       SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
       SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@@ -99,7 +97,7 @@ services:
       SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

   kafka-connect0:
-    image: confluentinc/cp-kafka-connect:6.0.1
+    image: confluentinc/cp-kafka-connect:7.2.1
     ports:
       - 8083:8083
     depends_on:
@@ -124,13 +122,13 @@ services:
       CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"

   kafka-init-topics:
-    image: confluentinc/cp-kafka:5.3.1
+    image: confluentinc/cp-kafka:7.2.1
     volumes:
       - ./message.json:/data/message.json
     depends_on:
       - kafka0
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
       cub kafka-ready -b kafka0:29092 1 30 && \
-      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
-      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
-      kafka-console-producer --broker-list kafka0:29092 -topic second.users < /data/message.json'"
+      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+      kafka-console-producer --bootstrap-server kafka0:29092 --topic second.users < /data/message.json'"
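Attaching a JMX client to a broker locked down like the one above requires the client keystore/truststore that kafka-ui mounts. A hedged jconsole invocation (store passwords are placeholders; port 9997 is assumed to be published to the host):

    jconsole \
      -J-Djavax.net.ssl.keyStore=./jmx/clientkeystore \
      -J-Djavax.net.ssl.keyStorePassword=changeit \
      -J-Djavax.net.ssl.trustStore=./jmx/clienttruststore \
      -J-Djavax.net.ssl.trustStorePassword=changeit \
      service:jmx:rmi:///jndi/rmi://localhost:9997/jmxrmi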
@@ -2,33 +2,33 @@
 version: '2'
 services:

-  zookeeper0:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2181:2181
-
   kafka0:
-    image: confluentinc/cp-kafka:5.3.1
-    # downloading jmx_exporter javaagent and starting kafka
-    command: "/usr/share/jmx_exporter/kafka-prepare-and-run"
-    depends_on:
-      - zookeeper0
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka0
+    container_name: kafka0
+    ports:
+      - "9092:9092"
+      - "11001:11001"
     environment:
       KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+      KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
       KAFKA_OPTS: -javaagent:/usr/share/jmx_exporter/jmx_prometheus_javaagent.jar=11001:/usr/share/jmx_exporter/kafka-broker.yml
-    ports:
-      - 9092:9092
-      - 11001:11001
     volumes:
       - ./jmx-exporter:/usr/share/jmx_exporter/
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /usr/share/jmx_exporter/kafka-prepare-and-run ; fi'"

   kafka-ui:
     container_name: kafka-ui
@@ -36,7 +36,6 @@ services:
     ports:
       - 8080:8080
     depends_on:
-      - zookeeper0
       - kafka0
     environment:
       KAFKA_CLUSTERS_0_NAME: local
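With the jmx_prometheus_javaagent bound to 11001 and that port published above, broker metrics are served as plain HTTP text, so verifying the exporter is a one-liner; a Prometheus scrape job would simply target the same endpoint:

    # Spot-check the exporter output from the host:
    curl -s http://localhost:11001/metrics | head
    # Filter for broker series:
    curl -s http://localhost:11001/metrics | grep -i 'kafka_server' | head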
@ -8,86 +8,89 @@ services:
|
|||
ports:
|
||||
- 8080:8080
|
||||
depends_on:
|
||||
- zookeeper0
|
||||
- zookeeper1
|
||||
- kafka0
|
||||
- kafka1
|
||||
- schemaregistry0
|
||||
- schemaregistry1
|
||||
- kafka-connect0
|
||||
environment:
|
||||
KAFKA_CLUSTERS_0_NAME: local
|
||||
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
|
||||
KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
|
||||
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
|
||||
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
|
||||
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
|
||||
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
|
||||
KAFKA_CLUSTERS_1_NAME: secondLocal
|
||||
KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
|
||||
KAFKA_CLUSTERS_1_ZOOKEEPER: zookeeper1:2181
|
||||
KAFKA_CLUSTERS_1_METRICS_PORT: 9998
|
||||
KAFKA_CLUSTERS_0_METRICS_PORT: 9998
|
||||
KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
|
||||
KAFKA_CLUSTERS_1_KAFKACONNECT_0_NAME: first
|
||||
KAFKA_CLUSTERS_1_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
|
||||
|
||||
zookeeper0:
|
||||
image: confluentinc/cp-zookeeper:5.2.4
|
||||
environment:
|
||||
ZOOKEEPER_CLIENT_PORT: 2181
|
||||
ZOOKEEPER_TICK_TIME: 2000
|
||||
ports:
|
||||
- 2181:2181
|
||||
|
||||
kafka0:
|
||||
image: confluentinc/cp-kafka:5.3.1
|
||||
depends_on:
|
||||
- zookeeper0
|
||||
image: confluentinc/cp-kafka:7.2.1
|
||||
hostname: kafka0
|
||||
container_name: kafka0
|
||||
ports:
|
||||
- 9092:9092
|
||||
- 9997:9997
|
||||
- "9092:9092"
|
||||
- "9997:9997"
|
||||
environment:
|
||||
KAFKA_BROKER_ID: 1
|
||||
KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
|
||||
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
|
||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
|
||||
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
|
||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
|
||||
JMX_PORT: 9997
|
||||
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
|
||||
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
|
||||
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
|
||||
KAFKA_JMX_PORT: 9997
|
||||
KAFKA_JMX_HOSTNAME: localhost
|
||||
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
|
||||
|
||||
zookeeper1:
|
||||
image: confluentinc/cp-zookeeper:5.2.4
|
||||
environment:
|
||||
ZOOKEEPER_CLIENT_PORT: 2181
|
||||
ZOOKEEPER_TICK_TIME: 2000
|
||||
KAFKA_PROCESS_ROLES: 'broker,controller'
|
||||
KAFKA_NODE_ID: 1
|
||||
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
|
||||
KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
|
||||
KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
|
||||
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
|
||||
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
|
||||
volumes:
|
||||
- ./scripts/update_run.sh:/tmp/update_run.sh
|
||||
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
|
||||
|
||||
kafka1:
image: confluentinc/cp-kafka:5.3.1
depends_on:
- zookeeper1
image: confluentinc/cp-kafka:7.2.1
hostname: kafka1
container_name: kafka1
ports:
- 9093:9093
- 9998:9998
- "9093:9092"
- "9998:9998"
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9092'
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
JMX_PORT: 9998
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka1 -Dcom.sun.management.jmxremote.rmi.port=9998
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9998
KAFKA_JMX_HOSTNAME: localhost
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9998
KAFKA_PROCESS_ROLES: 'broker,controller'
KAFKA_NODE_ID: 1
KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka1:29093'
KAFKA_LISTENERS: 'PLAINTEXT://kafka1:29092,CONTROLLER://kafka1:29093,PLAINTEXT_HOST://0.0.0.0:9092'
KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
volumes:
- ./scripts/update_run.sh:/tmp/update_run.sh
command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
schemaregistry0:
image: confluentinc/cp-schema-registry:5.5.0
image: confluentinc/cp-schema-registry:7.2.1
ports:
- 8085:8085
depends_on:
- zookeeper0
- kafka0
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085

@ -97,15 +100,13 @@ services:
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
schemaregistry1:
image: confluentinc/cp-schema-registry:5.5.0
image: confluentinc/cp-schema-registry:7.2.1
ports:
- 18085:8085
depends_on:
- zookeeper1
- kafka1
environment:
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085

@ -140,14 +141,14 @@ services:
CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"

kafka-init-topics:
image: confluentinc/cp-kafka:5.3.1
image: confluentinc/cp-kafka:7.2.1
volumes:
- ./message.json:/data/message.json
depends_on:
- kafka1
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka1:29092 1 30 && \
kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
kafka-console-producer --bootstrap-server kafka1:29092 --topic second.users < /data/message.json'"
48
documentation/compose/kafka-with-zookeeper.yaml
Normal file
@ -0,0 +1,48 @@
---
version: '2'
services:

zookeeper:
image: confluentinc/cp-zookeeper:7.2.1
hostname: zookeeper
container_name: zookeeper
ports:
- "2181:2181"
environment:
ZOOKEEPER_CLIENT_PORT: 2181
ZOOKEEPER_TICK_TIME: 2000

kafka:
image: confluentinc/cp-server:7.2.1
hostname: kafka
container_name: kafka
depends_on:
- zookeeper
ports:
- "9092:9092"
- "9997:9997"
environment:
KAFKA_BROKER_ID: 1
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
KAFKA_JMX_PORT: 9997
KAFKA_JMX_HOSTNAME: kafka

kafka-init-topics:
image: confluentinc/cp-kafka:7.2.1
volumes:
- ./message.json:/data/message.json
depends_on:
- kafka
command: "bash -c 'echo Waiting for Kafka to be ready... && \
cub kafka-ready -b kafka:29092 1 30 && \
kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka:29092 && \
kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka:29092 && \
kafka-console-producer --bootstrap-server kafka:29092 --topic users < /data/message.json'"
1
documentation/compose/scripts/clusterID
Normal file
@ -0,0 +1 @@
zlFiTJelTOuhnklFwLWixw
1
documentation/compose/scripts/create_cluster_id.sh
Normal file
@ -0,0 +1 @@
kafka-storage random-uuid > /workspace/kafka-ui/documentation/compose/clusterID
11
documentation/compose/scripts/update_run_cluster.sh
Normal file
@ -0,0 +1,11 @@
#!/bin/sh
# This script is required to run a Kafka cluster in KRaft mode (without ZooKeeper)

# Docker workaround: Remove check for KAFKA_ZOOKEEPER_CONNECT parameter
sed -i '/KAFKA_ZOOKEEPER_CONNECT/d' /etc/confluent/docker/configure

# Docker workaround: Ignore cub zk-ready
sed -i 's/cub zk-ready/echo ignore zk-ready/' /etc/confluent/docker/ensure

# KRaft required step: Format the storage directory with a new cluster ID
echo "kafka-storage format --ignore-formatted -t $(cat /tmp/clusterID) -c /etc/kafka/kafka.properties" >> /etc/confluent/docker/ensure
@ -58,7 +58,6 @@ For Azure AD (Office365) OAUTH2 you'll want to add additional environment variab
docker run -p 8080:8080 \
-e KAFKA_CLUSTERS_0_NAME="${cluster_name}"\
-e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS="${kafka_listeners}" \
-e KAFKA_CLUSTERS_0_ZOOKEEPER="${zookeeper_servers}" \
-e KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS="${kafka_connect_servers}"
-e AUTH_TYPE=OAUTH2 \
-e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
@ -173,6 +173,12 @@
<version>${mockito.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>net.bytebuddy</groupId>
<artifactId>byte-buddy</artifactId>
<version>${byte-buddy.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.assertj</groupId>
<artifactId>assertj-core</artifactId>
@ -1,53 +0,0 @@
package com.provectus.kafka.ui.client;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.service.ksql.KsqlApiClient;
import com.provectus.kafka.ui.strategy.ksql.statement.BaseStrategy;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.stereotype.Service;
import org.springframework.web.reactive.function.BodyInserters;
import org.springframework.web.reactive.function.client.ClientResponse;
import org.springframework.web.reactive.function.client.WebClient;
import reactor.core.publisher.Mono;

@Service
@RequiredArgsConstructor
@Slf4j
public class KsqlClient {
private final WebClient webClient;
private final ObjectMapper mapper;

public Mono<KsqlCommandResponseDTO> execute(BaseStrategy ksqlStatement, KafkaCluster cluster) {
return webClient.post()
.uri(ksqlStatement.getUri())
.headers(httpHeaders -> KsqlApiClient.setBasicAuthIfEnabled(httpHeaders, cluster))
.accept(new MediaType("application", "vnd.ksql.v1+json"))
.body(BodyInserters.fromValue(ksqlStatement.getKsqlCommand()))
.retrieve()
.onStatus(HttpStatus::isError, this::getErrorMessage)
.bodyToMono(byte[].class)
.map(this::toJson)
.map(ksqlStatement::serializeResponse);
}

private Mono<Throwable> getErrorMessage(ClientResponse response) {
return response
.bodyToMono(byte[].class)
.map(this::toJson)
.map(jsonNode -> jsonNode.get("message").asText())
.flatMap(error -> Mono.error(new UnprocessableEntityException(error)));
}

@SneakyThrows
private JsonNode toJson(byte[] content) {
return this.mapper.readTree(content);
}
}
@ -53,17 +53,6 @@ public class ConsumerGroupsController extends AbstractController implements Cons
.map(ResponseEntity::ok);
}


@Override
public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getConsumerGroups(String clusterName,
ServerWebExchange exchange) {
return consumerGroupService.getAllConsumerGroups(getCluster(clusterName))
.map(Flux::fromIterable)
.map(f -> f.map(ConsumerGroupMapper::toDto))
.map(ResponseEntity::ok)
.switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
}

@Override
public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(
String clusterName, String topicName, ServerWebExchange exchange) {
@ -17,9 +17,16 @@ public class InfoController extends AbstractController implements TimeStampForma

@Value("${timestamp.format:dd.MM.YYYY HH:mm:ss}")
private String timeStampFormat;
@Value("${timestamp.format:DD.MM.YYYY HH:mm:ss}")
private String timeStampFormatIso;

@Override
public Mono<ResponseEntity<TimeStampFormatDTO>> getTimeStampFormat(ServerWebExchange exchange) {
return Mono.just(ResponseEntity.ok(new TimeStampFormatDTO().timeStampFormat(timeStampFormat)));
}

@Override
public Mono<ResponseEntity<TimeStampFormatDTO>> getTimeStampFormatISO(ServerWebExchange exchange) {
return Mono.just(ResponseEntity.ok(new TimeStampFormatDTO().timeStampFormat(timeStampFormatIso)));
}
}
@ -1,15 +1,12 @@
package com.provectus.kafka.ui.controller;

import com.provectus.kafka.ui.api.KsqlApi;
import com.provectus.kafka.ui.model.KsqlCommandDTO;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.KsqlCommandV2DTO;
import com.provectus.kafka.ui.model.KsqlCommandV2ResponseDTO;
import com.provectus.kafka.ui.model.KsqlResponseDTO;
import com.provectus.kafka.ui.model.KsqlStreamDescriptionDTO;
import com.provectus.kafka.ui.model.KsqlTableDescriptionDTO;
import com.provectus.kafka.ui.model.KsqlTableResponseDTO;
import com.provectus.kafka.ui.service.KsqlService;
import com.provectus.kafka.ui.service.ksql.KsqlServiceV2;
import java.util.List;
import java.util.Map;

@ -27,17 +24,8 @@ import reactor.core.publisher.Mono;
@RequiredArgsConstructor
@Slf4j
public class KsqlController extends AbstractController implements KsqlApi {
private final KsqlService ksqlService;
private final KsqlServiceV2 ksqlServiceV2;

@Override
public Mono<ResponseEntity<KsqlCommandResponseDTO>> executeKsqlCommand(String clusterName,
Mono<KsqlCommandDTO>
ksqlCommand,
ServerWebExchange exchange) {
return ksqlService.executeKsqlCommand(getCluster(clusterName), ksqlCommand)
.map(ResponseEntity::ok);
}
private final KsqlServiceV2 ksqlServiceV2;

@Override
public Mono<ResponseEntity<KsqlCommandV2ResponseDTO>> executeKsql(String clusterName,
@ -5,6 +5,7 @@ import static com.provectus.kafka.ui.serde.api.Serde.Target.VALUE;
import static java.util.stream.Collectors.toMap;

import com.provectus.kafka.ui.api.MessagesApi;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;

@ -18,6 +19,7 @@ import com.provectus.kafka.ui.service.MessagesService;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import javax.annotation.Nullable;
import javax.validation.Valid;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;

@ -63,18 +65,22 @@ public class MessagesController extends AbstractController implements MessagesAp
String keySerde,
String valueSerde,
ServerWebExchange exchange) {
seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING;
seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD;
filterQueryType = filterQueryType != null ? filterQueryType : MessageFilterTypeDTO.STRING_CONTAINS;
int recordsLimit =
Optional.ofNullable(limit).map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT)).orElse(DEFAULT_LOAD_RECORD_LIMIT);

var positions = new ConsumerPosition(
seekType != null ? seekType : SeekTypeDTO.BEGINNING,
parseSeekTo(topicName, seekTo),
seekDirection
seekType,
topicName,
parseSeekTo(topicName, seekType, seekTo)
);
int recordsLimit = Optional.ofNullable(limit)
.map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT))
.orElse(DEFAULT_LOAD_RECORD_LIMIT);
return Mono.just(
ResponseEntity.ok(
messagesService.loadMessages(
getCluster(clusterName), topicName, positions, q, filterQueryType, recordsLimit, keySerde, valueSerde)
getCluster(clusterName), topicName, positions, q, filterQueryType,
recordsLimit, seekDirection, keySerde, valueSerde)
)
);
}
@ -92,9 +98,13 @@ public class MessagesController extends AbstractController implements MessagesAp
* The format is [partition]::[offset] for specifying offsets
* or [partition]::[timestamp in millis] for specifying timestamps.
*/
private Map<TopicPartition, Long> parseSeekTo(String topic, List<String> seekTo) {
@Nullable
private Map<TopicPartition, Long> parseSeekTo(String topic, SeekTypeDTO seekType, List<String> seekTo) {
if (seekTo == null || seekTo.isEmpty()) {
return Map.of();
if (seekType == SeekTypeDTO.LATEST || seekType == SeekTypeDTO.BEGINNING) {
return null;
}
throw new ValidationException("seekTo should be set if seekType is " + seekType);
}
return seekTo.stream()
.map(p -> {
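As an aside on this hunk: each seekTo entry is a plain "[partition]::[offset-or-timestamp]" string, per the javadoc above. A minimal, hypothetical decoder for one entry could look like the sketch below (parseEntry is illustrative, not a method from the project):

  import java.util.Map;
  import org.apache.kafka.common.TopicPartition;

  class SeekToExample {
    // Hypothetical helper: decode one "0::42"-style entry for 'topic'.
    static Map.Entry<TopicPartition, Long> parseEntry(String topic, String entry) {
      String[] parts = entry.split("::"); // [partition]::[offset or epoch-millis]
      TopicPartition tp = new TopicPartition(topic, Integer.parseInt(parts[0]));
      return Map.entry(tp, Long.parseLong(parts[1]));
    }
  }

So seekTo=["0::42", "1::100"] targets partition 0 at offset 42 and partition 1 at offset 100 (or at those timestamps when seekType is TIMESTAMP).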
@ -1,21 +1,18 @@
package com.provectus.kafka.ui.emitter;

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.util.OffsetsSeekBackward;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

@ -29,80 +26,68 @@ public class BackwardRecordEmitter

private static final Duration POLL_TIMEOUT = Duration.ofMillis(200);

private final Function<Map<String, Object>, KafkaConsumer<Bytes, Bytes>> consumerSupplier;
private final OffsetsSeekBackward offsetsSeek;
private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
private final ConsumerPosition consumerPosition;
private final int messagesPerPage;

public BackwardRecordEmitter(
Function<Map<String, Object>, KafkaConsumer<Bytes, Bytes>> consumerSupplier,
OffsetsSeekBackward offsetsSeek,
Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
ConsumerPosition consumerPosition,
int messagesPerPage,
ConsumerRecordDeserializer recordDeserializer) {
super(recordDeserializer);
this.offsetsSeek = offsetsSeek;
this.consumerPosition = consumerPosition;
this.messagesPerPage = messagesPerPage;
this.consumerSupplier = consumerSupplier;
}

@Override
public void accept(FluxSink<TopicMessageEventDTO> sink) {
try (KafkaConsumer<Bytes, Bytes> configConsumer = consumerSupplier.apply(Map.of())) {
final List<TopicPartition> requestedPartitions =
offsetsSeek.getRequestedPartitions(configConsumer);
sendPhase(sink, "Request partitions");
final int msgsPerPartition = offsetsSeek.msgsPerPartition(requestedPartitions.size());
try (KafkaConsumer<Bytes, Bytes> consumer =
consumerSupplier.apply(
Map.of(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, msgsPerPartition)
)
) {
sendPhase(sink, "Created consumer");
try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
sendPhase(sink, "Created consumer");

SortedMap<TopicPartition, Long> readUntilOffsets =
new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
readUntilOffsets.putAll(offsetsSeek.getPartitionsOffsets(consumer));
var seekOperations = SeekOperations.create(consumer, consumerPosition);
var readUntilOffsets = new TreeMap<TopicPartition, Long>(Comparator.comparingInt(TopicPartition::partition));
readUntilOffsets.putAll(seekOperations.getOffsetsForSeek());

sendPhase(sink, "Requested partitions offsets");
log.debug("partition offsets: {}", readUntilOffsets);
var waitingOffsets =
offsetsSeek.waitingOffsets(consumer, readUntilOffsets.keySet());
log.debug("waiting offsets {} {}",
waitingOffsets.getBeginOffsets(),
waitingOffsets.getEndOffsets()
);
int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readUntilOffsets.size());
log.debug("'Until' offsets for polling: {}", readUntilOffsets);

while (!sink.isCancelled() && !waitingOffsets.beginReached()) {
new TreeMap<>(readUntilOffsets).forEach((tp, readToOffset) -> {
long lowestOffset = waitingOffsets.getBeginOffsets().get(tp.partition());
long readFromOffset = Math.max(lowestOffset, readToOffset - msgsPerPartition);

partitionPollIteration(tp, readFromOffset, readToOffset, consumer, sink)
.stream()
.filter(r -> !sink.isCancelled())
.forEach(r -> sendMessage(sink, r));

waitingOffsets.markPolled(tp.partition(), readFromOffset);
if (waitingOffsets.getBeginOffsets().get(tp.partition()) == null) {
// we fully read this partition -> removing it from polling iterations
readUntilOffsets.remove(tp);
} else {
readUntilOffsets.put(tp, readFromOffset);
}
});

if (waitingOffsets.beginReached()) {
log.debug("begin reached after partitions poll iteration");
} else if (sink.isCancelled()) {
log.debug("sink is cancelled after partitions poll iteration");
while (!sink.isCancelled() && !readUntilOffsets.isEmpty()) {
new TreeMap<>(readUntilOffsets).forEach((tp, readToOffset) -> {
if (sink.isCancelled()) {
return; //fast return in case of sink cancellation
}
long beginOffset = seekOperations.getBeginOffsets().get(tp);
long readFromOffset = Math.max(beginOffset, readToOffset - msgsToPollPerPartition);

partitionPollIteration(tp, readFromOffset, readToOffset, consumer, sink)
.stream()
.filter(r -> !sink.isCancelled())
.forEach(r -> sendMessage(sink, r));

if (beginOffset == readFromOffset) {
// we fully read this partition -> removing it from polling iterations
readUntilOffsets.remove(tp);
} else {
// updating 'to' offset for next polling iteration
readUntilOffsets.put(tp, readFromOffset);
}
});
if (readUntilOffsets.isEmpty()) {
log.debug("begin reached after partitions poll iteration");
} else if (sink.isCancelled()) {
log.debug("sink is cancelled after partitions poll iteration");
}
sink.complete();
log.debug("Polling finished");
}
sink.complete();
log.debug("Polling finished");
} catch (Exception e) {
log.error("Error occurred while consuming records", e);
sink.error(e);
}
}


private List<ConsumerRecord<Bytes, Bytes>> partitionPollIteration(
TopicPartition tp,
long fromOffset,
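The arithmetic driving this backward paging is easy to check by hand; the standalone sketch below (with made-up numbers) mirrors the msgsToPollPerPartition and readFromOffset lines in the new code above:

  // Backward paging window, assuming messagesPerPage = 100 and 3 target partitions.
  int messagesPerPage = 100;
  int partitionCount = 3;
  int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / partitionCount); // 34

  long beginOffset = 0L;    // from SeekOperations.getBeginOffsets()
  long readToOffset = 50L;  // current 'until' offset for this partition
  long readFromOffset = Math.max(beginOffset, readToOffset - msgsToPollPerPartition); // 16

  // The next iteration polls offsets [16, 50); once readFromOffset == beginOffset
  // the partition is dropped from readUntilOffsets and paging for it stops.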
@ -1,8 +1,8 @@
package com.provectus.kafka.ui.emitter;

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.util.OffsetsSeek;
import java.util.function.Supplier;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;

@ -17,34 +17,38 @@ public class ForwardRecordEmitter
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {

private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
private final OffsetsSeek offsetsSeek;
private final ConsumerPosition position;

public ForwardRecordEmitter(
Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
OffsetsSeek offsetsSeek,
ConsumerPosition position,
ConsumerRecordDeserializer recordDeserializer) {
super(recordDeserializer);
this.position = position;
this.consumerSupplier = consumerSupplier;
this.offsetsSeek = offsetsSeek;
}

@Override
public void accept(FluxSink<TopicMessageEventDTO> sink) {
try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
sendPhase(sink, "Assigning partitions");
var waitingOffsets = offsetsSeek.assignAndSeek(consumer);
var seekOperations = SeekOperations.create(consumer, position);
seekOperations.assignAndSeekNonEmptyPartitions();

// we use empty polls counting to verify that topic was fully read
int emptyPolls = 0;
while (!sink.isCancelled() && !waitingOffsets.endReached() && emptyPolls < NO_MORE_DATA_EMPTY_POLLS_COUNT) {
while (!sink.isCancelled()
&& !seekOperations.assignedPartitionsFullyPolled()
&& emptyPolls < NO_MORE_DATA_EMPTY_POLLS_COUNT) {

sendPhase(sink, "Polling");
ConsumerRecords<Bytes, Bytes> records = poll(sink, consumer);
log.info("{} records polled", records.count());
emptyPolls = records.isEmpty() ? emptyPolls + 1 : 0;

for (ConsumerRecord<Bytes, Bytes> msg : records) {
if (!sink.isCancelled() && !waitingOffsets.endReached()) {
if (!sink.isCancelled()) {
sendMessage(sink, msg);
waitingOffsets.markPolled(msg);
} else {
break;
}
@ -0,0 +1,59 @@
package com.provectus.kafka.ui.emitter;

import com.google.common.base.Preconditions;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

@Slf4j
@Getter
public class OffsetsInfo {

private final Consumer<?, ?> consumer;

private final Map<TopicPartition, Long> beginOffsets;
private final Map<TopicPartition, Long> endOffsets;

private final Set<TopicPartition> nonEmptyPartitions = new HashSet<>();
private final Set<TopicPartition> emptyPartitions = new HashSet<>();

public OffsetsInfo(Consumer<?, ?> consumer, String topic) {
this(consumer,
consumer.partitionsFor(topic).stream()
.map(pi -> new TopicPartition(topic, pi.partition()))
.collect(Collectors.toList())
);
}

public OffsetsInfo(Consumer<?, ?> consumer,
Collection<TopicPartition> targetPartitions) {
this.consumer = consumer;
this.beginOffsets = consumer.beginningOffsets(targetPartitions);
this.endOffsets = consumer.endOffsets(targetPartitions);
endOffsets.forEach((tp, endOffset) -> {
var beginningOffset = beginOffsets.get(tp);
if (endOffset > beginningOffset) {
nonEmptyPartitions.add(tp);
} else {
emptyPartitions.add(tp);
}
});
}

public boolean assignedPartitionsFullyPolled() {
for (var tp: consumer.assignment()) {
Preconditions.checkArgument(endOffsets.containsKey(tp));
if (endOffsets.get(tp) > consumer.position(tp)) {
return false;
}
}
return true;
}

}
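A quick way to see the empty/non-empty split in action is with Kafka's MockConsumer; the following is a test-style sketch (topic name and offsets are made up, not taken from the project's tests):

  import java.util.List;
  import java.util.Map;
  import java.util.Set;
  import org.apache.kafka.clients.consumer.MockConsumer;
  import org.apache.kafka.clients.consumer.OffsetResetStrategy;
  import org.apache.kafka.common.PartitionInfo;
  import org.apache.kafka.common.TopicPartition;

  var consumer = new MockConsumer<String, String>(OffsetResetStrategy.EARLIEST);
  var tp0 = new TopicPartition("test", 0);
  var tp1 = new TopicPartition("test", 1);
  consumer.updatePartitions("test", List.of(
      new PartitionInfo("test", 0, null, null, null),
      new PartitionInfo("test", 1, null, null, null)));
  consumer.updateBeginningOffsets(Map.of(tp0, 0L, tp1, 5L));
  consumer.updateEndOffsets(Map.of(tp0, 10L, tp1, 5L));

  var info = new OffsetsInfo(consumer, "test");
  // tp0 holds records (begin 0 < end 10); tp1 is empty (begin == end == 5)
  assert info.getNonEmptyPartitions().equals(Set.of(tp0));
  assert info.getEmptyPartitions().equals(Set.of(tp1));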
@ -0,0 +1,111 @@
package com.provectus.kafka.ui.emitter;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;
import javax.annotation.Nullable;
import lombok.AccessLevel;
import lombok.RequiredArgsConstructor;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

@RequiredArgsConstructor(access = AccessLevel.PACKAGE)
class SeekOperations {

private final Consumer<?, ?> consumer;
private final OffsetsInfo offsetsInfo;
private final Map<TopicPartition, Long> offsetsForSeek; //only contains non-empty partitions!

static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
OffsetsInfo offsetsInfo;
if (consumerPosition.getSeekTo() == null) {
offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getTopic());
} else {
offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getSeekTo().keySet());
}
return new SeekOperations(
consumer,
offsetsInfo,
getOffsetsForSeek(consumer, offsetsInfo, consumerPosition.getSeekType(), consumerPosition.getSeekTo())
);
}

void assignAndSeekNonEmptyPartitions() {
consumer.assign(offsetsForSeek.keySet());
offsetsForSeek.forEach(consumer::seek);
}

Map<TopicPartition, Long> getBeginOffsets() {
return offsetsInfo.getBeginOffsets();
}

Map<TopicPartition, Long> getEndOffsets() {
return offsetsInfo.getEndOffsets();
}

boolean assignedPartitionsFullyPolled() {
return offsetsInfo.assignedPartitionsFullyPolled();
}

// Get offsets to seek to. NOTE: offsets do not contain empty partitions offsets
Map<TopicPartition, Long> getOffsetsForSeek() {
return offsetsForSeek;
}

/**
* Finds offsets for ConsumerPosition. Note: will return empty map if no offsets found for desired criteria.
*/
@VisibleForTesting
static Map<TopicPartition, Long> getOffsetsForSeek(Consumer<?, ?> consumer,
OffsetsInfo offsetsInfo,
SeekTypeDTO seekType,
@Nullable Map<TopicPartition, Long> seekTo) {
switch (seekType) {
case LATEST:
return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions());
case BEGINNING:
return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions());
case OFFSET:
Preconditions.checkNotNull(offsetsInfo);
return fixOffsets(offsetsInfo, seekTo);
case TIMESTAMP:
Preconditions.checkNotNull(offsetsInfo);
return offsetsForTimestamp(consumer, offsetsInfo, seekTo);
default:
throw new IllegalStateException();
}
}

private static Map<TopicPartition, Long> fixOffsets(OffsetsInfo offsetsInfo, Map<TopicPartition, Long> offsets) {
offsets = new HashMap<>(offsets);
offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());

Map<TopicPartition, Long> result = new HashMap<>();
offsets.forEach((tp, targetOffset) -> {
long endOffset = offsetsInfo.getEndOffsets().get(tp);
long beginningOffset = offsetsInfo.getBeginOffsets().get(tp);
// fixing offsets with min - max bounds
if (targetOffset > endOffset) {
targetOffset = endOffset;
} else if (targetOffset < beginningOffset) {
targetOffset = beginningOffset;
}
result.put(tp, targetOffset);
});
return result;
}

private static Map<TopicPartition, Long> offsetsForTimestamp(Consumer<?, ?> consumer, OffsetsInfo offsetsInfo,
Map<TopicPartition, Long> timestamps) {
timestamps = new HashMap<>(timestamps);
timestamps.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());

return consumer.offsetsForTimes(timestamps).entrySet().stream()
.filter(e -> e.getValue() != null)
.collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
}
}
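Pulling the pieces together, the intended call pattern from an emitter is roughly the following sketch (not a verbatim excerpt from the diff). Note that fixOffsets and offsetsForTimestamp above clamp or resolve the requested positions into the [beginning, end] range, so a stale OFFSET request never causes an out-of-range seek:

  // Sketch: 'consumer' is a KafkaConsumer<Bytes, Bytes>, 'position' a ConsumerPosition.
  var seekOperations = SeekOperations.create(consumer, position);
  seekOperations.assignAndSeekNonEmptyPartitions(); // empty partitions are never assigned

  while (!seekOperations.assignedPartitionsFullyPolled()) {
    var records = consumer.poll(java.time.Duration.ofMillis(200));
    records.forEach(r -> { /* deserialize and emit */ });
  }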
@ -1,8 +1,9 @@
package com.provectus.kafka.ui.emitter;

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.util.OffsetsSeek;
import java.util.HashMap;
import java.util.function.Supplier;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.KafkaConsumer;

@ -15,21 +16,21 @@ public class TailingEmitter extends AbstractEmitter
implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {

private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
private final OffsetsSeek offsetsSeek;
private final ConsumerPosition consumerPosition;

public TailingEmitter(ConsumerRecordDeserializer recordDeserializer,
Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
OffsetsSeek offsetsSeek) {
public TailingEmitter(Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
ConsumerPosition consumerPosition,
ConsumerRecordDeserializer recordDeserializer) {
super(recordDeserializer);
this.consumerSupplier = consumerSupplier;
this.offsetsSeek = offsetsSeek;
this.consumerPosition = consumerPosition;
}

@Override
public void accept(FluxSink<TopicMessageEventDTO> sink) {
try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
log.debug("Starting topic tailing");
offsetsSeek.assignAndSeek(consumer);
assignAndSeek(consumer);
while (!sink.isCancelled()) {
sendPhase(sink, "Polling");
var polled = poll(sink, consumer);

@ -40,9 +41,17 @@ public class TailingEmitter extends AbstractEmitter
} catch (InterruptException kafkaInterruptException) {
sink.complete();
} catch (Exception e) {
log.error("Error consuming {}", offsetsSeek.getConsumerPosition(), e);
log.error("Error consuming {}", consumerPosition, e);
sink.error(e);
}
}

private void assignAndSeek(KafkaConsumer<Bytes, Bytes> consumer) {
var seekOperations = SeekOperations.create(consumer, consumerPosition);
var seekOffsets = new HashMap<>(seekOperations.getEndOffsets()); // defaulting offsets to topic end
seekOffsets.putAll(seekOperations.getOffsetsForSeek()); // this will only set non-empty partitions
consumer.assign(seekOffsets.keySet());
seekOffsets.forEach(consumer::seek);
}

}
@ -1,12 +1,14 @@
package com.provectus.kafka.ui.model;

import java.util.Map;
import javax.annotation.Nullable;
import lombok.Value;
import org.apache.kafka.common.TopicPartition;

@Value
public class ConsumerPosition {
SeekTypeDTO seekType;
Map<TopicPartition, Long> seekTo;
SeekDirectionDTO seekDirection;
String topic;
@Nullable
Map<TopicPartition, Long> seekTo; // null if positioning should apply to all tps
}
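Since Lombok's @Value generates an all-args constructor in field order (seekType, topic, seekTo), positions are now built like this (illustrative values; this matches the constructor call shown in MessagesController above):

  // Seek partition 0 of "users" to offset 42:
  var byOffset = new ConsumerPosition(
      SeekTypeDTO.OFFSET,
      "users",
      Map.of(new TopicPartition("users", 0), 42L));

  // seekTo may be null for BEGINNING/LATEST, meaning "all partitions":
  var fromStart = new ConsumerPosition(SeekTypeDTO.BEGINNING, "users", null);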
@ -13,12 +13,12 @@ public class InternalPartition {
private final int inSyncReplicasCount;
private final int replicasCount;

private final long offsetMin;
private final long offsetMax;
private final Long offsetMin;
private final Long offsetMax;

// from log dir
private final long segmentSize;
private final long segmentCount;
private final Long segmentSize;
private final Integer segmentCount;


}
@ -42,9 +42,7 @@ public class InternalTopic {
Metrics metrics,
InternalLogDirStats logDirInfo) {
var topic = InternalTopic.builder();
topic.internal(
topicDescription.isInternal() || topicDescription.name().startsWith("_")
);
topic.internal(topicDescription.isInternal());
topic.name(topicDescription.name());

List<InternalPartition> partitions = topicDescription.partitions().stream()
@ -49,8 +49,8 @@ public class ConsumerGroupService {
var tpsFromGroupOffsets = groupOffsetsMap.values().stream()
.flatMap(v -> v.keySet().stream())
.collect(Collectors.toSet());
// 2. getting end offsets for partitions with in committed offsets
return ac.listOffsets(tpsFromGroupOffsets, OffsetSpec.latest())
// 2. getting end offsets for partitions with committed offsets
return ac.listOffsets(tpsFromGroupOffsets, OffsetSpec.latest(), false)
.map(endOffsets ->
descriptions.stream()
.map(desc -> {

@ -64,18 +64,11 @@ public class ConsumerGroupService {
});
}

@Deprecated // need to migrate to pagination
public Mono<List<InternalConsumerGroup>> getAllConsumerGroups(KafkaCluster cluster) {
return adminClientService.get(cluster)
.flatMap(ac -> describeConsumerGroups(ac, null)
.flatMap(descriptions -> getConsumerGroups(ac, descriptions)));
}

public Mono<List<InternalTopicConsumerGroup>> getConsumerGroupsForTopic(KafkaCluster cluster,
String topic) {
return adminClientService.get(cluster)
// 1. getting topic's end offsets
.flatMap(ac -> ac.listOffsets(topic, OffsetSpec.latest())
.flatMap(ac -> ac.listTopicOffsets(topic, OffsetSpec.latest(), false)
.flatMap(endOffsets -> {
var tps = new ArrayList<>(endOffsets.keySet());
// 2. getting all consumer groups
@ -1,47 +0,0 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.client.KsqlClient;
import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.exception.KsqlDbNotFoundException;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.KsqlCommandDTO;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.strategy.ksql.statement.BaseStrategy;
import java.util.List;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;

@Service
@RequiredArgsConstructor
public class KsqlService {
private final KsqlClient ksqlClient;
private final List<BaseStrategy> ksqlStatementStrategies;

public Mono<KsqlCommandResponseDTO> executeKsqlCommand(KafkaCluster cluster,
Mono<KsqlCommandDTO> ksqlCommand) {
return Mono.justOrEmpty(cluster)
.map(KafkaCluster::getKsqldbServer)
.onErrorResume(e -> {
Throwable throwable =
e instanceof ClusterNotFoundException ? e : new KsqlDbNotFoundException();
return Mono.error(throwable);
})
.flatMap(ksqlServer -> getStatementStrategyForKsqlCommand(ksqlCommand)
.map(statement -> statement.host(ksqlServer.getUrl()))
)
.flatMap(baseStrategy -> ksqlClient.execute(baseStrategy, cluster));
}

private Mono<BaseStrategy> getStatementStrategyForKsqlCommand(
Mono<KsqlCommandDTO> ksqlCommand) {
return ksqlCommand
.map(command -> ksqlStatementStrategies.stream()
.filter(s -> s.test(command.getKsql()))
.map(s -> s.ksqlCommand(command))
.findFirst())
.flatMap(Mono::justOrEmpty)
.switchIfEmpty(Mono.error(new UnprocessableEntityException("Invalid sql")));
}
}
@ -14,12 +14,9 @@ import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
import com.provectus.kafka.ui.util.OffsetsSeekBackward;
import com.provectus.kafka.ui.util.OffsetsSeekForward;
import com.provectus.kafka.ui.util.ResultSizeLimiter;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.concurrent.CompletableFuture;
import java.util.function.Predicate;

@ -68,8 +65,8 @@ public class MessagesService {
private Mono<Map<TopicPartition, Long>> offsetsForDeletion(KafkaCluster cluster, String topicName,
List<Integer> partitionsToInclude) {
return adminClientService.get(cluster).flatMap(ac ->
ac.listOffsets(topicName, OffsetSpec.earliest())
.zipWith(ac.listOffsets(topicName, OffsetSpec.latest()),
ac.listTopicOffsets(topicName, OffsetSpec.earliest(), true)
.zipWith(ac.listTopicOffsets(topicName, OffsetSpec.latest(), true),
(start, end) ->
end.entrySet().stream()
.filter(e -> partitionsToInclude.isEmpty()

@ -129,58 +126,62 @@ public class MessagesService {
}

public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
ConsumerPosition consumerPosition, String query,
ConsumerPosition consumerPosition,
@Nullable String query,
MessageFilterTypeDTO filterQueryType,
int limit,
SeekDirectionDTO seekDirection,
@Nullable String keySerde,
@Nullable String valueSerde) {
return withExistingTopic(cluster, topic)
.flux()
.flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query,
filterQueryType, limit, keySerde, valueSerde));
filterQueryType, limit, seekDirection, keySerde, valueSerde));
}

private Flux<TopicMessageEventDTO> loadMessagesImpl(KafkaCluster cluster,
String topic,
ConsumerPosition consumerPosition,
String query,
@Nullable String query,
MessageFilterTypeDTO filterQueryType,
int limit,
SeekDirectionDTO seekDirection,
@Nullable String keySerde,
@Nullable String valueSerde) {

java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
ConsumerRecordDeserializer recordDeserializer =
deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
if (consumerPosition.getSeekDirection().equals(SeekDirectionDTO.FORWARD)) {
if (seekDirection.equals(SeekDirectionDTO.FORWARD)) {
emitter = new ForwardRecordEmitter(
() -> consumerGroupService.createConsumer(cluster),
new OffsetsSeekForward(topic, consumerPosition),
consumerPosition,
recordDeserializer
);
} else if (consumerPosition.getSeekDirection().equals(SeekDirectionDTO.BACKWARD)) {
} else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) {
emitter = new BackwardRecordEmitter(
(Map<String, Object> props) -> consumerGroupService.createConsumer(cluster, props),
new OffsetsSeekBackward(topic, consumerPosition, limit),
() -> consumerGroupService.createConsumer(cluster),
consumerPosition,
limit,
recordDeserializer
);
} else {
emitter = new TailingEmitter(
recordDeserializer,
() -> consumerGroupService.createConsumer(cluster),
new OffsetsSeekForward(topic, consumerPosition)
consumerPosition,
recordDeserializer
);
}
return Flux.create(emitter)
.filter(getMsgFilter(query, filterQueryType))
.takeWhile(createTakeWhilePredicate(consumerPosition, limit))
.takeWhile(createTakeWhilePredicate(seekDirection, limit))
.subscribeOn(Schedulers.boundedElastic())
.share();
}

private Predicate<TopicMessageEventDTO> createTakeWhilePredicate(
ConsumerPosition consumerPosition, int limit) {
return consumerPosition.getSeekDirection() == SeekDirectionDTO.TAILING
SeekDirectionDTO seekDirection, int limit) {
return seekDirection == SeekDirectionDTO.TAILING
? evt -> true // no limit for tailing
: new ResultSizeLimiter(limit);
}

@ -189,8 +190,6 @@ public class MessagesService {
if (StringUtils.isEmpty(query)) {
return evt -> true;
}
filterQueryType = Optional.ofNullable(filterQueryType)
.orElse(MessageFilterTypeDTO.STRING_CONTAINS);
var messageFilter = MessageFilters.createMsgFilter(query, filterQueryType);
return evt -> {
// we only apply filter for message events
@ -47,11 +47,12 @@ public class OffsetsResetService {
@Nullable Collection<Integer> partitions,
OffsetSpec spec) {
if (partitions == null) {
return client.listOffsets(topic, spec);
return client.listTopicOffsets(topic, spec, true);
}
return client.listOffsets(
partitions.stream().map(idx -> new TopicPartition(topic, idx)).collect(toSet()),
spec
spec,
true
);
}

@ -84,9 +85,9 @@ public class OffsetsResetService {
.collect(toMap(e -> new TopicPartition(topic, e.getKey()), Map.Entry::getValue));
return checkGroupCondition(cluster, group).flatMap(
ac ->
ac.listOffsets(partitionOffsets.keySet(), OffsetSpec.earliest())
ac.listOffsets(partitionOffsets.keySet(), OffsetSpec.earliest(), true)
.flatMap(earliest ->
ac.listOffsets(partitionOffsets.keySet(), OffsetSpec.latest())
ac.listOffsets(partitionOffsets.keySet(), OffsetSpec.latest(), true)
.map(latest -> editOffsetsBounds(partitionOffsets, earliest, latest))
.flatMap(offsetsToCommit -> resetOffsets(ac, group, offsetsToCommit)))
);
@ -9,11 +9,15 @@ import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterators;
import com.provectus.kafka.ui.exception.IllegalEntityStateException;
import com.provectus.kafka.ui.exception.NotFoundException;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.util.MapUtil;
import com.provectus.kafka.ui.util.NumberUtil;
import com.provectus.kafka.ui.util.annotations.KafkaClientInternalsDependant;
import java.io.Closeable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

@ -25,6 +29,7 @@ import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Predicate;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import javax.annotation.Nullable;

@ -51,6 +56,7 @@ import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionInfo;
import org.apache.kafka.common.TopicPartitionReplica;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;

@ -422,21 +428,81 @@ public class ReactiveAdminClient implements Closeable {
.all());
}

public Mono<Map<TopicPartition, Long>> listOffsets(String topic,
OffsetSpec offsetSpec) {
return topicPartitions(topic).flatMap(tps -> listOffsets(tps, offsetSpec));
/**
* List offset for the topic's partitions and OffsetSpec.
* @param failOnUnknownLeader true - throw exception in case of no-leader partitions,
* false - skip partitions with no leader
*/
public Mono<Map<TopicPartition, Long>> listTopicOffsets(String topic,
OffsetSpec offsetSpec,
boolean failOnUnknownLeader) {
return describeTopic(topic)
.map(td -> filterPartitionsWithLeaderCheck(List.of(td), p -> true, failOnUnknownLeader))
.flatMap(partitions -> listOffsetsUnsafe(partitions, offsetSpec));
}

/**
* List offset for the specified partitions and OffsetSpec.
* @param failOnUnknownLeader true - throw exception in case of no-leader partitions,
* false - skip partitions with no leader
*/
public Mono<Map<TopicPartition, Long>> listOffsets(Collection<TopicPartition> partitions,
OffsetSpec offsetSpec) {
//TODO: need to split this into multiple calls if number of target partitions is big
return toMono(
client.listOffsets(partitions.stream().collect(toMap(tp -> tp, tp -> offsetSpec))).all())
.map(offsets -> offsets.entrySet()
.stream()
// filtering partitions for which offsets were not found
.filter(e -> e.getValue().offset() >= 0)
.collect(toMap(Map.Entry::getKey, e -> e.getValue().offset())));
OffsetSpec offsetSpec,
boolean failOnUnknownLeader) {
return filterPartitionsWithLeaderCheck(partitions, failOnUnknownLeader)
.flatMap(parts -> listOffsetsUnsafe(parts, offsetSpec));
}

private Mono<Collection<TopicPartition>> filterPartitionsWithLeaderCheck(Collection<TopicPartition> partitions,
boolean failOnUnknownLeader) {
var targetTopics = partitions.stream().map(TopicPartition::topic).collect(Collectors.toSet());
return describeTopicsImpl(targetTopics)
.map(descriptions ->
filterPartitionsWithLeaderCheck(
descriptions.values(), partitions::contains, failOnUnknownLeader));
}

private Set<TopicPartition> filterPartitionsWithLeaderCheck(Collection<TopicDescription> topicDescriptions,
Predicate<TopicPartition> partitionPredicate,
boolean failOnUnknownLeader) {
var goodPartitions = new HashSet<TopicPartition>();
for (TopicDescription description : topicDescriptions) {
for (TopicPartitionInfo partitionInfo : description.partitions()) {
TopicPartition topicPartition = new TopicPartition(description.name(), partitionInfo.partition());
if (!partitionPredicate.test(topicPartition)) {
continue;
}
if (partitionInfo.leader() != null) {
goodPartitions.add(topicPartition);
} else if (failOnUnknownLeader) {
throw new ValidationException(String.format("Topic partition %s has no leader", topicPartition));
}
}
}
return goodPartitions;
}

// 1. NOTE(!): should only apply for partitions with existing leader,
// otherwise AdminClient will try to fetch topic metadata, fail and retry infinitely (until timeout)
// 2. TODO: check if it is a bug that AdminClient never throws LeaderNotAvailableException and just retrying instead
@KafkaClientInternalsDependant
public Mono<Map<TopicPartition, Long>> listOffsetsUnsafe(Collection<TopicPartition> partitions,
OffsetSpec offsetSpec) {

Function<Collection<TopicPartition>, Mono<Map<TopicPartition, Long>>> call =
parts -> toMono(
client.listOffsets(parts.stream().collect(toMap(tp -> tp, tp -> offsetSpec))).all())
.map(offsets -> offsets.entrySet().stream()
// filtering partitions for which offsets were not found
.filter(e -> e.getValue().offset() >= 0)
.collect(toMap(Map.Entry::getKey, e -> e.getValue().offset())));

return partitionCalls(
partitions,
200,
call,
(m1, m2) -> ImmutableMap.<TopicPartition, Long>builder().putAll(m1).putAll(m2).build()
);
}
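partitionCalls (used just above) resolves the old TODO by splitting the partition set into chunks of 200 and merging the partial result maps. Its real signature isn't shown in this diff; a plausible shape, sketched here with Guava's Iterables.partition and Reactor (imports: reactor.core.publisher.Flux/Mono, com.google.common.collect.Iterables, java.util.function.*), would be:

  // Hypothetical sketch of a chunked-call helper like partitionCalls(..).
  static <V> Mono<Map<TopicPartition, V>> partitionCalls(
      Collection<TopicPartition> partitions,
      int chunkSize,
      Function<Collection<TopicPartition>, Mono<Map<TopicPartition, V>>> call,
      BinaryOperator<Map<TopicPartition, V>> merger) {
    return Flux.fromIterable(Iterables.partition(List.copyOf(partitions), chunkSize))
        .concatMap(chunk -> call.apply(List.copyOf(chunk))) // one AdminClient call per chunk
        .reduce(merger)                                     // merge partial offset maps
        .defaultIfEmpty(Map.of());                          // no partitions -> empty map
  }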
public Mono<Collection<AclBinding>> listAcls() {
@ -455,17 +521,6 @@ public class ReactiveAdminClient implements Closeable {
return toMono(client.deleteAcls(filters).all()).then();
}

private Mono<Set<TopicPartition>> topicPartitions(String topic) {
return toMono(client.describeTopics(List.of(topic)).all())
.map(r -> r.values().stream()
.findFirst()
.stream()
.flatMap(d -> d.partitions().stream())
.map(p -> new TopicPartition(topic, p.partition()))
.collect(Collectors.toSet())
);
}

public Mono<Void> updateBrokerConfigByName(Integer brokerId, String name, String value) {
ConfigResource cr = new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(brokerId));
AlterConfigOp op = new AlterConfigOp(new ConfigEntry(name, value), AlterConfigOp.OpType.SET);
@ -138,11 +138,15 @@ public class TopicsService {
ReactiveAdminClient ac) {
var topicPartitions = descriptions.values().stream()
.flatMap(desc ->
desc.partitions().stream().map(p -> new TopicPartition(desc.name(), p.partition())))
desc.partitions().stream()
// list offsets should only be applied to partitions with existing leader
// (see ReactiveAdminClient.listOffsetsUnsafe(..) docs)
.filter(tp -> tp.leader() != null)
.map(p -> new TopicPartition(desc.name(), p.partition())))
.collect(toList());

return ac.listOffsets(topicPartitions, OffsetSpec.earliest())
.zipWith(ac.listOffsets(topicPartitions, OffsetSpec.latest()),
return ac.listOffsetsUnsafe(topicPartitions, OffsetSpec.earliest())
.zipWith(ac.listOffsetsUnsafe(topicPartitions, OffsetSpec.latest()),
(earliest, latest) ->
topicPartitions.stream()
.filter(tp -> earliest.containsKey(tp) && latest.containsKey(tp))
@ -2,12 +2,12 @@ package com.provectus.kafka.ui.service.analyze;

import static com.provectus.kafka.ui.emitter.AbstractEmitter.NO_MORE_DATA_EMPTY_POLLS_COUNT;

import com.provectus.kafka.ui.emitter.OffsetsInfo;
import com.provectus.kafka.ui.exception.TopicAnalysisException;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.TopicAnalysisDTO;
import com.provectus.kafka.ui.service.ConsumerGroupService;
import com.provectus.kafka.ui.service.TopicsService;
import com.provectus.kafka.ui.util.OffsetsSeek.WaitingOffsets;
import java.io.Closeable;
import java.time.Duration;
import java.time.Instant;

@ -119,14 +119,14 @@ public class TopicAnalysisService {
consumer.assign(topicPartitions);
consumer.seekToBeginning(topicPartitions);

var waitingOffsets = new WaitingOffsets(topicId.topicName, consumer, topicPartitions);
for (int emptyPolls = 0; !waitingOffsets.endReached() && emptyPolls < NO_MORE_DATA_EMPTY_POLLS_COUNT;) {
var offsetsInfo = new OffsetsInfo(consumer, topicId.topicName);
for (int emptyPolls = 0; !offsetsInfo.assignedPartitionsFullyPolled()
&& emptyPolls < NO_MORE_DATA_EMPTY_POLLS_COUNT;) {
var polled = consumer.poll(Duration.ofSeconds(3));
emptyPolls = polled.isEmpty() ? emptyPolls + 1 : 0;
polled.forEach(r -> {
totalStats.apply(r);
partitionStats.get(r.partition()).apply(r);
waitingOffsets.markPolled(r);
});
updateProgress();
}
@@ -1,166 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandDTO;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

public abstract class BaseStrategy {
  protected static final String KSQL_REQUEST_PATH = "/ksql";
  protected static final String QUERY_REQUEST_PATH = "/query";
  private static final String MAPPING_EXCEPTION_ERROR = "KSQL DB response mapping error";
  protected String host = null;
  protected KsqlCommandDTO ksqlCommand = null;

  public String getUri() {
    if (this.host != null) {
      return this.host + this.getRequestPath();
    }
    throw new UnprocessableEntityException("Strategy doesn't have host");
  }

  public boolean test(String sql) {
    return sql.trim().toLowerCase().matches(getTestRegExp());
  }

  public BaseStrategy host(String host) {
    this.host = host;
    return this;
  }

  public KsqlCommandDTO getKsqlCommand() {
    return ksqlCommand;
  }

  public BaseStrategy ksqlCommand(KsqlCommandDTO ksqlCommand) {
    this.ksqlCommand = ksqlCommand;
    return this;
  }

  protected String getRequestPath() {
    return BaseStrategy.KSQL_REQUEST_PATH;
  }

  protected KsqlCommandResponseDTO serializeTableResponse(JsonNode response, String key) {
    JsonNode item = getResponseFirstItemValue(response, key);
    TableDTO table = item.isArray() ? getTableFromArray(item) : getTableFromObject(item);
    return (new KsqlCommandResponseDTO()).data(table);
  }

  protected KsqlCommandResponseDTO serializeMessageResponse(JsonNode response, String key) {
    JsonNode item = getResponseFirstItemValue(response, key);
    return (new KsqlCommandResponseDTO()).message(getMessageFromObject(item));
  }

  protected KsqlCommandResponseDTO serializeQueryResponse(JsonNode response) {
    if (response.isArray() && response.size() > 0) {
      TableDTO table = (new TableDTO())
          .headers(getQueryResponseHeader(response))
          .rows(getQueryResponseRows(response));
      return (new KsqlCommandResponseDTO()).data(table);
    }
    throw new UnprocessableEntityException(MAPPING_EXCEPTION_ERROR);
  }

  private JsonNode getResponseFirstItemValue(JsonNode response, String key) {
    if (response.isArray() && response.size() > 0) {
      JsonNode first = response.get(0);
      if (first.has(key)) {
        return first.path(key);
      }
    }
    throw new UnprocessableEntityException(MAPPING_EXCEPTION_ERROR);
  }

  private List<String> getQueryResponseHeader(JsonNode response) {
    JsonNode headerRow = response.get(0);
    if (headerRow.isObject() && headerRow.has("header")) {
      String schema = headerRow.get("header").get("schema").asText();
      return Arrays.stream(schema.split(",")).map(String::trim).collect(Collectors.toList());
    }
    return new ArrayList<>();
  }

  private List<List<String>> getQueryResponseRows(JsonNode node) {
    return getStreamForJsonArray(node)
        .filter(row -> row.has("row") && row.get("row").has("columns"))
        .map(row -> row.get("row").get("columns"))
        .map(cellNode -> getStreamForJsonArray(cellNode)
            .map(JsonNode::asText)
            .collect(Collectors.toList())
        )
        .collect(Collectors.toList());
  }

  private TableDTO getTableFromArray(JsonNode node) {
    TableDTO table = new TableDTO();
    table.headers(new ArrayList<>()).rows(new ArrayList<>());
    if (node.size() > 0) {
      List<String> keys = getJsonObjectKeys(node.get(0));
      List<List<String>> rows = getTableRows(node, keys);
      table.headers(keys).rows(rows);
    }
    return table;
  }

  private TableDTO getTableFromObject(JsonNode node) {
    List<String> keys = getJsonObjectKeys(node);
    List<String> values = getJsonObjectValues(node);
    List<List<String>> rows = IntStream
        .range(0, keys.size())
        .mapToObj(i -> List.of(keys.get(i), values.get(i)))
        .collect(Collectors.toList());
    return (new TableDTO()).headers(List.of("key", "value")).rows(rows);
  }

  private String getMessageFromObject(JsonNode node) {
    if (node.isObject() && node.has("message")) {
      return node.get("message").asText();
    }
    throw new UnprocessableEntityException(MAPPING_EXCEPTION_ERROR);
  }

  private List<List<String>> getTableRows(JsonNode node, List<String> keys) {
    return getStreamForJsonArray(node)
        .map(row -> keys.stream()
            .map(header -> row.get(header).asText())
            .collect(Collectors.toList())
        )
        .collect(Collectors.toList());
  }

  private Stream<JsonNode> getStreamForJsonArray(JsonNode node) {
    if (node.isArray() && node.size() > 0) {
      return StreamSupport.stream(node.spliterator(), false);
    }
    throw new UnprocessableEntityException(MAPPING_EXCEPTION_ERROR);
  }

  private List<String> getJsonObjectKeys(JsonNode node) {
    if (node.isObject()) {
      return StreamSupport.stream(
          Spliterators.spliteratorUnknownSize(node.fieldNames(), Spliterator.ORDERED), false
      ).collect(Collectors.toList());
    }
    throw new UnprocessableEntityException(MAPPING_EXCEPTION_ERROR);
  }

  private List<String> getJsonObjectValues(JsonNode node) {
    return getJsonObjectKeys(node).stream().map(key -> node.get(key).asText())
        .collect(Collectors.toList());
  }

  public abstract KsqlCommandResponseDTO serializeResponse(JsonNode response);

  protected abstract String getTestRegExp();
}
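BaseStrategy (deleted in this commit) is a classic strategy pattern: each subclass claims a statement kind via test(..), and supplies the request path ("/ksql" or "/query") plus the response mapping. A minimal sketch of how a caller might dispatch to the matching strategy; the helper class here is hypothetical, the real wiring lived in KsqlService:

import java.util.List;

class StrategyDispatch {
  // Pick the first strategy whose regexp matches, then bind the host so
  // getUri() can build host + request path.
  static BaseStrategy pick(List<BaseStrategy> strategies, String host, String sql) {
    return strategies.stream()
        .filter(s -> s.test(sql))
        .findFirst()
        .map(s -> s.host(host))
        .orElseThrow(() -> new IllegalArgumentException("Invalid sql"));
  }
}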
@@ -1,20 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
public class CreateStrategy extends BaseStrategy {
  private static final String RESPONSE_VALUE_KEY = "commandStatus";

  @Override
  public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
    return serializeMessageResponse(response, RESPONSE_VALUE_KEY);
  }

  @Override
  protected String getTestRegExp() {
    return "create (table|stream)(.*)(with|as select(.*)from)(.*);";
  }
}
@@ -1,20 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
public class DescribeStrategy extends BaseStrategy {
  private static final String RESPONSE_VALUE_KEY = "sourceDescription";

  @Override
  public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
    return serializeTableResponse(response, RESPONSE_VALUE_KEY);
  }

  @Override
  protected String getTestRegExp() {
    return "describe (.*);";
  }
}
@@ -1,20 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
public class DropStrategy extends BaseStrategy {
  private static final String RESPONSE_VALUE_KEY = "commandStatus";

  @Override
  public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
    return serializeMessageResponse(response, RESPONSE_VALUE_KEY);
  }

  @Override
  protected String getTestRegExp() {
    return "drop (table|stream) (.*);";
  }
}
@@ -1,20 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
public class ExplainStrategy extends BaseStrategy {
  private static final String RESPONSE_VALUE_KEY = "queryDescription";

  @Override
  public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
    return serializeTableResponse(response, RESPONSE_VALUE_KEY);
  }

  @Override
  protected String getTestRegExp() {
    return "explain (.*);";
  }
}
@@ -1,24 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
public class SelectStrategy extends BaseStrategy {

  @Override
  public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
    return serializeQueryResponse(response);
  }

  @Override
  protected String getRequestPath() {
    return BaseStrategy.QUERY_REQUEST_PATH;
  }

  @Override
  protected String getTestRegExp() {
    return "select (.*) from (.*);";
  }
}
@@ -1,67 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandDTO;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import java.util.List;
import java.util.Optional;
import org.springframework.stereotype.Component;

@Component
public class ShowStrategy extends BaseStrategy {
  private static final List<String> SHOW_STATEMENTS =
      List.of("functions", "topics", "streams", "tables", "queries", "properties");
  private static final List<String> LIST_STATEMENTS =
      List.of("functions", "topics", "streams", "tables");
  private String responseValueKey = "";

  @Override
  public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
    return serializeTableResponse(response, responseValueKey);
  }

  @Override
  public boolean test(String sql) {
    Optional<String> statement = SHOW_STATEMENTS.stream()
        .filter(s -> testSql(sql, getShowRegExp(s)) || testSql(sql, getListRegExp(s)))
        .findFirst();
    if (statement.isPresent()) {
      setResponseValueKey(statement.get());
      return true;
    }
    return false;
  }

  @Override
  protected String getTestRegExp() {
    return "";
  }

  @Override
  public BaseStrategy ksqlCommand(KsqlCommandDTO ksqlCommand) {
    // return new instance to avoid conflicts for parallel requests
    ShowStrategy clone = new ShowStrategy();
    clone.setResponseValueKey(responseValueKey);
    clone.ksqlCommand = ksqlCommand;
    return clone;
  }

  protected String getShowRegExp(String key) {
    return "show " + key + ";";
  }

  protected String getListRegExp(String key) {
    if (LIST_STATEMENTS.contains(key)) {
      return "list " + key + ";";
    }
    return "";
  }

  private void setResponseValueKey(String path) {
    responseValueKey = path;
  }

  private boolean testSql(String sql, String pattern) {
    return sql.trim().toLowerCase().matches(pattern);
  }
}
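ShowStrategy is the one stateful strategy in the set: test(..) mutates responseValueKey, which is why ksqlCommand(..) returns a clone per request. A minimal, purely illustrative sketch of the race the comment guards against:

class ShowStrategyRace {
  public static void main(String[] args) {
    // A shared singleton serving two requests could serialize the first
    // response with the second request's key:
    ShowStrategy shared = new ShowStrategy();
    shared.test("show tables;");   // responseValueKey becomes "tables"
    shared.test("show queries;");  // responseValueKey becomes "queries"
    // Any in-flight serializeResponse(..) for the first request now reads "queries".
  }
}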
@@ -1,20 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
public class TerminateStrategy extends BaseStrategy {
  private static final String RESPONSE_VALUE_KEY = "commandStatus";

  @Override
  public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
    return serializeMessageResponse(response, RESPONSE_VALUE_KEY);
  }

  @Override
  protected String getTestRegExp() {
    return "terminate (.*);";
  }
}
@@ -1,143 +0,0 @@
package com.provectus.kafka.ui.util;

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;

@Slf4j
public abstract class OffsetsSeek {
  protected final String topic;
  protected final ConsumerPosition consumerPosition;

  protected OffsetsSeek(String topic, ConsumerPosition consumerPosition) {
    this.topic = topic;
    this.consumerPosition = consumerPosition;
  }

  public ConsumerPosition getConsumerPosition() {
    return consumerPosition;
  }

  public Map<TopicPartition, Long> getPartitionsOffsets(Consumer<Bytes, Bytes> consumer) {
    SeekTypeDTO seekType = consumerPosition.getSeekType();
    List<TopicPartition> partitions = getRequestedPartitions(consumer);
    log.info("Positioning consumer for topic {} with {}", topic, consumerPosition);
    Map<TopicPartition, Long> offsets;
    switch (seekType) {
      case OFFSET:
        offsets = offsetsFromPositions(consumer, partitions);
        break;
      case TIMESTAMP:
        offsets = offsetsForTimestamp(consumer);
        break;
      case BEGINNING:
        offsets = offsetsFromBeginning(consumer, partitions);
        break;
      case LATEST:
        offsets = endOffsets(consumer, partitions);
        break;
      default:
        throw new IllegalArgumentException("Unknown seekType: " + seekType);
    }
    return offsets;
  }

  public WaitingOffsets waitingOffsets(Consumer<Bytes, Bytes> consumer,
                                       Collection<TopicPartition> partitions) {
    return new WaitingOffsets(topic, consumer, partitions);
  }

  public WaitingOffsets assignAndSeek(Consumer<Bytes, Bytes> consumer) {
    final Map<TopicPartition, Long> partitionsOffsets = getPartitionsOffsets(consumer);
    consumer.assign(partitionsOffsets.keySet());
    partitionsOffsets.forEach(consumer::seek);
    log.info("Assignment: {}", consumer.assignment());
    return waitingOffsets(consumer, partitionsOffsets.keySet());
  }

  public List<TopicPartition> getRequestedPartitions(Consumer<Bytes, Bytes> consumer) {
    Map<TopicPartition, Long> partitionPositions = consumerPosition.getSeekTo();
    return consumer.partitionsFor(topic).stream()
        .filter(
            p -> partitionPositions.isEmpty()
                || partitionPositions.containsKey(new TopicPartition(p.topic(), p.partition()))
        ).map(p -> new TopicPartition(p.topic(), p.partition()))
        .collect(Collectors.toList());
  }

  protected Map<TopicPartition, Long> endOffsets(
      Consumer<Bytes, Bytes> consumer, List<TopicPartition> partitions) {
    return consumer.endOffsets(partitions);
  }

  protected abstract Map<TopicPartition, Long> offsetsFromBeginning(
      Consumer<Bytes, Bytes> consumer, List<TopicPartition> partitions);

  protected abstract Map<TopicPartition, Long> offsetsForTimestamp(
      Consumer<Bytes, Bytes> consumer);

  protected abstract Map<TopicPartition, Long> offsetsFromPositions(
      Consumer<Bytes, Bytes> consumer, List<TopicPartition> partitions);

  public static class WaitingOffsets {
    private final Map<Integer, Long> endOffsets; // partition number -> offset
    private final Map<Integer, Long> beginOffsets; // partition number -> offset

    public WaitingOffsets(String topic, Consumer<?, ?> consumer,
                          Collection<TopicPartition> partitions) {
      var allBeginningOffsets = consumer.beginningOffsets(partitions);
      var allEndOffsets = consumer.endOffsets(partitions);

      this.endOffsets = allEndOffsets.entrySet().stream()
          .filter(entry -> !allBeginningOffsets.get(entry.getKey()).equals(entry.getValue()))
          .map(e -> Tuples.of(e.getKey().partition(), e.getValue() - 1))
          .collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));

      this.beginOffsets = this.endOffsets.keySet().stream()
          .map(p -> Tuples.of(p, allBeginningOffsets.get(new TopicPartition(topic, p))))
          .collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));
    }

    public void markPolled(ConsumerRecord<?, ?> rec) {
      markPolled(rec.partition(), rec.offset());
    }

    public void markPolled(int partition, long offset) {
      Long endWaiting = endOffsets.get(partition);
      if (endWaiting != null && endWaiting <= offset) {
        endOffsets.remove(partition);
      }
      Long beginWaiting = beginOffsets.get(partition);
      if (beginWaiting != null && beginWaiting >= offset) {
        beginOffsets.remove(partition);
      }
    }

    public boolean endReached() {
      return endOffsets.isEmpty();
    }

    public boolean beginReached() {
      return beginOffsets.isEmpty();
    }

    public Map<Integer, Long> getEndOffsets() {
      return endOffsets;
    }

    public Map<Integer, Long> getBeginOffsets() {
      return beginOffsets;
    }
  }
}
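The subtle part of the deleted WaitingOffsets class is that it stores endOffset - 1 per non-empty partition (the offset of the last existing record, since Kafka's end offset points past it), then drops a partition once markPolled sees that offset. A minimal sketch of that contract, runnable against MockConsumer the same way the new tests in this commit do:

import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;

class WaitingOffsetsDemo {
  public static void main(String[] args) {
    var tp = new TopicPartition("demo", 0);
    MockConsumer<Bytes, Bytes> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    consumer.updateBeginningOffsets(Map.of(tp, 0L));
    consumer.updateEndOffsets(Map.of(tp, 5L)); // records 0..4 exist

    var waiting = new OffsetsSeek.WaitingOffsets("demo", consumer, List.of(tp));
    System.out.println(waiting.getEndOffsets()); // {0=4}: end offset minus one
    waiting.markPolled(0, 4);                    // polling the last record completes the partition
    System.out.println(waiting.endReached());    // true
  }
}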
@@ -1,120 +0,0 @@
package com.provectus.kafka.ui.util;

import com.provectus.kafka.ui.model.ConsumerPosition;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;

@Slf4j
public class OffsetsSeekBackward extends OffsetsSeek {

  private final int maxMessages;

  public OffsetsSeekBackward(String topic,
                             ConsumerPosition consumerPosition, int maxMessages) {
    super(topic, consumerPosition);
    this.maxMessages = maxMessages;
  }

  public int msgsPerPartition(int partitionsSize) {
    return msgsPerPartition(maxMessages, partitionsSize);
  }

  public int msgsPerPartition(long awaitingMessages, int partitionsSize) {
    return (int) Math.ceil((double) awaitingMessages / partitionsSize);
  }

  protected Map<TopicPartition, Long> offsetsFromPositions(Consumer<Bytes, Bytes> consumer,
                                                           List<TopicPartition> partitions) {
    return findOffsetsInt(consumer, consumerPosition.getSeekTo(), partitions);
  }

  protected Map<TopicPartition, Long> offsetsFromBeginning(Consumer<Bytes, Bytes> consumer,
                                                           List<TopicPartition> partitions) {
    return findOffsets(consumer, Map.of(), partitions);
  }

  protected Map<TopicPartition, Long> offsetsForTimestamp(Consumer<Bytes, Bytes> consumer) {
    Map<TopicPartition, Long> timestampsToSearch =
        consumerPosition.getSeekTo().entrySet().stream()
            .collect(Collectors.toMap(
                Map.Entry::getKey,
                Map.Entry::getValue
            ));
    Map<TopicPartition, Long> offsetsForTimestamps = consumer.offsetsForTimes(timestampsToSearch)
        .entrySet().stream()
        .filter(e -> e.getValue() != null)
        .map(v -> Tuples.of(v.getKey(), v.getValue().offset()))
        .collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));

    if (offsetsForTimestamps.isEmpty()) {
      throw new IllegalArgumentException("No offsets were found for requested timestamps");
    }

    log.info("Timestamps: {} to offsets: {}", timestampsToSearch, offsetsForTimestamps);

    return findOffsets(consumer, offsetsForTimestamps, offsetsForTimestamps.keySet());
  }

  protected Map<TopicPartition, Long> findOffsetsInt(
      Consumer<Bytes, Bytes> consumer, Map<TopicPartition, Long> seekTo,
      List<TopicPartition> partitions) {
    return findOffsets(consumer, seekTo, partitions);
  }

  protected Map<TopicPartition, Long> findOffsets(
      Consumer<Bytes, Bytes> consumer, Map<TopicPartition, Long> seekTo,
      Collection<TopicPartition> partitions) {

    final Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
    final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);

    final Map<TopicPartition, Long> seekMap = new HashMap<>();
    final Set<TopicPartition> emptyPartitions = new HashSet<>();

    for (Map.Entry<TopicPartition, Long> entry : seekTo.entrySet()) {
      final Long endOffset = endOffsets.get(entry.getKey());
      final Long beginningOffset = beginningOffsets.get(entry.getKey());
      if (beginningOffset != null
          && endOffset != null
          && beginningOffset < endOffset
          && entry.getValue() > beginningOffset
      ) {
        final Long value;
        if (entry.getValue() > endOffset) {
          value = endOffset;
        } else {
          value = entry.getValue();
        }

        seekMap.put(entry.getKey(), value);
      } else {
        emptyPartitions.add(entry.getKey());
      }
    }

    Set<TopicPartition> waiting = new HashSet<>(partitions);
    waiting.removeAll(emptyPartitions);
    waiting.removeAll(seekMap.keySet());

    for (TopicPartition topicPartition : waiting) {
      seekMap.put(topicPartition, endOffsets.get(topicPartition));
    }

    return seekMap;
  }

}
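Two small pieces of arithmetic in the deleted class are worth spelling out: msgsPerPartition rounds up so the page size is always reached, and findOffsets clamps a requested position that lies past the end of a partition. A worked sketch with illustrative numbers:

class BackwardSeekMath {
  public static void main(String[] args) {
    // msgsPerPartition spreads the requested page across partitions, rounding up:
    // 100 messages over 3 partitions -> ceil(100 / 3.0) = 34 per partition.
    System.out.println((int) Math.ceil((double) 100 / 3)); // 34

    // findOffsets bounds a requested seek position: anything past the end
    // offset is clamped to the end (positions at or below the beginning
    // instead mark the partition as empty for this request).
    long end = 50;
    long requested = 120;
    System.out.println(requested > end ? end : requested); // 50
  }
}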
@@ -1,61 +0,0 @@
package com.provectus.kafka.ui.util;

import com.provectus.kafka.ui.model.ConsumerPosition;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;

@Slf4j
public class OffsetsSeekForward extends OffsetsSeek {

  public OffsetsSeekForward(String topic, ConsumerPosition consumerPosition) {
    super(topic, consumerPosition);
  }

  protected Map<TopicPartition, Long> offsetsFromPositions(Consumer<Bytes, Bytes> consumer,
                                                           List<TopicPartition> partitions) {
    final Map<TopicPartition, Long> offsets =
        offsetsFromBeginning(consumer, partitions);

    final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(offsets.keySet());
    final Set<TopicPartition> set = new HashSet<>(consumerPosition.getSeekTo().keySet());
    final Map<TopicPartition, Long> collect = consumerPosition.getSeekTo().entrySet().stream()
        .filter(e -> e.getValue() < endOffsets.get(e.getKey()))
        .filter(e -> endOffsets.get(e.getKey()) > offsets.get(e.getKey()))
        .collect(Collectors.toMap(
            Map.Entry::getKey,
            Map.Entry::getValue
        ));
    offsets.putAll(collect);
    set.removeAll(collect.keySet());
    set.forEach(offsets::remove);

    return offsets;
  }

  protected Map<TopicPartition, Long> offsetsForTimestamp(Consumer<Bytes, Bytes> consumer) {
    Map<TopicPartition, Long> offsetsForTimestamps =
        consumer.offsetsForTimes(consumerPosition.getSeekTo())
            .entrySet().stream()
            .filter(e -> e.getValue() != null)
            .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));

    if (offsetsForTimestamps.isEmpty()) {
      throw new IllegalArgumentException("No offsets were found for requested timestamps");
    }

    return offsetsForTimestamps;
  }

  protected Map<TopicPartition, Long> offsetsFromBeginning(Consumer<Bytes, Bytes> consumer,
                                                           List<TopicPartition> partitions) {
    return consumer.beginningOffsets(partitions);
  }

}
@@ -0,0 +1,53 @@
package com.provectus.kafka.ui.emitter;

import static org.assertj.core.api.Assertions.assertThat;

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class OffsetsInfoTest {

  final String topic = "test";
  final TopicPartition tp0 = new TopicPartition(topic, 0); // offsets: start 0, end 0
  final TopicPartition tp1 = new TopicPartition(topic, 1); // offsets: start 10, end 10
  final TopicPartition tp2 = new TopicPartition(topic, 2); // offsets: start 0, end 20
  final TopicPartition tp3 = new TopicPartition(topic, 3); // offsets: start 25, end 30

  MockConsumer<Bytes, Bytes> consumer;

  @BeforeEach
  void initMockConsumer() {
    consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    consumer.updatePartitions(
        topic,
        Stream.of(tp0, tp1, tp2, tp3)
            .map(tp -> new PartitionInfo(topic, tp.partition(), null, null, null, null))
            .collect(Collectors.toList()));
    consumer.updateBeginningOffsets(Map.of(tp0, 0L, tp1, 10L, tp2, 0L, tp3, 25L));
    consumer.updateEndOffsets(Map.of(tp0, 0L, tp1, 10L, tp2, 20L, tp3, 30L));
  }

  @Test
  void fillsInnerFieldsAccordingToTopicState() {
    var offsets = new OffsetsInfo(consumer, List.of(tp0, tp1, tp2, tp3));

    assertThat(offsets.getBeginOffsets()).containsEntry(tp0, 0L).containsEntry(tp1, 10L).containsEntry(tp2, 0L)
        .containsEntry(tp3, 25L);

    assertThat(offsets.getEndOffsets()).containsEntry(tp0, 0L).containsEntry(tp1, 10L).containsEntry(tp2, 20L)
        .containsEntry(tp3, 30L);

    assertThat(offsets.getEmptyPartitions()).contains(tp0, tp1);
    assertThat(offsets.getNonEmptyPartitions()).contains(tp2, tp3);
  }

}
@@ -0,0 +1,88 @@
package com.provectus.kafka.ui.emitter;

import static org.assertj.core.api.Assertions.assertThat;

import com.provectus.kafka.ui.model.SeekTypeDTO;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;

class SeekOperationsTest {

  final String topic = "test";
  final TopicPartition tp0 = new TopicPartition(topic, 0); // offsets: start 0, end 0
  final TopicPartition tp1 = new TopicPartition(topic, 1); // offsets: start 10, end 10
  final TopicPartition tp2 = new TopicPartition(topic, 2); // offsets: start 0, end 20
  final TopicPartition tp3 = new TopicPartition(topic, 3); // offsets: start 25, end 30

  MockConsumer<Bytes, Bytes> consumer;

  @BeforeEach
  void initMockConsumer() {
    consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    consumer.updatePartitions(
        topic,
        Stream.of(tp0, tp1, tp2, tp3)
            .map(tp -> new PartitionInfo(topic, tp.partition(), null, null, null, null))
            .collect(Collectors.toList()));
    consumer.updateBeginningOffsets(Map.of(tp0, 0L, tp1, 10L, tp2, 0L, tp3, 25L));
    consumer.updateEndOffsets(Map.of(tp0, 0L, tp1, 10L, tp2, 20L, tp3, 30L));
  }

  @Nested
  class GetOffsetsForSeek {

    @Test
    void latest() {
      var offsets = SeekOperations.getOffsetsForSeek(
          consumer,
          new OffsetsInfo(consumer, topic),
          SeekTypeDTO.LATEST,
          null
      );
      assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 30L));
    }

    @Test
    void beginning() {
      var offsets = SeekOperations.getOffsetsForSeek(
          consumer,
          new OffsetsInfo(consumer, topic),
          SeekTypeDTO.BEGINNING,
          null
      );
      assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 0L, tp3, 25L));
    }

    @Test
    void offsets() {
      var offsets = SeekOperations.getOffsetsForSeek(
          consumer,
          new OffsetsInfo(consumer, topic),
          SeekTypeDTO.OFFSET,
          Map.of(tp1, 10L, tp2, 10L, tp3, 26L)
      );
      assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 10L, tp3, 26L));
    }

    @Test
    void offsetsWithBoundsFixing() {
      var offsets = SeekOperations.getOffsetsForSeek(
          consumer,
          new OffsetsInfo(consumer, topic),
          SeekTypeDTO.OFFSET,
          Map.of(tp1, 10L, tp2, 21L, tp3, 24L)
      );
      assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 25L));
    }
  }

}
@@ -111,10 +111,11 @@ class TailingEmitterTest extends AbstractIntegrationTest {

    return applicationContext.getBean(MessagesService.class)
        .loadMessages(cluster, topicName,
            new ConsumerPosition(SeekTypeDTO.LATEST, Map.of(), SeekDirectionDTO.TAILING),
            new ConsumerPosition(SeekTypeDTO.LATEST, topic, null),
            query,
            MessageFilterTypeDTO.STRING_CONTAINS,
            0,
            SeekDirectionDTO.TAILING,
            "String",
            "String");
  }

@@ -137,7 +138,7 @@ class TailingEmitterTest extends AbstractIntegrationTest {
    Awaitility.await()
        .pollInSameThread()
        .pollDelay(Duration.ofMillis(100))
        .atMost(Duration.ofSeconds(10))
        .atMost(Duration.ofSeconds(200))
        .until(() -> fluxOutput.stream()
            .anyMatch(msg -> msg.getType() == TopicMessageEventDTO.TypeEnum.CONSUMING));
  }
@@ -1,104 +0,0 @@
package com.provectus.kafka.ui.service;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import com.provectus.kafka.ui.client.KsqlClient;
import com.provectus.kafka.ui.exception.KsqlDbNotFoundException;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.InternalKsqlServer;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.KsqlCommandDTO;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.strategy.ksql.statement.BaseStrategy;
import com.provectus.kafka.ui.strategy.ksql.statement.DescribeStrategy;
import com.provectus.kafka.ui.strategy.ksql.statement.ShowStrategy;
import java.util.List;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.jupiter.MockitoExtension;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;

@ExtendWith(MockitoExtension.class)
class KsqlServiceTest {
  private KsqlService ksqlService;
  private BaseStrategy baseStrategy;
  private BaseStrategy alternativeStrategy;

  @Mock
  private ClustersStorage clustersStorage;
  @Mock
  private KsqlClient ksqlClient;

  @BeforeEach
  public void setUp() {
    this.baseStrategy = new ShowStrategy();
    this.alternativeStrategy = new DescribeStrategy();
    this.ksqlService = new KsqlService(
        this.ksqlClient,
        List.of(baseStrategy, alternativeStrategy)
    );
  }

  @Test
  void shouldThrowKsqlDbNotFoundExceptionOnExecuteKsqlCommand() {
    KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("show streams;");
    KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
    when(kafkaCluster.getKsqldbServer()).thenReturn(null);

    StepVerifier.create(ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)))
        .verifyError(KsqlDbNotFoundException.class);
  }

  @Test
  void shouldThrowUnprocessableEntityExceptionOnExecuteKsqlCommand() {
    KsqlCommandDTO command =
        (new KsqlCommandDTO()).ksql("CREATE STREAM users WITH (KAFKA_TOPIC='users');");
    KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
    when(kafkaCluster.getKsqldbServer()).thenReturn(InternalKsqlServer.builder().url("localhost:8088").build());

    StepVerifier.create(ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)))
        .verifyError(UnprocessableEntityException.class);

    StepVerifier.create(ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)))
        .verifyErrorMessage("Invalid sql");
  }

  @Test
  void shouldSetHostToStrategy() {
    String host = "localhost:8088";
    KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("describe streams;");
    KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);

    when(kafkaCluster.getKsqldbServer()).thenReturn(InternalKsqlServer.builder().url(host).build());
    when(ksqlClient.execute(any(), any())).thenReturn(Mono.just(new KsqlCommandResponseDTO()));

    ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)).block();
    assertThat(alternativeStrategy.getUri()).isEqualTo(host + "/ksql");
  }

  @Test
  void shouldCallClientAndReturnResponse() {
    KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("describe streams;");
    KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
    KsqlCommandResponseDTO response = new KsqlCommandResponseDTO().message("success");

    when(kafkaCluster.getKsqldbServer()).thenReturn(InternalKsqlServer.builder().url("host").build());
    when(ksqlClient.execute(any(), any())).thenReturn(Mono.just(response));

    KsqlCommandResponseDTO receivedResponse =
        ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)).block();
    verify(ksqlClient, times(1)).execute(eq(alternativeStrategy), any());
    assertThat(receivedResponse).isEqualTo(response);
  }
}
@@ -45,7 +45,7 @@ class MessagesServiceTest extends AbstractIntegrationTest {
  @Test
  void loadMessagesReturnsExceptionWhenTopicNotFound() {
    StepVerifier.create(messagesService
        .loadMessages(cluster, NON_EXISTING_TOPIC, null, null, null, 1, "String", "String"))
        .loadMessages(cluster, NON_EXISTING_TOPIC, null, null, null, 1, null, "String", "String"))
        .expectError(TopicNotFoundException.class)
        .verify();
  }
@@ -1,8 +1,7 @@
package com.provectus.kafka.ui.service;

import static com.provectus.kafka.ui.model.SeekDirectionDTO.BACKWARD;
import static com.provectus.kafka.ui.model.SeekDirectionDTO.FORWARD;
import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING;
import static com.provectus.kafka.ui.model.SeekTypeDTO.LATEST;
import static com.provectus.kafka.ui.model.SeekTypeDTO.OFFSET;
import static com.provectus.kafka.ui.model.SeekTypeDTO.TIMESTAMP;
import static org.assertj.core.api.Assertions.assertThat;

@@ -17,8 +16,6 @@ import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.serdes.PropertyResolverImpl;
import com.provectus.kafka.ui.serdes.builtin.StringSerde;
import com.provectus.kafka.ui.util.OffsetsSeekBackward;
import com.provectus.kafka.ui.util.OffsetsSeekForward;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;

@@ -112,18 +109,15 @@ class RecordEmitterTest extends AbstractIntegrationTest {
  void pollNothingOnEmptyTopic() {
    var forwardEmitter = new ForwardRecordEmitter(
        this::createConsumer,
        new OffsetsSeekForward(EMPTY_TOPIC,
            new ConsumerPosition(BEGINNING, Map.of(), FORWARD)
        ), RECORD_DESERIALIZER
        new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
        RECORD_DESERIALIZER
    );

    var backwardEmitter = new BackwardRecordEmitter(
        this::createConsumer,
        new OffsetsSeekBackward(
            EMPTY_TOPIC,
            new ConsumerPosition(BEGINNING, Map.of(), BACKWARD),
            100
        ), RECORD_DESERIALIZER
        new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
        100,
        RECORD_DESERIALIZER
    );

    StepVerifier.create(

@@ -143,17 +137,15 @@ class RecordEmitterTest extends AbstractIntegrationTest {
  void pollFullTopicFromBeginning() {
    var forwardEmitter = new ForwardRecordEmitter(
        this::createConsumer,
        new OffsetsSeekForward(TOPIC,
            new ConsumerPosition(BEGINNING, Map.of(), FORWARD)
        ), RECORD_DESERIALIZER
        new ConsumerPosition(BEGINNING, TOPIC, null),
        RECORD_DESERIALIZER
    );

    var backwardEmitter = new BackwardRecordEmitter(
        this::createConsumer,
        new OffsetsSeekBackward(TOPIC,
            new ConsumerPosition(BEGINNING, Map.of(), BACKWARD),
            PARTITIONS * MSGS_PER_PARTITION
        ), RECORD_DESERIALIZER
        new ConsumerPosition(LATEST, TOPIC, null),
        PARTITIONS * MSGS_PER_PARTITION,
        RECORD_DESERIALIZER
    );

    List<String> expectedValues = SENT_RECORDS.stream().map(Record::getValue).collect(Collectors.toList());

@@ -172,17 +164,15 @@ class RecordEmitterTest extends AbstractIntegrationTest {

    var forwardEmitter = new ForwardRecordEmitter(
        this::createConsumer,
        new OffsetsSeekForward(TOPIC,
            new ConsumerPosition(OFFSET, targetOffsets, FORWARD)
        ), RECORD_DESERIALIZER
        new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
        RECORD_DESERIALIZER
    );

    var backwardEmitter = new BackwardRecordEmitter(
        this::createConsumer,
        new OffsetsSeekBackward(TOPIC,
            new ConsumerPosition(OFFSET, targetOffsets, BACKWARD),
            PARTITIONS * MSGS_PER_PARTITION
        ), RECORD_DESERIALIZER
        new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
        PARTITIONS * MSGS_PER_PARTITION,
        RECORD_DESERIALIZER
    );

    var expectedValues = SENT_RECORDS.stream()

@@ -217,17 +207,15 @@ class RecordEmitterTest extends AbstractIntegrationTest {

    var forwardEmitter = new ForwardRecordEmitter(
        this::createConsumer,
        new OffsetsSeekForward(TOPIC,
            new ConsumerPosition(TIMESTAMP, targetTimestamps, FORWARD)
        ), RECORD_DESERIALIZER
        new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
        RECORD_DESERIALIZER
    );

    var backwardEmitter = new BackwardRecordEmitter(
        this::createConsumer,
        new OffsetsSeekBackward(TOPIC,
            new ConsumerPosition(TIMESTAMP, targetTimestamps, BACKWARD),
            PARTITIONS * MSGS_PER_PARTITION
        ), RECORD_DESERIALIZER
        new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
        PARTITIONS * MSGS_PER_PARTITION,
        RECORD_DESERIALIZER
    );

    var expectedValues = SENT_RECORDS.stream()

@@ -255,10 +243,9 @@ class RecordEmitterTest extends AbstractIntegrationTest {

    var backwardEmitter = new BackwardRecordEmitter(
        this::createConsumer,
        new OffsetsSeekBackward(TOPIC,
            new ConsumerPosition(OFFSET, targetOffsets, BACKWARD),
            numMessages
        ), RECORD_DESERIALIZER
        new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
        numMessages,
        RECORD_DESERIALIZER
    );

    var expectedValues = SENT_RECORDS.stream()

@@ -281,10 +268,9 @@ class RecordEmitterTest extends AbstractIntegrationTest {

    var backwardEmitter = new BackwardRecordEmitter(
        this::createConsumer,
        new OffsetsSeekBackward(TOPIC,
            new ConsumerPosition(OFFSET, offsets, BACKWARD),
            100
        ), RECORD_DESERIALIZER
        new ConsumerPosition(OFFSET, TOPIC, offsets),
        100,
        RECORD_DESERIALIZER
    );

    expectEmitter(backwardEmitter,

@@ -331,7 +317,7 @@ class RecordEmitterTest extends AbstractIntegrationTest {
    final Map<String, ? extends Serializable> map = Map.of(
        ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers(),
        ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString(),
        ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 20, // to check multiple polls
        ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 19, // to check multiple polls
        ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class,
        ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class
    );
@@ -502,12 +502,13 @@ public class SendAndReadTests extends AbstractIntegrationTest {
            topic,
            new ConsumerPosition(
                SeekTypeDTO.BEGINNING,
                Map.of(new TopicPartition(topic, 0), 0L),
                SeekDirectionDTO.FORWARD
                topic,
                Map.of(new TopicPartition(topic, 0), 0L)
            ),
            null,
            null,
            1,
            SeekDirectionDTO.FORWARD,
            msgToSend.getKeySerde().get(),
            msgToSend.getValueSerde().get()
        ).filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
@@ -1,85 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class CreateStrategyTest {
  private final ObjectMapper mapper = new ObjectMapper();
  private CreateStrategy strategy;

  @BeforeEach
  void setUp() {
    strategy = new CreateStrategy();
  }

  @Test
  void shouldReturnUri() {
    strategy.host("ksqldb-server:8088");
    assertThat(strategy.getUri()).isEqualTo("ksqldb-server:8088/ksql");
  }

  @Test
  void shouldReturnTrueInTest() {
    assertTrue(strategy.test("CREATE STREAM stream WITH (KAFKA_TOPIC='topic');"));
    assertTrue(strategy.test("CREATE STREAM stream"
        + " AS SELECT users.id AS userid FROM users EMIT CHANGES;"
    ));
    assertTrue(strategy.test(
        "CREATE TABLE table (id VARCHAR) WITH (KAFKA_TOPIC='table');"
    ));
    assertTrue(strategy.test(
        "CREATE TABLE pageviews_regions WITH (KEY_FORMAT='JSON')"
            + " AS SELECT gender, COUNT(*) AS numbers"
            + " FROM pageviews EMIT CHANGES;"
    ));
  }

  @Test
  void shouldReturnFalseInTest() {
    assertFalse(strategy.test("show streams;"));
    assertFalse(strategy.test("show tables;"));
    assertFalse(strategy.test("CREATE TABLE test;"));
    assertFalse(strategy.test("CREATE STREAM test;"));
  }

  @Test
  void shouldSerializeResponse() {
    String message = "updated successful";
    JsonNode node = getResponseWithMessage(message);
    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
    assertThat(serializedResponse.getMessage()).isEqualTo(message);
  }

  @Test
  void shouldSerializeWithException() {
    JsonNode commandStatusNode = mapper.createObjectNode().put("commandStatus", "nodeWithMessage");
    JsonNode node = mapper.createArrayNode().add(mapper.valueToTree(commandStatusNode));
    Exception exception = assertThrows(
        UnprocessableEntityException.class,
        () -> strategy.serializeResponse(node)
    );

    assertThat(exception.getMessage()).isEqualTo("KSQL DB response mapping error");
  }

  @SneakyThrows
  private JsonNode getResponseWithMessage(String message) {
    JsonNode nodeWithMessage = mapper.createObjectNode().put("message", message);
    JsonNode commandStatusNode = mapper.createObjectNode().set("commandStatus", nodeWithMessage);
    return mapper.createArrayNode().add(mapper.valueToTree(commandStatusNode));
  }
}
@@ -1,76 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.List;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class DescribeStrategyTest {
  private final ObjectMapper mapper = new ObjectMapper();
  private DescribeStrategy strategy;

  @BeforeEach
  void setUp() {
    strategy = new DescribeStrategy();
  }

  @Test
  void shouldReturnUri() {
    strategy.host("ksqldb-server:8088");
    assertThat(strategy.getUri()).isEqualTo("ksqldb-server:8088/ksql");
  }

  @Test
  void shouldReturnTrueInTest() {
    assertTrue(strategy.test("DESCRIBE users;"));
    assertTrue(strategy.test("DESCRIBE EXTENDED users;"));
  }

  @Test
  void shouldReturnFalseInTest() {
    assertFalse(strategy.test("list streams;"));
    assertFalse(strategy.test("show tables;"));
  }

  @Test
  void shouldSerializeResponse() {
    JsonNode node = getResponseWithObjectNode();
    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
    TableDTO table = serializedResponse.getData();
    assertThat(table.getHeaders()).isEqualTo(List.of("key", "value"));
    assertThat(table.getRows()).isEqualTo(List.of(List.of("name", "kafka")));
  }

  @Test
  void shouldSerializeWithException() {
    JsonNode sourceDescriptionNode =
        mapper.createObjectNode().put("sourceDescription", "nodeWithMessage");
    JsonNode node = mapper.createArrayNode().add(mapper.valueToTree(sourceDescriptionNode));
    Exception exception = assertThrows(
        UnprocessableEntityException.class,
        () -> strategy.serializeResponse(node)
    );

    assertThat(exception.getMessage()).isEqualTo("KSQL DB response mapping error");
  }

  @SneakyThrows
  private JsonNode getResponseWithObjectNode() {
    JsonNode nodeWithMessage = mapper.createObjectNode().put("name", "kafka");
    JsonNode nodeWithResponse = mapper.createObjectNode().set("sourceDescription", nodeWithMessage);
    return mapper.createArrayNode().add(mapper.valueToTree(nodeWithResponse));
  }
}
@@ -1,75 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class DropStrategyTest {
  private final ObjectMapper mapper = new ObjectMapper();
  private DropStrategy strategy;

  @BeforeEach
  void setUp() {
    strategy = new DropStrategy();
  }

  @Test
  void shouldReturnUri() {
    strategy.host("ksqldb-server:8088");
    assertThat(strategy.getUri()).isEqualTo("ksqldb-server:8088/ksql");
  }

  @Test
  void shouldReturnTrueInTest() {
    assertTrue(strategy.test("drop table table1;"));
    assertTrue(strategy.test("drop stream stream2;"));
  }

  @Test
  void shouldReturnFalseInTest() {
    assertFalse(strategy.test("show streams;"));
    assertFalse(strategy.test("show tables;"));
    assertFalse(strategy.test("create table test;"));
    assertFalse(strategy.test("create stream test;"));
  }

  @Test
  void shouldSerializeResponse() {
    String message = "updated successful";
    JsonNode node = getResponseWithMessage(message);
    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
    assertThat(serializedResponse.getMessage()).isEqualTo(message);
  }

  @Test
  void shouldSerializeWithException() {
    JsonNode commandStatusNode = mapper.createObjectNode().put("commandStatus", "nodeWithMessage");
    JsonNode node = mapper.createArrayNode().add(mapper.valueToTree(commandStatusNode));
    Exception exception = assertThrows(
        UnprocessableEntityException.class,
        () -> strategy.serializeResponse(node)
    );

    assertThat(exception.getMessage()).isEqualTo("KSQL DB response mapping error");
  }

  @SneakyThrows
  private JsonNode getResponseWithMessage(String message) {
    JsonNode nodeWithMessage = mapper.createObjectNode().put("message", message);
    JsonNode commandStatusNode = mapper.createObjectNode().set("commandStatus", nodeWithMessage);
    return mapper.createArrayNode().add(mapper.valueToTree(commandStatusNode));
  }
}
@@ -1,74 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.List;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class ExplainStrategyTest {
  private final ObjectMapper mapper = new ObjectMapper();
  private ExplainStrategy strategy;

  @BeforeEach
  void setUp() {
    strategy = new ExplainStrategy();
  }

  @Test
  void shouldReturnUri() {
    strategy.host("ksqldb-server:8088");
    assertThat(strategy.getUri()).isEqualTo("ksqldb-server:8088/ksql");
  }

  @Test
  void shouldReturnTrueInTest() {
    assertTrue(strategy.test("explain users_query_id;"));
  }

  @Test
  void shouldReturnFalseInTest() {
    assertFalse(strategy.test("show queries;"));
  }

  @Test
  void shouldSerializeResponse() {
    JsonNode node = getResponseWithObjectNode();
    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
    TableDTO table = serializedResponse.getData();
    assertThat(table.getHeaders()).isEqualTo(List.of("key", "value"));
    assertThat(table.getRows()).isEqualTo(List.of(List.of("name", "kafka")));
  }

  @Test
  void shouldSerializeWithException() {
    JsonNode sourceDescriptionNode =
        mapper.createObjectNode().put("sourceDescription", "nodeWithMessage");
    JsonNode node = mapper.createArrayNode().add(mapper.valueToTree(sourceDescriptionNode));
    Exception exception = assertThrows(
        UnprocessableEntityException.class,
        () -> strategy.serializeResponse(node)
    );

    assertThat(exception.getMessage()).isEqualTo("KSQL DB response mapping error");
  }

  @SneakyThrows
  private JsonNode getResponseWithObjectNode() {
    JsonNode nodeWithMessage = mapper.createObjectNode().put("name", "kafka");
    JsonNode nodeWithResponse = mapper.createObjectNode().set("queryDescription", nodeWithMessage);
    return mapper.createArrayNode().add(mapper.valueToTree(nodeWithResponse));
  }
}
@@ -1,79 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.List;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class SelectStrategyTest {
  private final ObjectMapper mapper = new ObjectMapper();
  private SelectStrategy strategy;

  @BeforeEach
  void setUp() {
    strategy = new SelectStrategy();
  }

  @Test
  void shouldReturnUri() {
    strategy.host("ksqldb-server:8088");
    assertThat(strategy.getUri()).isEqualTo("ksqldb-server:8088/query");
  }

  @Test
  void shouldReturnTrueInTest() {
    assertTrue(strategy.test("select * from users;"));
  }

  @Test
  void shouldReturnFalseInTest() {
    assertFalse(strategy.test("show streams;"));
    assertFalse(strategy.test("select *;"));
  }

  @Test
  void shouldSerializeResponse() {
    JsonNode node = getResponseWithData();
    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
    TableDTO table = serializedResponse.getData();
    assertThat(table.getHeaders()).isEqualTo(List.of("header1", "header2"));
    assertThat(table.getRows()).isEqualTo(List.of(List.of("value1", "value2")));
  }

  @Test
  void shouldSerializeWithException() {
    JsonNode node = mapper.createObjectNode();
    Exception exception = assertThrows(
        UnprocessableEntityException.class,
        () -> strategy.serializeResponse(node)
    );

    assertThat(exception.getMessage()).isEqualTo("KSQL DB response mapping error");
  }

  @SneakyThrows
  private JsonNode getResponseWithData() {
    JsonNode headerNode = mapper.createObjectNode().set(
        "header", mapper.createObjectNode().put("schema", "header1, header2")
    );
    JsonNode row = mapper.createObjectNode().set(
        "row", mapper.createObjectNode().set(
            "columns", mapper.createArrayNode().add("value1").add("value2")
        )
    );
    return mapper.createArrayNode().add(headerNode).add(row);
  }
}
@@ -1,102 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.List;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DynamicTest;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestFactory;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class ShowStrategyTest {
  private final ObjectMapper mapper = new ObjectMapper();
  private ShowStrategy strategy;

  @BeforeEach
  void setUp() {
    strategy = new ShowStrategy();
  }

  @Test
  void shouldReturnUri() {
    strategy.host("ksqldb-server:8088");
    assertThat(strategy.getUri()).isEqualTo("ksqldb-server:8088/ksql");
  }

  @Test
  void shouldReturnTrueInTest() {
    assertTrue(strategy.test("SHOW STREAMS;"));
    assertTrue(strategy.test("SHOW TABLES;"));
    assertTrue(strategy.test("SHOW TOPICS;"));
    assertTrue(strategy.test("SHOW QUERIES;"));
    assertTrue(strategy.test("SHOW PROPERTIES;"));
    assertTrue(strategy.test("SHOW FUNCTIONS;"));
    assertTrue(strategy.test("LIST STREAMS;"));
    assertTrue(strategy.test("LIST TABLES;"));
    assertTrue(strategy.test("LIST TOPICS;"));
    assertTrue(strategy.test("LIST FUNCTIONS;"));
  }

  @Test
  void shouldReturnFalseInTest() {
    assertFalse(strategy.test("LIST QUERIES;"));
    assertFalse(strategy.test("LIST PROPERTIES;"));
  }

  @TestFactory
  public Iterable<DynamicTest> shouldSerialize() {
    return List.of(
        shouldSerializeGenerate("streams", "show streams;"),
        shouldSerializeGenerate("tables", "show tables;"),
        shouldSerializeGenerate("topics", "show topics;"),
        shouldSerializeGenerate("properties", "show properties;"),
        shouldSerializeGenerate("functions", "show functions;"),
        shouldSerializeGenerate("queries", "show queries;")
    );
  }

  public DynamicTest shouldSerializeGenerate(final String key, final String sql) {
    return DynamicTest.dynamicTest("Should serialize " + key,
        () -> {
          JsonNode node = getResponseWithData(key);
          strategy.test(sql);
          KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
          TableDTO table = serializedResponse.getData();
          assertThat(table.getHeaders()).isEqualTo(List.of("header"));
          assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
        }
    );
  }

  @Test
  void shouldSerializeWithException() {
    JsonNode node = getResponseWithData("streams");
    strategy.test("show tables;");
    Exception exception = assertThrows(
        UnprocessableEntityException.class,
        () -> strategy.serializeResponse(node)
    );

    assertThat(exception.getMessage()).isEqualTo("KSQL DB response mapping error");
  }

  @SneakyThrows
  private JsonNode getResponseWithData(String key) {
    JsonNode nodeWithDataItem = mapper.createObjectNode().put("header", "value");
    JsonNode nodeWithData = mapper.createArrayNode().add(nodeWithDataItem);
    JsonNode nodeWithResponse = mapper.createObjectNode().set(key, nodeWithData);
    return mapper.createArrayNode().add(mapper.valueToTree(nodeWithResponse));
  }
}
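ShowStrategyTest above is the one class in this group that relies on JUnit 5's @TestFactory: instead of six near-identical @Test methods, it returns a collection of DynamicTest instances, one per SHOW/LIST keyword, and each lambda is reported as its own test case. A minimal, self-contained sketch of that pattern (class and keyword names here are illustrative, not from the codebase):

import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.List;
import java.util.stream.Collectors;
import org.junit.jupiter.api.DynamicTest;
import org.junit.jupiter.api.TestFactory;

class DynamicTestSketch {

  @TestFactory
  Iterable<DynamicTest> generatesOneTestPerKeyword() {
    // each DynamicTest runs and is reported as an independent test case
    return List.of("streams", "tables", "topics").stream()
        .map(keyword -> DynamicTest.dynamicTest("Should serialize " + keyword,
            () -> assertTrue(keyword.endsWith("s"))))
        .collect(Collectors.toList());
  }
}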
@@ -1,72 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class TerminateStrategyTest {
  private final ObjectMapper mapper = new ObjectMapper();
  private TerminateStrategy strategy;

  @BeforeEach
  void setUp() {
    strategy = new TerminateStrategy();
  }

  @Test
  void shouldReturnUri() {
    strategy.host("ksqldb-server:8088");
    assertThat(strategy.getUri()).isEqualTo("ksqldb-server:8088/ksql");
  }

  @Test
  void shouldReturnTrueInTest() {
    assertTrue(strategy.test("terminate query_id;"));
  }

  @Test
  void shouldReturnFalseInTest() {
    assertFalse(strategy.test("show streams;"));
    assertFalse(strategy.test("create table test;"));
  }

  @Test
  void shouldSerializeResponse() {
    String message = "query terminated.";
    JsonNode node = getResponseWithMessage(message);
    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
    assertThat(serializedResponse.getMessage()).isEqualTo(message);
  }

  @Test
  void shouldSerializeWithException() {
    JsonNode commandStatusNode = mapper.createObjectNode().put("commandStatus", "nodeWithMessage");
    JsonNode node = mapper.createArrayNode().add(mapper.valueToTree(commandStatusNode));
    Exception exception = assertThrows(
        UnprocessableEntityException.class,
        () -> strategy.serializeResponse(node)
    );

    assertThat(exception.getMessage()).isEqualTo("KSQL DB response mapping error");
  }

  @SneakyThrows
  private JsonNode getResponseWithMessage(String message) {
    JsonNode nodeWithMessage = mapper.createObjectNode().put("message", message);
    JsonNode commandStatusNode = mapper.createObjectNode().set("commandStatus", nodeWithMessage);
    return mapper.createArrayNode().add(mapper.valueToTree(commandStatusNode));
  }
}
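All four deleted test classes exercise the same three-method contract of the removed KSQL v1 statement strategies: test() decides whether a strategy claims a statement, getUri() appends the strategy's endpoint path to the configured host, and serializeResponse() maps the ksqlDB JSON into KsqlCommandResponseDTO. A hedged reconstruction of that contract, inferred from the assertions alone (the real base class is removed in this commit together with the tests):

// Sketch of the contract the deleted tests exercised; names are inferred
// from the assertions above, not copied from the removed implementation.
abstract class KsqlStatementStrategySketch {
  private String host;

  // true if this strategy should handle the given KSQL statement
  abstract boolean test(String statement);

  // endpoint path, e.g. "/ksql" for most statements and "/query" for SELECT
  abstract String path();

  void host(String host) {
    this.host = host;
  }

  String getUri() {
    return host + path(); // e.g. "ksqldb-server:8088/ksql"
  }
}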
@@ -1,196 +0,0 @@
package com.provectus.kafka.ui.util;

import static org.assertj.core.api.Assertions.assertThat;

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;

class OffsetsSeekTest {

  final String topic = "test";
  final TopicPartition tp0 = new TopicPartition(topic, 0); // offsets: start 0, end 0
  final TopicPartition tp1 = new TopicPartition(topic, 1); // offsets: start 10, end 10
  final TopicPartition tp2 = new TopicPartition(topic, 2); // offsets: start 0, end 20
  final TopicPartition tp3 = new TopicPartition(topic, 3); // offsets: start 25, end 30

  MockConsumer<Bytes, Bytes> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);

  @BeforeEach
  void initConsumer() {
    consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    consumer.updatePartitions(
        topic,
        Stream.of(tp0, tp1, tp2, tp3)
            .map(tp -> new PartitionInfo(topic, tp.partition(), null, null, null, null))
            .collect(Collectors.toList()));
    consumer.updateBeginningOffsets(Map.of(
        tp0, 0L,
        tp1, 10L,
        tp2, 0L,
        tp3, 25L
    ));
    consumer.updateEndOffsets(Map.of(
        tp0, 0L,
        tp1, 10L,
        tp2, 20L,
        tp3, 30L
    ));
  }

  @Test
  void forwardSeekToBeginningAllPartitions() {
    var seek = new OffsetsSeekForward(
        topic,
        new ConsumerPosition(
            SeekTypeDTO.BEGINNING,
            Map.of(tp0, 0L, tp1, 0L),
            SeekDirectionDTO.FORWARD
        )
    );

    seek.assignAndSeek(consumer);
    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1);
    assertThat(consumer.position(tp0)).isZero();
    assertThat(consumer.position(tp1)).isEqualTo(10L);
  }

  @Test
  void backwardSeekToBeginningAllPartitions() {
    var seek = new OffsetsSeekBackward(
        topic,
        new ConsumerPosition(
            SeekTypeDTO.BEGINNING,
            Map.of(tp2, 0L, tp3, 0L),
            SeekDirectionDTO.BACKWARD
        ),
        10
    );

    seek.assignAndSeek(consumer);
    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp2, tp3);
    assertThat(consumer.position(tp2)).isEqualTo(20L);
    assertThat(consumer.position(tp3)).isEqualTo(30L);
  }

  @Test
  void forwardSeekToBeginningWithPartitionsList() {
    var seek = new OffsetsSeekForward(
        topic,
        new ConsumerPosition(SeekTypeDTO.BEGINNING, Map.of(), SeekDirectionDTO.FORWARD));
    seek.assignAndSeek(consumer);
    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2, tp3);
    assertThat(consumer.position(tp0)).isZero();
    assertThat(consumer.position(tp1)).isEqualTo(10L);
    assertThat(consumer.position(tp2)).isZero();
    assertThat(consumer.position(tp3)).isEqualTo(25L);
  }

  @Test
  void backwardSeekToBeginningWithPartitionsList() {
    var seek = new OffsetsSeekBackward(
        topic,
        new ConsumerPosition(SeekTypeDTO.BEGINNING, Map.of(), SeekDirectionDTO.BACKWARD),
        10
    );
    seek.assignAndSeek(consumer);
    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2, tp3);
    assertThat(consumer.position(tp0)).isZero();
    assertThat(consumer.position(tp1)).isEqualTo(10L);
    assertThat(consumer.position(tp2)).isEqualTo(20L);
    assertThat(consumer.position(tp3)).isEqualTo(30L);
  }

  @Test
  void forwardSeekToOffset() {
    var seek = new OffsetsSeekForward(
        topic,
        new ConsumerPosition(
            SeekTypeDTO.OFFSET,
            Map.of(tp0, 0L, tp1, 1L, tp2, 2L),
            SeekDirectionDTO.FORWARD
        )
    );
    seek.assignAndSeek(consumer);
    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp2);
    assertThat(consumer.position(tp2)).isEqualTo(2L);
  }

  @Test
  void backwardSeekToOffset() {
    var seek = new OffsetsSeekBackward(
        topic,
        new ConsumerPosition(
            SeekTypeDTO.OFFSET,
            Map.of(tp0, 0L, tp1, 1L, tp2, 20L),
            SeekDirectionDTO.BACKWARD
        ),
        2
    );
    seek.assignAndSeek(consumer);
    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp2);
    assertThat(consumer.position(tp2)).isEqualTo(20L);
  }

  @Test
  void backwardSeekToOffsetOnlyOnePartition() {
    var seek = new OffsetsSeekBackward(
        topic,
        new ConsumerPosition(
            SeekTypeDTO.OFFSET,
            Map.of(tp2, 20L),
            SeekDirectionDTO.BACKWARD
        ),
        20
    );
    seek.assignAndSeek(consumer);
    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp2);
    assertThat(consumer.position(tp2)).isEqualTo(20L);
  }

  @Nested
  class WaitingOffsetsTest {

    OffsetsSeekForward.WaitingOffsets offsets;

    @BeforeEach
    void assignAndCreateOffsets() {
      consumer.assign(List.of(tp0, tp1, tp2, tp3));
      offsets = new OffsetsSeek.WaitingOffsets(topic, consumer, List.of(tp0, tp1, tp2, tp3));
    }

    @Test
    void collectsSignificantOffsetsMinus1ForAssignedPartitions() {
      // offsets for partitions 0 & 1 should be skipped because they
      // effectively contain no data (start offset = end offset)
      assertThat(offsets.getEndOffsets()).containsExactlyInAnyOrderEntriesOf(
          Map.of(2, 19L, 3, 29L)
      );
    }

    @Test
    void returnTrueWhenOffsetsReached() {
      assertThat(offsets.endReached()).isFalse();
      offsets.markPolled(new ConsumerRecord<>(topic, 2, 19, null, null));
      assertThat(offsets.endReached()).isFalse();
      offsets.markPolled(new ConsumerRecord<>(topic, 3, 29, null, null));
      assertThat(offsets.endReached()).isTrue();
    }
  }
}
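The arithmetic behind the WaitingOffsets assertions is worth spelling out: end offsets are exclusive, so the last record of a non-empty partition sits at endOffset - 1, and partitions whose beginning and end offsets coincide (tp0 and tp1 above) are skipped entirely. A small sketch of that bookkeeping, written against the fixture values rather than the removed implementation:

import java.util.HashMap;
import java.util.Map;

class SignificantOffsetsSketch {

  // partition -> last offset that actually holds a record; empty partitions skipped
  static Map<Integer, Long> significantOffsets(Map<Integer, Long> begin,
                                               Map<Integer, Long> end) {
    Map<Integer, Long> result = new HashMap<>();
    end.forEach((partition, endOffset) -> {
      if (endOffset > begin.getOrDefault(partition, 0L)) {
        result.put(partition, endOffset - 1);
      }
    });
    return result; // for the fixture above: {2=19, 3=29}
  }
}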
@@ -862,28 +862,6 @@ paths:
        200:
          description: OK

  /api/clusters/{clusterName}/consumer-groups:
    get:
      tags:
        - Consumer Groups
      summary: get all ConsumerGroups
      operationId: getConsumerGroups
      parameters:
        - name: clusterName
          in: path
          required: true
          schema:
            type: string
      responses:
        200:
          description: OK
          content:
            application/json:
              schema:
                type: array
                items:
                  $ref: '#/components/schemas/ConsumerGroup'

  /api/clusters/{clusterName}/consumer-groups/{id}/offsets:
    post:
      tags:
@@ -1561,31 +1539,6 @@ paths:
        200:
          description: OK

  /api/clusters/{clusterName}/ksql:
    description: Deprecated - use ksql/v2 instead!
    post:
      tags:
        - Ksql
      summary: executeKsqlCommand
      operationId: executeKsqlCommand
      parameters:
        - name: clusterName
          in: path
          required: true
          schema:
            type: string
      requestBody:
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/KsqlCommand'
      responses:
        200:
          description: OK
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/KsqlCommandResponse'

  /api/clusters/{clusterName}/ksql/v2:
    post:
@@ -1885,7 +1838,7 @@ paths:
    get:
      tags:
        - TimeStampFormat
      summary: getTimeStampFormat
      summary: get system default datetime format
      operationId: getTimeStampFormat
      responses:
        200:
@@ -1894,6 +1847,21 @@
          application/json:
            schema:
              $ref: '#/components/schemas/TimeStampFormat'

  /api/info/timestampformat/iso:
    get:
      tags:
        - TimeStampFormat
      summary: get system default datetime format (in ISO format, for JS)
      operationId: getTimeStampFormatISO
      responses:
        200:
          description: OK
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/TimeStampFormat'

components:
  schemas:
    TopicSerdeSuggestion:
@@ -3094,18 +3062,6 @@ components:
          items:
            $ref: '#/components/schemas/ConnectorPluginConfig'

    KsqlCommand:
      type: object
      properties:
        ksql:
          type: string
        streamsProperties:
          type: object
          additionalProperties:
            type: string
      required:
        - ksql

    KsqlCommandV2:
      type: object
      properties:
@@ -3152,31 +3108,6 @@ components:
        valueFormat:
          type: string

    KsqlCommandResponse:
      type: object
      properties:
        data:
          $ref: '#/components/schemas/Table'
        message:
          type: string

    Table:
      type: object
      properties:
        headers:
          type: array
          items:
            type: string
        rows:
          type: array
          items:
            type: array
            items:
              type: string
      required:
        - headers
        - rows

    KsqlResponse:
      type: object
      properties:
@@ -32,7 +32,7 @@
    <maven.surefire-plugin.version>2.22.2</maven.surefire-plugin.version>
    <allure-maven.version>2.10.0</allure-maven.version>
    <kafka.version>3.0.0</kafka.version>
    <netty.version>4.1.77.Final</netty.version>
    <netty.version>4.1.84.Final</netty.version>
    <qase.io.version>2.1.3</qase.io.version>
  </properties>
@@ -6,5 +6,5 @@ import lombok.experimental.Accessors;
@Data
@Accessors(chain = true)
public class Topic {
  private String name, compactPolicyValue, timeToRetainData, maxSizeOnDisk, maxMessageBytes, messageKey, messageContent;
  private String name, cleanupPolicyValue, timeToRetainData, maxSizeOnDisk, maxMessageBytes, messageKey, messageContent;
}
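The rename only ripples through cleanly because the model is a Lombok chain-accessor bean: @Data generates the getters and setters, and @Accessors(chain = true) makes every setter return this, which is what lets the test fixtures later in this commit build a Topic fluently. A minimal sketch of the mechanism (field set trimmed for illustration):

import lombok.Data;
import lombok.experimental.Accessors;

@Data
@Accessors(chain = true)
class TopicSketch {
  private String name;
  private String cleanupPolicyValue;
}

// usage: setters chain because each returns `this`
// new TopicSketch().setName("topic-to-update").setCleanupPolicyValue("Compact");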
@@ -35,7 +35,8 @@ public class KafkaConnectList {

  @Step
  public KafkaConnectList openConnector(String connectorName) {
    $x(String.format(tabElementLocator,connectorName)).shouldBe(Condition.visible).click();
    $x(String.format(tabElementLocator,connectorName))
        .shouldBe(Condition.enabled).click();
    return this;
  }
@@ -30,7 +30,8 @@ public class SchemaRegistryList {

  @Step
  public SchemaRegistryList openSchema(String schemaName) {
    $x(String.format(schemaTabElementLocator,schemaName)).shouldBe(Condition.visible).click();
    $x(String.format(schemaTabElementLocator,schemaName))
        .shouldBe(Condition.enabled).click();
    return this;
  }
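Both page objects above converge on the same Selenide idiom: wait for the tab link to be interactable (Condition.enabled) rather than merely visible before clicking, which reduces flakiness when the element is rendered but not yet clickable. A standalone sketch of the pattern, with an illustrative locator rather than one from the codebase:

import static com.codeborne.selenide.Selenide.$x;

import com.codeborne.selenide.Condition;

class ClickWhenEnabledSketch {

  void openByName(String name) {
    $x(String.format("//a[text()='%s']", name))
        .shouldBe(Condition.enabled) // polls until enabled or the timeout expires
        .click();
  }
}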
@@ -17,6 +17,8 @@ public class TopicDetails {

  protected SelenideElement loadingSpinner = $x("//*[contains(text(),'Loading')]");
  protected SelenideElement dotMenuBtn = $$x("//button[@aria-label='Dropdown Toggle']").first();
  protected SelenideElement dotPartitionIdMenuBtn = $(By.cssSelector("button.sc-hOqruk.eYtACj"));
  protected SelenideElement clearMessagesBtn = $x(("//div[contains(text(), 'Clear messages')]"));
  protected SelenideElement overviewTab = $x("//a[contains(text(),'Overview')]");
  protected SelenideElement messagesTab = $x("//a[contains(text(),'Messages')]");
  protected SelenideElement editSettingsTab = $x("//li[@role][contains(text(),'Edit settings')]");
@@ -45,6 +47,18 @@ public class TopicDetails {
    return this;
  }

  @Step
  public TopicDetails openDotPartitionIdMenu() {
    dotPartitionIdMenuBtn.shouldBe(Condition.visible.because("dot menu invisible")).click();
    return this;
  }

  @Step
  public TopicDetails clickClearMessagesBtn() {
    clearMessagesBtn.shouldBe(Condition.visible.because("Clear Messages invisible")).click();
    return this;
  }

  @Step
  public TopicDetails deleteTopic() {
    clickByJavaScript(dotMenuBtn);
@@ -70,6 +84,11 @@ public class TopicDetails {
    return contentMessage.matches(contentMessageTab.getText().trim());
  }

  @Step
  public String MessageCountAmount() {
    return $(By.xpath("//table[@class=\"sc-hiSbEG cvnuic\"]/tbody/tr/td[5]")).getText();
  }

  private enum DotMenuHeaderItems {
    EDIT_SETTINGS("Edit settings"),
    CLEAR_MESSAGES("Clear messages"),
@@ -91,6 +110,26 @@ public class TopicDetails {
    }
  }

  public enum DotPartitionIdMenu {
    CLEAR_MESSAGES("Clear messages");

    private final String value;

    DotPartitionIdMenu(String value) {
      this.value = value;
    }

    public String getValue() {
      return value;
    }

    @Override
    public String toString() {
      return "DotPartitionIdMenuItems{" + "value='" + value + '\'' + '}';
    }
  }

  public enum TopicMenu {
    OVERVIEW("Overview"),
    MESSAGES("Messages"),
@@ -42,7 +42,8 @@ public class TopicsList {

  @Step
  public TopicsList openTopic(String topicName) {
    $(By.linkText(topicName)).click();
    $(By.linkText(topicName))
        .shouldBe(Condition.enabled).click();
    return this;
  }
}
@@ -6,6 +6,7 @@ import com.provectus.kafka.ui.pages.topic.TopicDetails;
import com.provectus.kafka.ui.utilities.qaseIoUtils.annotations.AutomationStatus;
import com.provectus.kafka.ui.utilities.qaseIoUtils.annotations.Suite;
import com.provectus.kafka.ui.utilities.qaseIoUtils.enums.Status;
import io.qameta.allure.Issue;
import io.qase.api.annotation.CaseId;
import org.assertj.core.api.SoftAssertions;
import org.junit.jupiter.api.*;
@@ -14,6 +15,7 @@ import java.util.ArrayList;
import java.util.List;

import static com.provectus.kafka.ui.pages.NaviSideBar.SideMenuOption.TOPICS;
import static com.provectus.kafka.ui.pages.topic.TopicDetails.DotPartitionIdMenu.CLEAR_MESSAGES;
import static com.provectus.kafka.ui.settings.Source.CLUSTER_NAME;
import static com.provectus.kafka.ui.utilities.FileUtils.fileToString;
@@ -23,18 +25,23 @@ public class TopicTests extends BaseTest {
  private static final String SUITE_TITLE = "Topics";
  private static final Topic TOPIC_FOR_UPDATE = new Topic()
      .setName("topic-to-update")
      .setCompactPolicyValue("Compact")
      .setCleanupPolicyValue("Compact")
      .setTimeToRetainData("604800001")
      .setMaxSizeOnDisk("20 GB")
      .setMaxMessageBytes("1000020")
      .setMessageKey(fileToString(System.getProperty("user.dir") + "/src/test/resources/producedkey.txt"))
      .setMessageContent(fileToString(System.getProperty("user.dir") + "/src/test/resources/testData.txt"));
  private static final Topic TOPIC_FOR_MESSAGES = new Topic()
      .setName("topic-with-clean-message-attribute")
      .setMessageKey(fileToString(System.getProperty("user.dir") + "/src/test/resources/producedkey.txt"))
      .setMessageContent(fileToString(System.getProperty("user.dir") + "/src/test/resources/testData.txt"));

  private static final Topic TOPIC_FOR_DELETE = new Topic().setName("topic-to-delete");
  private static final List<Topic> TOPIC_LIST = new ArrayList<>();

  @BeforeAll
  public void beforeAll() {
    TOPIC_LIST.addAll(List.of(TOPIC_FOR_UPDATE, TOPIC_FOR_DELETE));
    TOPIC_LIST.addAll(List.of(TOPIC_FOR_UPDATE, TOPIC_FOR_DELETE, TOPIC_FOR_MESSAGES));
    TOPIC_LIST.forEach(topic -> apiHelper.createTopic(CLUSTER_NAME, topic.getName()));
  }
@@ -81,7 +88,7 @@ public class TopicTests extends BaseTest {
        .openEditSettings();
    topicCreateEditForm
        .waitUntilScreenReady()
        .selectCleanupPolicy(TOPIC_FOR_UPDATE.getCompactPolicyValue())
        .selectCleanupPolicy(TOPIC_FOR_UPDATE.getCleanupPolicyValue())
        .setMinInsyncReplicas(10)
        .setTimeToRetainDataInMs(TOPIC_FOR_UPDATE.getTimeToRetainData())
        .setMaxSizeOnDiskInGB(TOPIC_FOR_UPDATE.getMaxSizeOnDisk())
@@ -98,7 +105,7 @@ public class TopicTests extends BaseTest {
        .waitUntilScreenReady()
        .openEditSettings();
    SoftAssertions softly = new SoftAssertions();
    softly.assertThat(topicCreateEditForm.getCleanupPolicy()).as("Cleanup Policy").isEqualTo(TOPIC_FOR_UPDATE.getCompactPolicyValue());
    softly.assertThat(topicCreateEditForm.getCleanupPolicy()).as("Cleanup Policy").isEqualTo(TOPIC_FOR_UPDATE.getCleanupPolicyValue());
    softly.assertThat(topicCreateEditForm.getTimeToRetain()).as("Time to retain").isEqualTo(TOPIC_FOR_UPDATE.getTimeToRetainData());
    softly.assertThat(topicCreateEditForm.getMaxSizeOnDisk()).as("Max size on disk").isEqualTo(TOPIC_FOR_UPDATE.getMaxSizeOnDisk());
    softly.assertThat(topicCreateEditForm.getMaxMessageBytes()).as("Max message bytes").isEqualTo(TOPIC_FOR_UPDATE.getMaxMessageBytes());
@@ -126,7 +133,7 @@ public class TopicTests extends BaseTest {
    Assertions.assertFalse(topicsList.isTopicVisible(TOPIC_FOR_DELETE.getName()), "isTopicVisible");
    TOPIC_LIST.remove(TOPIC_FOR_DELETE);
  }

  @DisplayName("produce message")
  @Suite(suiteId = SUITE_ID, title = SUITE_TITLE)
  @AutomationStatus(status = Status.AUTOMATED)
@@ -137,24 +144,55 @@ public class TopicTests extends BaseTest {
        .openSideMenu(TOPICS);
    topicsList
        .waitUntilScreenReady()
        .openTopic(TOPIC_FOR_UPDATE.getName());
        .openTopic(TOPIC_FOR_MESSAGES.getName());
    topicDetails
        .waitUntilScreenReady()
        .openTopicMenu(TopicDetails.TopicMenu.MESSAGES)
        .clickProduceMessageBtn();
    produceMessagePanel
        .waitUntilScreenReady()
        .setContentFiled(TOPIC_FOR_UPDATE.getMessageContent())
        .setKeyField(TOPIC_FOR_UPDATE.getMessageKey())
        .setContentFiled(TOPIC_FOR_MESSAGES.getMessageContent())
        .setKeyField(TOPIC_FOR_MESSAGES.getMessageKey())
        .submitProduceMessage();
    topicDetails
        .waitUntilScreenReady();
    SoftAssertions softly = new SoftAssertions();
    softly.assertThat(topicDetails.isKeyMessageVisible((TOPIC_FOR_UPDATE.getMessageKey()))).withFailMessage("isKeyMessageVisible()").isTrue();
    softly.assertThat(topicDetails.isContentMessageVisible((TOPIC_FOR_UPDATE.getMessageContent()).trim())).withFailMessage("isContentMessageVisible()").isTrue();
    softly.assertThat(topicDetails.isKeyMessageVisible((TOPIC_FOR_MESSAGES.getMessageKey()))).withFailMessage("isKeyMessageVisible()").isTrue();
    softly.assertThat(topicDetails.isContentMessageVisible((TOPIC_FOR_MESSAGES.getMessageContent()).trim())).withFailMessage("isContentMessageVisible()").isTrue();
    softly.assertAll();
  }

  @Issue("Uncomment last assertion after bug https://github.com/provectus/kafka-ui/issues/2778 fix")
  @DisplayName("clear message")
  @Suite(suiteId = SUITE_ID, title = SUITE_TITLE)
  @AutomationStatus(status = Status.AUTOMATED)
  @CaseId(19)
  @Test
  void clearMessage() {
    naviSideBar
        .openSideMenu(TOPICS);
    topicsList
        .waitUntilScreenReady()
        .openTopic(TOPIC_FOR_MESSAGES.getName());
    topicDetails
        .waitUntilScreenReady()
        .openTopicMenu(TopicDetails.TopicMenu.OVERVIEW)
        .clickProduceMessageBtn();
    produceMessagePanel
        .waitUntilScreenReady()
        .setContentFiled(TOPIC_FOR_MESSAGES.getMessageContent())
        .setKeyField(TOPIC_FOR_MESSAGES.getMessageKey())
        .submitProduceMessage();
    topicDetails
        .waitUntilScreenReady();
    String messageAmount = topicDetails.MessageCountAmount();
    Assertions.assertEquals(messageAmount,topicDetails.MessageCountAmount());
    topicDetails
        .openDotPartitionIdMenu()
        .clickClearMessagesBtn();
    // Assertions.assertEquals(Integer.toString(Integer.valueOf(messageAmount)-1),topicDetails.MessageCountAmount());
  }

  @AfterAll
  public void afterAll() {
    TOPIC_LIST.forEach(topic -> apiHelper.deleteTopic(CLUSTER_NAME, topic.getName()));
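One caveat in clearMessage() above: it snapshots MessageCountAmount() and immediately asserts it against itself, and the meaningful post-clear assertion stays commented out until issue #2778 is fixed. When it is re-enabled, the counter updates asynchronously, so polling the assertion is likely safer than a one-shot equality check. A sketch using Awaitility (an assumption on my part, not currently wired into this module):

import static org.awaitility.Awaitility.await;

import java.time.Duration;
import java.util.function.Supplier;
import org.junit.jupiter.api.Assertions;

class PolledCountSketch {

  // retry the assertion until it passes or 10 seconds elapse
  static void assertCountBecomes(Supplier<String> counter, String expected) {
    await().atMost(Duration.ofSeconds(10))
        .untilAsserted(() -> Assertions.assertEquals(expected, counter.get()));
  }
}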
@@ -66,4 +66,4 @@ pnpm start
```

## Links

* [Create React App](https://github.com/facebook/create-react-app)
* [Vite](https://github.com/vitejs/vite)
@@ -16,7 +16,6 @@
    "@tanstack/react-table": "^8.5.10",
    "@testing-library/react": "^13.2.0",
    "@types/testing-library__jest-dom": "^5.14.5",
    "@types/yup": "^0.29.13",
    "@vitejs/plugin-react": "^2.0.0",
    "ace-builds": "^1.7.1",
    "ajv": "^8.6.3",

@@ -47,7 +46,7 @@
    "vite": "^3.0.2",
    "vite-tsconfig-paths": "^3.5.0",
    "whatwg-fetch": "^3.6.2",
    "yup": "^0.32.9",
    "yup": "^0.32.11",
    "zustand": "^4.1.1"
  },
  "lint-staged": {

@@ -83,7 +82,7 @@
    "@openapitools/openapi-generator-cli": "^2.5.1",
    "@testing-library/dom": "^8.11.1",
    "@testing-library/jest-dom": "^5.16.4",
    "@testing-library/user-event": "^13.5.0",
    "@testing-library/user-event": "^14.4.3",
    "@types/eventsource": "^1.1.8",
    "@types/jest": "^29.0.1",
    "@types/lodash": "^4.14.172",
19 kafka-ui-react-app/pnpm-lock.yaml (generated)
@@ -19,7 +19,7 @@ specifiers:
  '@testing-library/dom': ^8.11.1
  '@testing-library/jest-dom': ^5.16.4
  '@testing-library/react': ^13.2.0
  '@testing-library/user-event': ^13.5.0
  '@testing-library/user-event': ^14.4.3
  '@types/eventsource': ^1.1.8
  '@types/jest': ^29.0.1
  '@types/lodash': ^4.14.172

@@ -30,7 +30,6 @@ specifiers:
  '@types/react-router-dom': ^5.3.3
  '@types/styled-components': ^5.1.13
  '@types/testing-library__jest-dom': ^5.14.5
  '@types/yup': ^0.29.13
  '@typescript-eslint/eslint-plugin': ^5.29.0
  '@typescript-eslint/parser': ^5.29.0
  '@vitejs/plugin-react': ^2.0.0

@@ -88,7 +87,7 @@ specifiers:
  vite: ^3.0.2
  vite-tsconfig-paths: ^3.5.0
  whatwg-fetch: ^3.6.2
  yup: ^0.32.9
  yup: ^0.32.11
  zustand: ^4.1.1

dependencies:

@@ -104,7 +103,6 @@ dependencies:
  '@tanstack/react-table': 8.5.10_ef5jwxihqo6n7gxfmzogljlgcm
  '@testing-library/react': 13.2.0_ef5jwxihqo6n7gxfmzogljlgcm
  '@types/testing-library__jest-dom': 5.14.5
  '@types/yup': 0.29.13
  '@vitejs/plugin-react': 2.0.0_vite@3.0.2
  ace-builds: 1.7.1
  ajv: 8.8.2

@@ -146,7 +144,7 @@ devDependencies:
  '@openapitools/openapi-generator-cli': 2.5.1
  '@testing-library/dom': 8.13.0
  '@testing-library/jest-dom': 5.16.4
  '@testing-library/user-event': 13.5.0_tlwynutqiyp5mns3woioasuxnq
  '@testing-library/user-event': 14.4.3_tlwynutqiyp5mns3woioasuxnq
  '@types/eventsource': 1.1.8
  '@types/jest': 29.0.1
  '@types/lodash': 4.14.177

@@ -3339,13 +3337,12 @@ packages:
      react-dom: 18.1.0_react@18.1.0
    dev: false

  /@testing-library/user-event/13.5.0_tlwynutqiyp5mns3woioasuxnq:
    resolution: {integrity: sha512-5Kwtbo3Y/NowpkbRuSepbyMFkZmHgD+vPzYB/RJ4oxt5Gj/avFFBYjhw27cqSVPVw/3a67NK1PbiIr9k4Gwmdg==}
    engines: {node: '>=10', npm: '>=6'}
  /@testing-library/user-event/14.4.3_tlwynutqiyp5mns3woioasuxnq:
    resolution: {integrity: sha512-kCUc5MEwaEMakkO5x7aoD+DLi02ehmEM2QCGWvNqAS1dV/fAvORWEjnjsEIvml59M7Y5kCkWN6fCCyPOe8OL6Q==}
    engines: {node: '>=12', npm: '>=6'}
    peerDependencies:
      '@testing-library/dom': '>=7.21.4'
    dependencies:
      '@babel/runtime': 7.17.9
      '@testing-library/dom': 8.13.0
    dev: true

@@ -3546,10 +3543,6 @@ packages:
    dependencies:
      '@types/yargs-parser': 20.2.0

  /@types/yup/0.29.13:
    resolution: {integrity: sha512-qRyuv+P/1t1JK1rA+elmK1MmCL1BapEzKKfbEhDBV/LMMse4lmhZ/XbgETI39JveDJRpLjmToOI6uFtMW/WR2g==}
    dev: false

  /@typescript-eslint/eslint-plugin/5.29.0_uaxwak76nssfibsnotx5epygnu:
    resolution: {integrity: sha512-kgTsISt9pM53yRFQmLZ4npj99yGl3x3Pl7z4eA66OuTzAGC4bQB5H5fuLwPnqTKU3yyrrg4MIhjF17UYnL4c0w==}
    engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
@@ -2,7 +2,6 @@ import React from 'react';
import { render, WithRoute } from 'lib/testHelpers';
import { screen } from '@testing-library/dom';
import { clusterBrokerPath } from 'lib/paths';
import { act } from '@testing-library/react';
import { brokerLogDirsPayload } from 'lib/fixtures/brokers';
import { useBrokerLogDirs } from 'lib/hooks/api/brokers';
import { BrokerLogdirs } from 'generated-sources';

@@ -20,16 +19,14 @@ describe('BrokerLogdir Component', () => {
    (useBrokerLogDirs as jest.Mock).mockImplementation(() => ({
      data: payload,
    }));
    await act(() => {
      render(
        <WithRoute path={clusterBrokerPath()}>
          <BrokerLogdir />
        </WithRoute>,
        {
          initialEntries: [clusterBrokerPath(clusterName, brokerId)],
        }
      );
    });
    await render(
      <WithRoute path={clusterBrokerPath()}>
        <BrokerLogdir />
      </WithRoute>,
      {
        initialEntries: [clusterBrokerPath(clusterName, brokerId)],
      }
    );
  };

  it('shows warning when server returns undefined logDirs response', async () => {
@@ -6,7 +6,6 @@ import { useBrokerConfig } from 'lib/hooks/api/brokers';
import { brokerConfigPayload } from 'lib/fixtures/brokers';
import Configs from 'components/Brokers/Broker/Configs/Configs';
import userEvent from '@testing-library/user-event';
import { act } from '@testing-library/react';

const clusterName = 'Cluster_Name';
const brokerId = 'Broker_Id';

@@ -42,9 +41,7 @@ describe('Configs', () => {
  });

  it('updates textbox value', async () => {
    await act(() => {
      userEvent.click(screen.getAllByLabelText('editAction')[0]);
    });
    await userEvent.click(screen.getAllByLabelText('editAction')[0]);

    const textbox = screen.getByLabelText('inputValue');
    expect(textbox).toBeInTheDocument();

@@ -59,9 +56,9 @@ describe('Configs', () => {
      screen.getByRole('button', { name: 'cancelAction' })
    ).toBeInTheDocument();

    await act(() => {
      userEvent.click(screen.getByRole('button', { name: 'confirmAction' }));
    });
    await userEvent.click(
      screen.getByRole('button', { name: 'confirmAction' })
    );

    expect(
      screen.getByText('Are you sure you want to change the value?')
@@ -2,7 +2,6 @@ import React from 'react';
import { render, WithRoute } from 'lib/testHelpers';
import { screen, waitFor } from '@testing-library/dom';
import { clusterBrokerPath, clusterBrokersPath } from 'lib/paths';
import { act } from '@testing-library/react';
import BrokersList from 'components/Brokers/BrokersList/BrokersList';
import userEvent from '@testing-library/user-event';
import { useBrokers } from 'lib/hooks/api/brokers';

@@ -57,9 +56,8 @@ describe('BrokersList Component', () => {
  });
  it('opens broker when row clicked', async () => {
    renderComponent();
    await act(() => {
      userEvent.click(screen.getByRole('cell', { name: '0' }));
    });
    await userEvent.click(screen.getByRole('cell', { name: '0' }));

    await waitFor(() =>
      expect(mockedUsedNavigate).toBeCalledWith(
        clusterBrokerPath(clusterName, '0')
@@ -6,169 +6,20 @@ export const brokerMetricsPayload: BrokerMetrics = {
  metrics: [
    {
      name: 'TotalFetchRequestsPerSec',
      canonicalName:
        'kafka.server:name=TotalFetchRequestsPerSec,topic=_connect_status,type=BrokerTopicMetrics',
      params: {
        topic: '_connect_status',
        name: 'TotalFetchRequestsPerSec',
        type: 'BrokerTopicMetrics',
      },
      value: {
        OneMinuteRate: 19.408369293127542,
        FifteenMinuteRate: 19.44631556589501,
        Count: 191615,
        FiveMinuteRate: 19.464393718807774,
        MeanRate: 19.4233855043407,
      labels: {
        canonicalName:
          'kafka.server:name=TotalFetchRequestsPerSec,topic=_connect_status,type=BrokerTopicMetrics',
      },
      value: 10,
    },
    {
      name: 'ZooKeeperRequestLatencyMs',
      canonicalName:
        'kafka.server:name=ZooKeeperRequestLatencyMs,type=ZooKeeperClientMetrics',
      params: {
        name: 'ZooKeeperRequestLatencyMs',
        type: 'ZooKeeperClientMetrics',
      },
      value: {
        Mean: 4.907351022183558,
        StdDev: 10.589608223906348,
        '75thPercentile': 2,
        '98thPercentile': 10,
        Min: 0,
        '95thPercentile': 5,
        '99thPercentile': 15,
        Max: 151,
        '999thPercentile': 92.79700000000003,
        Count: 2301,
        '50thPercentile': 1,
      },
      value: 11,
    },
    {
      name: 'RequestHandlerAvgIdlePercent',
      canonicalName:
        'kafka.server:name=RequestHandlerAvgIdlePercent,type=KafkaRequestHandlerPool',
      params: {
        name: 'RequestHandlerAvgIdlePercent',
        type: 'KafkaRequestHandlerPool',
      },
      value: {
        OneMinuteRate: 0.9999008788765713,
        FifteenMinuteRate: 0.9983845959639047,
        Count: 9937344680371,
        FiveMinuteRate: 0.9986337207880311,
        MeanRate: 0.9971616923696525,
      },
    },
    {
      name: 'BytesInPerSec',
      canonicalName:
        'kafka.server:name=BytesInPerSec,topic=_connect_status,type=BrokerTopicMetrics',
      params: {
        topic: '_connect_status',
        name: 'BytesInPerSec',
        type: 'BrokerTopicMetrics',
      },
      value: {
        OneMinuteRate: 0,
        FifteenMinuteRate: 0,
        Count: 0,
        FiveMinuteRate: 0,
        MeanRate: 0,
      },
    },
    {
      name: 'FetchMessageConversionsPerSec',
      canonicalName:
        'kafka.server:name=FetchMessageConversionsPerSec,topic=__consumer_offsets,type=BrokerTopicMetrics',
      params: {
        topic: '__consumer_offsets',
        name: 'FetchMessageConversionsPerSec',
        type: 'BrokerTopicMetrics',
      },
      value: {
        OneMinuteRate: 0,
        FifteenMinuteRate: 0,
        Count: 0,
        FiveMinuteRate: 0,
        MeanRate: 0,
      },
    },
    {
      name: 'TotalProduceRequestsPerSec',
      canonicalName:
        'kafka.server:name=TotalProduceRequestsPerSec,topic=_connect_status,type=BrokerTopicMetrics',
      params: {
        topic: '_connect_status',
        name: 'TotalProduceRequestsPerSec',
        type: 'BrokerTopicMetrics',
      },
      value: {
        OneMinuteRate: 0,
        FifteenMinuteRate: 0,
        Count: 0,
        FiveMinuteRate: 0,
        MeanRate: 0,
      },
    },
    {
      name: 'MaxLag',
      canonicalName:
        'kafka.server:clientId=Replica,name=MaxLag,type=ReplicaFetcherManager',
      params: {
        clientId: 'Replica',
        name: 'MaxLag',
        type: 'ReplicaFetcherManager',
      },
      value: {
        Value: 0,
      },
    },
    {
      name: 'UnderMinIsrPartitionCount',
      canonicalName:
        'kafka.server:name=UnderMinIsrPartitionCount,type=ReplicaManager',
      params: {
        name: 'UnderMinIsrPartitionCount',
        type: 'ReplicaManager',
      },
      value: {
        Value: 0,
      },
    },
    {
      name: 'ZooKeeperDisconnectsPerSec',
      canonicalName:
        'kafka.server:name=ZooKeeperDisconnectsPerSec,type=SessionExpireListener',
      params: {
        name: 'ZooKeeperDisconnectsPerSec',
        type: 'SessionExpireListener',
      },
      value: {
        OneMinuteRate: 0,
        FifteenMinuteRate: 0,
        Count: 0,
        FiveMinuteRate: 0,
        MeanRate: 0,
      },
    },
    {
      name: 'BytesInPerSec',
      canonicalName:
        'kafka.server:name=BytesInPerSec,topic=__confluent.support.metrics,type=BrokerTopicMetrics',
      params: {
        topic: '__confluent.support.metrics',
        name: 'BytesInPerSec',
        type: 'BrokerTopicMetrics',
      },
      value: {
        OneMinuteRate: 3.093893673470914e-70,
        FifteenMinuteRate: 0.004057932469784932,
        Count: 1263,
        FiveMinuteRate: 1.047243693828501e-12,
        MeanRate: 0.12704831069266603,
      },
    },
  ],
};
export const transformedBrokerMetricsPayload =
'{"segmentSize":23,"segmentCount":23,"metrics":[{"name":"TotalFetchRequestsPerSec","canonicalName":"kafka.server:name=TotalFetchRequestsPerSec,topic=_connect_status,type=BrokerTopicMetrics","params":{"topic":"_connect_status","name":"TotalFetchRequestsPerSec","type":"BrokerTopicMetrics"},"value":{"OneMinuteRate":19.408369293127542,"FifteenMinuteRate":19.44631556589501,"Count":191615,"FiveMinuteRate":19.464393718807774,"MeanRate":19.4233855043407}},{"name":"ZooKeeperRequestLatencyMs","canonicalName":"kafka.server:name=ZooKeeperRequestLatencyMs,type=ZooKeeperClientMetrics","params":{"name":"ZooKeeperRequestLatencyMs","type":"ZooKeeperClientMetrics"},"value":{"Mean":4.907351022183558,"StdDev":10.589608223906348,"75thPercentile":2,"98thPercentile":10,"Min":0,"95thPercentile":5,"99thPercentile":15,"Max":151,"999thPercentile":92.79700000000003,"Count":2301,"50thPercentile":1}},{"name":"RequestHandlerAvgIdlePercent","canonicalName":"kafka.server:name=RequestHandlerAvgIdlePercent,type=KafkaRequestHandlerPool","params":{"name":"RequestHandlerAvgIdlePercent","type":"KafkaRequestHandlerPool"},"value":{"OneMinuteRate":0.9999008788765713,"FifteenMinuteRate":0.9983845959639047,"Count":9937344680371,"FiveMinuteRate":0.9986337207880311,"MeanRate":0.9971616923696525}},{"name":"BytesInPerSec","canonicalName":"kafka.server:name=BytesInPerSec,topic=_connect_status,type=BrokerTopicMetrics","params":{"topic":"_connect_status","name":"BytesInPerSec","type":"BrokerTopicMetrics"},"value":{"OneMinuteRate":0,"FifteenMinuteRate":0,"Count":0,"FiveMinuteRate":0,"MeanRate":0}},{"name":"FetchMessageConversionsPerSec","canonicalName":"kafka.server:name=FetchMessageConversionsPerSec,topic=__consumer_offsets,type=BrokerTopicMetrics","params":{"topic":"__consumer_offsets","name":"FetchMessageConversionsPerSec","type":"BrokerTopicMetrics"},"value":{"OneMinuteRate":0,"FifteenMinuteRate":0,"Count":0,"FiveMinuteRate":0,"MeanRate":0}},{"name":"TotalProduceRequestsPerSec","canonicalName":"kafka.server:name=TotalProduceRequestsPerSec,topic=_connect_status,type=BrokerTopicMetrics","params":{"topic":"_connect_status","name":"TotalProduceRequestsPerSec","type":"BrokerTopicMetrics"},"value":{"OneMinuteRate":0,"FifteenMinuteRate":0,"Count":0,"FiveMinuteRate":0,"MeanRate":0}},{"name":"MaxLag","canonicalName":"kafka.server:clientId=Replica,name=MaxLag,type=ReplicaFetcherManager","params":{"clientId":"Replica","name":"MaxLag","type":"ReplicaFetcherManager"},"value":{"Value":0}},{"name":"UnderMinIsrPartitionCount","canonicalName":"kafka.server:name=UnderMinIsrPartitionCount,type=ReplicaManager","params":{"name":"UnderMinIsrPartitionCount","type":"ReplicaManager"},"value":{"Value":0}},{"name":"ZooKeeperDisconnectsPerSec","canonicalName":"kafka.server:name=ZooKeeperDisconnectsPerSec,type=SessionExpireListener","params":{"name":"ZooKeeperDisconnectsPerSec","type":"SessionExpireListener"},"value":{"OneMinuteRate":0,"FifteenMinuteRate":0,"Count":0,"FiveMinuteRate":0,"MeanRate":0}},{"name":"BytesInPerSec","canonicalName":"kafka.server:name=BytesInPerSec,topic=__confluent.support.metrics,type=BrokerTopicMetrics","params":{"topic":"__confluent.support.metrics","name":"BytesInPerSec","type":"BrokerTopicMetrics"},"value":{"OneMinuteRate":3.093893673470914e-70,"FifteenMinuteRate":0.004057932469784932,"Count":1263,"FiveMinuteRate":1.047243693828501e-12,"MeanRate":0.12704831069266603}}]}';
'{"segmentSize":23,"segmentCount":23,"metrics":[{"name":"TotalFetchRequestsPerSec","labels":{"canonicalName":"kafka.server:name=TotalFetchRequestsPerSec,topic=_connect_status,type=BrokerTopicMetrics"},"value":10},{"name":"ZooKeeperRequestLatencyMs","value":11},{"name":"RequestHandlerAvgIdlePercent"}]}';
@@ -13,7 +13,6 @@ import {
  clusterSchemasPath,
  clusterTopicsPath,
} from 'lib/paths';
import { act } from 'react-dom/test-utils';
import { useClusters } from 'lib/hooks/api/clusters';
import { onlineClusterPayload } from 'lib/fixtures/clusters';

@@ -54,14 +53,12 @@ describe('Cluster', () => {
    (useClusters as jest.Mock).mockImplementation(() => ({
      data: payload,
    }));
    await act(() => {
      render(
        <WithRoute path={`${clusterPath()}/*`}>
          <ClusterComponent />
        </WithRoute>,
        { initialEntries: [pathname] }
      );
    });
    await render(
      <WithRoute path={`${clusterPath()}/*`}>
        <ClusterComponent />
      </WithRoute>,
      { initialEntries: [pathname] }
    );
  };

  it('renders Brokers', async () => {
@@ -33,10 +33,10 @@ const expectActionButtonsExists = () => {
  expect(screen.getByText('Restart Failed Tasks')).toBeInTheDocument();
  expect(screen.getByText('Delete')).toBeInTheDocument();
};
const afterClickDropDownButton = () => {
const afterClickDropDownButton = async () => {
  const dropDownButton = screen.getAllByRole('button');
  expect(dropDownButton.length).toEqual(1);
  userEvent.click(dropDownButton[0]);
  await userEvent.click(dropDownButton[0]);
};
describe('Actions', () => {
  afterEach(() => {

@@ -61,48 +61,48 @@ describe('Actions', () => {
      { initialEntries: [path] }
    );

  it('renders buttons when paused', () => {
  it('renders buttons when paused', async () => {
    (useConnector as jest.Mock).mockImplementation(() => ({
      data: set({ ...connector }, 'status.state', ConnectorState.PAUSED),
    }));
    renderComponent();
    afterClickDropDownButton();
    await afterClickDropDownButton();
    expect(screen.getAllByRole('menuitem').length).toEqual(5);
    expect(screen.getByText('Resume')).toBeInTheDocument();
    expect(screen.queryByText('Pause')).not.toBeInTheDocument();
    expectActionButtonsExists();
  });

  it('renders buttons when failed', () => {
  it('renders buttons when failed', async () => {
    (useConnector as jest.Mock).mockImplementation(() => ({
      data: set({ ...connector }, 'status.state', ConnectorState.FAILED),
    }));
    renderComponent();
    afterClickDropDownButton();
    await afterClickDropDownButton();
    expect(screen.getAllByRole('menuitem').length).toEqual(4);
    expect(screen.queryByText('Resume')).not.toBeInTheDocument();
    expect(screen.queryByText('Pause')).not.toBeInTheDocument();
    expectActionButtonsExists();
  });

  it('renders buttons when unassigned', () => {
  it('renders buttons when unassigned', async () => {
    (useConnector as jest.Mock).mockImplementation(() => ({
      data: set({ ...connector }, 'status.state', ConnectorState.UNASSIGNED),
    }));
    renderComponent();
    afterClickDropDownButton();
    await afterClickDropDownButton();
    expect(screen.getAllByRole('menuitem').length).toEqual(4);
    expect(screen.queryByText('Resume')).not.toBeInTheDocument();
    expect(screen.queryByText('Pause')).not.toBeInTheDocument();
    expectActionButtonsExists();
  });

  it('renders buttons when running connector action', () => {
  it('renders buttons when running connector action', async () => {
    (useConnector as jest.Mock).mockImplementation(() => ({
      data: set({ ...connector }, 'status.state', ConnectorState.RUNNING),
    }));
    renderComponent();
    afterClickDropDownButton();
    await afterClickDropDownButton();
    expect(screen.getAllByRole('menuitem').length).toEqual(5);
    expect(screen.queryByText('Resume')).not.toBeInTheDocument();
    expect(screen.getByText('Pause')).toBeInTheDocument();

@@ -118,34 +118,34 @@ describe('Actions', () => {

  it('opens confirmation modal when delete button clicked', async () => {
    renderComponent();
    afterClickDropDownButton();
    await waitFor(() =>
    await afterClickDropDownButton();
    await waitFor(async () =>
      userEvent.click(screen.getByRole('menuitem', { name: 'Delete' }))
    );
    expect(screen.getByRole('dialog')).toBeInTheDocument();
  });

  it('calls restartConnector when restart button clicked', () => {
  it('calls restartConnector when restart button clicked', async () => {
    const restartConnector = jest.fn();
    (useUpdateConnectorState as jest.Mock).mockImplementation(() => ({
      mutateAsync: restartConnector,
    }));
    renderComponent();
    afterClickDropDownButton();
    userEvent.click(
    await afterClickDropDownButton();
    await userEvent.click(
      screen.getByRole('menuitem', { name: 'Restart Connector' })
    );
    expect(restartConnector).toHaveBeenCalledWith(ConnectorAction.RESTART);
  });

  it('calls restartAllTasks', () => {
  it('calls restartAllTasks', async () => {
    const restartAllTasks = jest.fn();
    (useUpdateConnectorState as jest.Mock).mockImplementation(() => ({
      mutateAsync: restartAllTasks,
    }));
    renderComponent();
    afterClickDropDownButton();
    userEvent.click(
    await afterClickDropDownButton();
    await userEvent.click(
      screen.getByRole('menuitem', { name: 'Restart All Tasks' })
    );
    expect(restartAllTasks).toHaveBeenCalledWith(

@@ -153,14 +153,14 @@ describe('Actions', () => {
    );
  });

  it('calls restartFailedTasks', () => {
  it('calls restartFailedTasks', async () => {
    const restartFailedTasks = jest.fn();
    (useUpdateConnectorState as jest.Mock).mockImplementation(() => ({
      mutateAsync: restartFailedTasks,
    }));
    renderComponent();
    afterClickDropDownButton();
    userEvent.click(
    await afterClickDropDownButton();
    await userEvent.click(
      screen.getByRole('menuitem', { name: 'Restart Failed Tasks' })
    );
    expect(restartFailedTasks).toHaveBeenCalledWith(

@@ -168,18 +168,18 @@ describe('Actions', () => {
    );
  });

  it('calls pauseConnector when pause button clicked', () => {
  it('calls pauseConnector when pause button clicked', async () => {
    const pauseConnector = jest.fn();
    (useUpdateConnectorState as jest.Mock).mockImplementation(() => ({
      mutateAsync: pauseConnector,
    }));
    renderComponent();
    afterClickDropDownButton();
    userEvent.click(screen.getByRole('menuitem', { name: 'Pause' }));
    await afterClickDropDownButton();
    await userEvent.click(screen.getByRole('menuitem', { name: 'Pause' }));
    expect(pauseConnector).toHaveBeenCalledWith(ConnectorAction.PAUSE);
  });

  it('calls resumeConnector when resume button clicked', () => {
  it('calls resumeConnector when resume button clicked', async () => {
    const resumeConnector = jest.fn();
    (useConnector as jest.Mock).mockImplementation(() => ({
      data: set({ ...connector }, 'status.state', ConnectorState.PAUSED),

@@ -188,8 +188,8 @@ describe('Actions', () => {
      mutateAsync: resumeConnector,
    }));
    renderComponent();
    afterClickDropDownButton();
    userEvent.click(screen.getByRole('menuitem', { name: 'Resume' }));
    await afterClickDropDownButton();
    await userEvent.click(screen.getByRole('menuitem', { name: 'Resume' }));
    expect(resumeConnector).toHaveBeenCalledWith(ConnectorAction.RESUME);
  });
});
@@ -57,7 +57,7 @@ describe('Tasks', () => {
    ).toBeInTheDocument();
  });

  it('renders truncates long trace and expands', () => {
  it('renders truncates long trace and expands', async () => {
    renderComponent(tasks);

    const trace = tasks[2]?.status?.trace || '';

@@ -72,7 +72,7 @@ describe('Tasks', () => {
    // Full trace is not visible
    expect(expandedDetails).not.toBeInTheDocument();

    userEvent.click(thirdRow);
    await userEvent.click(thirdRow);

    expect(
      screen.getByRole('row', {

@@ -82,7 +82,7 @@ describe('Tasks', () => {
  });

  describe('Action button', () => {
    const expectDropdownExists = () => {
    const expectDropdownExists = async () => {
      const firstTaskRow = screen.getByRole('row', {
        name: '1 kafka-connect0:8083 RUNNING',
      });

@@ -91,13 +91,13 @@ describe('Tasks', () => {
        name: 'Dropdown Toggle',
      });
      expect(extBtn).toBeEnabled();
      userEvent.click(extBtn);
      await userEvent.click(extBtn);
      expect(screen.getByRole('menu')).toBeInTheDocument();
    };

    it('renders action button', () => {
    it('renders action button', async () => {
      renderComponent(tasks);
      expectDropdownExists();
      await expectDropdownExists();
      expect(
        screen.getAllByRole('button', { name: 'Dropdown Toggle' }).length
      ).toEqual(tasks.length);

@@ -108,11 +108,11 @@ describe('Tasks', () => {

    it('works as expected', async () => {
      renderComponent(tasks);
      expectDropdownExists();
      await expectDropdownExists();
      const actionBtn = screen.getAllByRole('menuitem');
      expect(actionBtn[0]).toHaveTextContent('Restart task');

      userEvent.click(actionBtn[0]);
      await userEvent.click(actionBtn[0]);
      expect(
        screen.getByText('Are you sure you want to restart the task?')
      ).toBeInTheDocument();
@@ -5,7 +5,7 @@ import ClusterContext, {
  initialValue,
} from 'components/contexts/ClusterContext';
import List from 'components/Connect/List/List';
import { act, screen, waitFor } from '@testing-library/react';
import { screen, waitFor } from '@testing-library/react';
import userEvent from '@testing-library/user-event';
import { render, WithRoute } from 'lib/testHelpers';
import { clusterConnectConnectorPath, clusterConnectorsPath } from 'lib/paths';
@@ -52,13 +52,11 @@ describe('Connectors List', () => {

  it('opens broker when row clicked', async () => {
    renderComponent();
    await act(() => {
      userEvent.click(
        screen.getByRole('row', {
          name: 'hdfs-source-connector first SOURCE FileStreamSource a b c RUNNING 2 of 2',
        })
      );
    });
    await userEvent.click(
      screen.getByRole('row', {
        name: 'hdfs-source-connector first SOURCE FileStreamSource a b c RUNNING 2 of 2',
      })
    );
    await waitFor(() =>
      expect(mockedUsedNavigate).toBeCalledWith(
        clusterConnectConnectorPath(
@@ -105,7 +103,7 @@ describe('Connectors List', () => {
    const submitButton = screen.getAllByRole('button', {
      name: 'Confirm',
    })[0];
    await act(() => userEvent.click(submitButton));
    await userEvent.click(submitButton);
    expect(mockDelete).toHaveBeenCalledWith();
  });
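The row-click test shows the second recurring edit: act() wrappers around events are dropped entirely, while waitFor survives only where the assertion itself is asynchronous (here, a navigation mock called after a state update). The rough shape of the before and after, with an abbreviated row name:

// Before (v13): the event had to be wrapped so React state updates flushed.
// await act(() => {
//   userEvent.click(screen.getByRole('row', { name: /hdfs-source-connector/ }));
// });

// After (v14): the awaited event flushes updates itself; waitFor remains
// only around the assertion that depends on an async side effect.
await userEvent.click(screen.getByRole('row', { name: /hdfs-source-connector/ }));
await waitFor(() => expect(mockedUsedNavigate).toBeCalled());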
@@ -31,16 +31,14 @@ jest.mock('lib/hooks/api/kafkaConnect', () => ({
describe('New', () => {
  const clusterName = 'my-cluster';
  const simulateFormSubmit = async () => {
    await act(() => {
      userEvent.type(
        screen.getByPlaceholderText('Connector Name'),
        'my-connector'
      );
      userEvent.type(
        screen.getByPlaceholderText('json'),
        '{"class":"MyClass"}'.replace(/[{[]/g, '$&$&')
      );
    });
    await userEvent.type(
      screen.getByPlaceholderText('Connector Name'),
      'my-connector'
    );
    await userEvent.type(
      screen.getByPlaceholderText('json'),
      '{"class":"MyClass"}'.replace(/[{[]/g, '$&$&')
    );

    expect(screen.getByPlaceholderText('json')).toHaveValue(
      '{"class":"MyClass"}'
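The .replace(/[{[]/g, '$&$&') in simulateFormSubmit is worth a note: userEvent.type() parses { and [ as the start of key descriptors such as {enter}, and doubling them ({{, [[) is the documented way to type those characters literally. In isolation:

const json = '{"class":"MyClass"}';
// "$&" re-inserts the matched character, turning "{" into "{{" and "[" into
// "[[", so type() emits literal braces instead of parsing a key descriptor.
await userEvent.type(
  screen.getByPlaceholderText('json'),
  json.replace(/[{[]/g, '$&$&')
);
expect(screen.getByPlaceholderText('json')).toHaveValue(json);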
@@ -33,25 +33,24 @@ const resetConsumerGroupOffsetsMockCalled = () =>
  ).toBeTruthy();

const selectresetTypeAndPartitions = async (resetType: string) => {
  userEvent.click(screen.getByLabelText('Reset Type'));
  userEvent.click(screen.getByText(resetType));
  userEvent.click(screen.getByText('Select...'));
  await waitFor(() => {
    userEvent.click(screen.getByText('Partition #0'));
  });
  await userEvent.click(screen.getByLabelText('Reset Type'));
  await userEvent.click(screen.getByText(resetType));
  await userEvent.click(screen.getByText('Select...'));

  await userEvent.click(screen.getByText('Partition #0'));
};

const resetConsumerGroupOffsetsWith = async (
  resetType: string,
  offset: null | number = null
) => {
  userEvent.click(screen.getByLabelText('Reset Type'));
  await userEvent.click(screen.getByLabelText('Reset Type'));
  const options = screen.getAllByText(resetType);
  userEvent.click(options.length > 1 ? options[1] : options[0]);
  userEvent.click(screen.getByText('Select...'));
  await waitFor(() => {
    userEvent.click(screen.getByText('Partition #0'));
  });
  await userEvent.click(options.length > 1 ? options[1] : options[0]);
  await userEvent.click(screen.getByText('Select...'));

  await userEvent.click(screen.getByText('Partition #0'));

  fetchMock.postOnce(
    `/api/clusters/${clusterName}/consumer-groups/${groupId}/offsets`,
    200,
@@ -64,7 +63,7 @@ const resetConsumerGroupOffsetsWith = async (
      },
    }
  );
  userEvent.click(screen.getByText('Submit'));
  await userEvent.click(screen.getByText('Submit'));
  await waitFor(() => resetConsumerGroupOffsetsMockCalled());
};

@@ -116,14 +115,14 @@ describe('ResetOffsets', () => {
        },
      }
    );
    await waitFor(() => {
      userEvent.click(screen.getAllByLabelText('Partition #0')[1]);
    });
    await waitFor(() => {
      userEvent.keyboard('10');
    });
    userEvent.click(screen.getByText('Submit'));
    await waitFor(() => resetConsumerGroupOffsetsMockCalled());

    await userEvent.click(screen.getAllByLabelText('Partition #0')[1]);

    await userEvent.keyboard('10');

    await userEvent.click(screen.getByText('Submit'));

    await resetConsumerGroupOffsetsMockCalled();
  });
  it('calls resetConsumerGroupOffsets with TIMESTAMP', async () => {
    await selectresetTypeAndPartitions('TIMESTAMP');
@@ -139,7 +138,7 @@ describe('ResetOffsets', () => {
        },
      }
    );
    userEvent.click(screen.getByText('Submit'));
    await userEvent.click(screen.getByText('Submit'));
    await waitFor(() =>
      expect(
        screen.getByText("This field shouldn't be empty!")
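The offsets test also drops waitFor around userEvent.keyboard: keyboard input goes to the focused element and resolves like any other v14 interaction, so only genuinely asynchronous conditions still need polling. A condensed sketch of the resulting flow (the fetch-mock assertion at the end is one reasonable phrasing, not a verbatim quote of the suite's helper):

await userEvent.click(screen.getAllByLabelText('Partition #0')[1]);
await userEvent.keyboard('10'); // types into the focused offset input
await userEvent.click(screen.getByText('Submit'));
// Poll only for the async side effect: the mocked offsets endpoint being hit.
await waitFor(() => expect(fetchMock.called()).toBeTruthy());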
@@ -13,7 +13,6 @@ import {
  waitForElementToBeRemoved,
} from '@testing-library/dom';
import userEvent from '@testing-library/user-event';
import { act } from '@testing-library/react';

const clusterName = 'cluster1';
const { groupId } = consumerGroupPayload;
@@ -71,7 +70,7 @@ describe('Details component', () => {
  });

  it('handles [Reset offset] click', async () => {
    userEvent.click(screen.getByText('Reset offset'));
    await userEvent.click(screen.getByText('Reset offset'));
    expect(mockNavigate).toHaveBeenLastCalledWith(
      clusterConsumerGroupResetRelativePath
    );
@@ -86,19 +85,19 @@ describe('Details component', () => {

  it('shows confirmation modal on consumer group delete', async () => {
    expect(screen.queryByRole('dialog')).not.toBeInTheDocument();
    userEvent.click(screen.getByText('Delete consumer group'));
    await userEvent.click(screen.getByText('Delete consumer group'));
    await waitFor(() =>
      expect(screen.queryByRole('dialog')).toBeInTheDocument()
    );
    userEvent.click(screen.getByText('Cancel'));
    await userEvent.click(screen.getByText('Cancel'));
    expect(screen.queryByRole('dialog')).not.toBeInTheDocument();
  });

  it('handles [Delete consumer group] click', async () => {
    expect(screen.queryByRole('dialog')).not.toBeInTheDocument();
    await act(() => {
      userEvent.click(screen.getByText('Delete consumer group'));
    });

    await userEvent.click(screen.getByText('Delete consumer group'));

    expect(screen.queryByRole('dialog')).toBeInTheDocument();
    const deleteConsumerGroupMock = fetchMock.deleteOnce(
      `/api/clusters/${clusterName}/consumer-groups/${groupId}`,
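The confirmation-modal tests condense into a clean open and dismiss cycle once clicks are awaited. A minimal sketch, using findByRole as an equally valid alternative to the waitFor-wrapped query in the diff:

expect(screen.queryByRole('dialog')).not.toBeInTheDocument();
await userEvent.click(screen.getByText('Delete consumer group'));
// findByRole retries until the dialog mounts, like waitFor + getByRole.
expect(await screen.findByRole('dialog')).toBeInTheDocument();
await userEvent.click(screen.getByText('Cancel'));
expect(screen.queryByRole('dialog')).not.toBeInTheDocument();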
@@ -39,8 +39,8 @@ describe('ListItem', () => {
    expect(screen.getByRole('row')).toBeInTheDocument();
  });

  it('should renders list item with topic content open', () => {
    userEvent.click(screen.getAllByRole('cell')[0].children[0]);
  it('should renders list item with topic content open', async () => {
    await userEvent.click(screen.getAllByRole('cell')[0].children[0]);
    expect(screen.getByText('Consumer ID')).toBeInTheDocument();
  });
});
@@ -48,10 +48,10 @@ describe('List', () => {
    expect(screen.getByText('groupId2')).toBeInTheDocument();
  });

  it('handles onRowClick', () => {
  it('handles onRowClick', async () => {
    const row = screen.getByRole('row', { name: 'groupId1 0 1 1' });
    expect(row).toBeInTheDocument();
    userEvent.click(row);
    await userEvent.click(row);
    expect(mockedUsedNavigate).toHaveBeenCalledWith(
      clusterConsumerGroupDetailsPath(':clusterName', 'groupId1')
    );
@@ -1,5 +1,5 @@
import React from 'react';
import { act, screen } from '@testing-library/react';
import { screen } from '@testing-library/react';
import ClustersWidget from 'components/Dashboard/ClustersWidget/ClustersWidget';
import userEvent from '@testing-library/user-event';
import { render } from 'lib/testHelpers';
@@ -16,18 +16,16 @@ describe('ClustersWidget', () => {
      data: clustersPayload,
      isSuccess: true,
    }));
    await act(() => {
      render(<ClustersWidget />);
    });
    await render(<ClustersWidget />);
  });

  it('renders clusterWidget list', () => {
    expect(screen.getAllByRole('row').length).toBe(3);
  });

  it('hides online cluster widgets', () => {
  it('hides online cluster widgets', async () => {
    expect(screen.getAllByRole('row').length).toBe(3);
    userEvent.click(screen.getByRole('checkbox'));
    await userEvent.click(screen.getByRole('checkbox'));
    expect(screen.getAllByRole('row').length).toBe(2);
  });
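ClustersWidget shows the render side of the cleanup: React Testing Library already wraps render() in act(), so the explicit wrapper was redundant, and the beforeEach simply awaits render so pending effects settle before the first assertion. Reconstructed as a sketch with an assumed hook name, since the mocked hook is not visible in this hunk:

beforeEach(async () => {
  // useClusters is an assumption for the hook being mocked here.
  (useClusters as jest.Mock).mockImplementation(() => ({
    data: clustersPayload,
    isSuccess: true,
  }));
  // render() is already act()-wrapped by RTL; awaiting lets effects settle.
  await render(<ClustersWidget />);
});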
@@ -7,7 +7,6 @@ import KsqlDbItem, {
} from 'components/KsqlDb/List/KsqlDbItem/KsqlDbItem';
import { screen } from '@testing-library/dom';
import { fetchKsqlDbTablesPayload } from 'redux/reducers/ksqlDb/__test__/fixtures';
import { act } from '@testing-library/react';

describe('KsqlDbItem', () => {
  const tablesPathname = clusterKsqlDbTablesPath();
@@ -27,37 +26,34 @@ describe('KsqlDbItem', () => {
    );
  };

  it('renders progressbar when fetching tables and streams', async () => {
    await act(() => renderComponent({ fetching: true }));
  it('renders progressbar when fetching tables and streams', () => {
    renderComponent({ fetching: true });
    expect(screen.getByRole('progressbar')).toBeInTheDocument();
  });

  it('show no text if no data found', async () => {
    await act(() => renderComponent({}));
  it('show no text if no data found', () => {
    renderComponent({});
    expect(screen.getByText('No tables or streams found')).toBeInTheDocument();
  });

  it('renders with tables', async () => {
    await act(() =>
      renderComponent({
        rows: {
          tables: fetchKsqlDbTablesPayload.tables,
          streams: [],
        },
      })
    );
  it('renders with tables', () => {
    renderComponent({
      rows: {
        tables: fetchKsqlDbTablesPayload.tables,
        streams: [],
      },
    });

    expect(screen.getByRole('table').querySelectorAll('td')).toHaveLength(10);
  });
  it('renders with streams', async () => {
    await act(() =>
      renderComponent({
        type: KsqlDbItemType.Streams,
        rows: {
          tables: [],
          streams: fetchKsqlDbTablesPayload.streams,
        },
      })
    );
  it('renders with streams', () => {
    renderComponent({
      type: KsqlDbItemType.Streams,
      rows: {
        tables: [],
        streams: fetchKsqlDbTablesPayload.streams,
      },
    });
    expect(screen.getByRole('table').querySelectorAll('td')).toHaveLength(10);
  });
});
@@ -34,17 +34,22 @@ export const StreamPropertiesContainer = styled.label`
`;

export const InputsContainer = styled.div`
  overflow: hidden;
  width: 100%;
  display: flex;
  justify-content: center;
  gap: 10px;
`;

export const StreamPropertiesInputWrapper = styled.div`
  & {
    width: 100%;
  }
  & > input {
    width: 100%;
    height: 40px;
    border: 1px solid grey;
    border-radius: 4px;
    min-width: 300px;
    font-size: 16px;
    padding-left: 15px;
  }
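For context on the new styled blocks: InputsContainer lays the key/value inputs out as a centered flex row with a 10px gap, and StreamPropertiesInputWrapper normalizes each input's size. A sketch of how they would compose in JSX (the import path and surrounding markup are assumptions, not shown in this diff):

import * as S from 'components/KsqlDb/Query/QueryForm/QueryForm.styled';

const StreamPropertyRow = () => (
  <S.InputsContainer>
    <S.StreamPropertiesInputWrapper>
      <input name="key" placeholder="Key" />
    </S.StreamPropertiesInputWrapper>
    <S.StreamPropertiesInputWrapper>
      <input name="value" placeholder="Value" />
    </S.StreamPropertiesInputWrapper>
  </S.InputsContainer>
);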
@@ -3,7 +3,6 @@ import React from 'react';
import QueryForm, { Props } from 'components/KsqlDb/Query/QueryForm/QueryForm';
import { screen, waitFor, within } from '@testing-library/dom';
import userEvent from '@testing-library/user-event';
import { act } from '@testing-library/react';

const renderComponent = (props: Props) => render(<QueryForm {...props} />);

@@ -57,10 +56,9 @@ describe('QueryForm', () => {
      submitHandler: submitFn,
    });

    await act(() =>
      userEvent.click(screen.getByRole('button', { name: 'Execute' }))
    );
    waitFor(() => {
    await userEvent.click(screen.getByRole('button', { name: 'Execute' }));

    await waitFor(() => {
      expect(screen.getByText('ksql is a required field')).toBeInTheDocument();
      expect(submitFn).not.toBeCalled();
    });
@@ -76,12 +74,16 @@ describe('QueryForm', () => {
      submitHandler: submitFn,
    });

    await act(() => {
      userEvent.paste(screen.getAllByRole('textbox')[0], 'show tables;');
      userEvent.paste(screen.getByRole('textbox', { name: 'key' }), 'test');
      userEvent.paste(screen.getByRole('textbox', { name: 'value' }), 'test');
      userEvent.click(screen.getByRole('button', { name: 'Execute' }));
    });
    const textbox = screen.getAllByRole('textbox');
    textbox[0].focus();
    await userEvent.paste('show tables;');
    const key = screen.getByRole('textbox', { name: 'key' });
    key.focus();
    await userEvent.paste('test');
    const value = screen.getByRole('textbox', { name: 'value' });
    value.focus();
    await userEvent.paste('test');
    await userEvent.click(screen.getByRole('button', { name: 'Execute' }));

    expect(
      screen.queryByText('ksql is a required field')
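The paste rewrite above reflects a v14 API change, not just async/await: userEvent.paste() no longer accepts a target element and instead pastes into whatever currently has focus, which is why each call is now preceded by an explicit focus(). The pattern in isolation:

// v13: userEvent.paste(element, 'show tables;');
// v14: focus first, then paste into the active element.
const [ksqlEditor] = screen.getAllByRole('textbox');
ksqlEditor.focus();
await userEvent.paste('show tables;');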
@@ -106,8 +108,8 @@ describe('QueryForm', () => {

    expect(screen.getByRole('button', { name: 'Clear results' })).toBeEnabled();

    await act(() =>
      userEvent.click(screen.getByRole('button', { name: 'Clear results' }))
    await userEvent.click(
      screen.getByRole('button', { name: 'Clear results' })
    );

    expect(clearFn).toBeCalled();
@@ -125,39 +127,12 @@ describe('QueryForm', () => {

    expect(screen.getByRole('button', { name: 'Stop query' })).toBeEnabled();

    await act(() =>
      userEvent.click(screen.getByRole('button', { name: 'Stop query' }))
    );
    await userEvent.click(screen.getByRole('button', { name: 'Stop query' }));

    expect(cancelFn).toBeCalled();
  });

  it('submits form with ctrl+enter on KSQL editor', async () => {
    const submitFn = jest.fn();
    renderComponent({
      fetching: false,
      hasResults: false,
      handleClearResults: jest.fn(),
      handleSSECancel: jest.fn(),
      submitHandler: submitFn,
    });

    await act(() => {
      userEvent.paste(
        within(screen.getByLabelText('KSQL')).getByRole('textbox'),
        'show tables;'
      );

      userEvent.type(
        within(screen.getByLabelText('KSQL')).getByRole('textbox'),
        '{ctrl}{enter}'
      );
    });

    expect(submitFn.mock.calls.length).toBe(1);
  });

  it('adds new property', async () => {
  it('add new property', async () => {
    renderComponent({
      fetching: false,
      hasResults: false,
@@ -168,11 +143,9 @@ describe('QueryForm', () => {

    const textbox = screen.getByLabelText('key');
    await userEvent.type(textbox, 'prop_name');
    await act(() => {
      userEvent.click(
        screen.getByRole('button', { name: 'Add Stream Property' })
      );
    });
    await userEvent.click(
      screen.getByRole('button', { name: 'Add Stream Property' })
    );
    expect(screen.getAllByRole('textbox', { name: 'key' }).length).toEqual(2);
  });

@@ -185,11 +158,9 @@ describe('QueryForm', () => {
      submitHandler: jest.fn(),
    });

    await act(() => {
      userEvent.click(
        screen.getByRole('button', { name: 'Add Stream Property' })
      );
    });
    await userEvent.click(
      screen.getByRole('button', { name: 'Add Stream Property' })
    );
    expect(screen.getAllByRole('textbox', { name: 'key' }).length).toEqual(1);
  });

@@ -201,16 +172,18 @@ describe('QueryForm', () => {
      handleSSECancel: jest.fn(),
      submitHandler: jest.fn(),
    });
    const textBoxes = screen.getAllByRole('textbox', { name: 'key' });
    textBoxes[0].focus();
    await userEvent.paste('test');
    await userEvent.click(
      screen.getByRole('button', { name: 'Add Stream Property' })
    );
    await userEvent.click(screen.getAllByLabelText('deleteProperty')[0]);

    await act(() => {
      userEvent.paste(screen.getByRole('textbox', { name: 'key' }), 'test');
      userEvent.click(
        screen.getByRole('button', { name: 'Add Stream Property' })
      );
    });
    await act(() => {
      userEvent.click(screen.getAllByLabelText('deleteProperty')[0]);
    });
    expect(screen.getAllByRole('textbox', { name: 'key' }).length).toEqual(1);
    await screen.getByRole('button', { name: 'Add Stream Property' });

    await userEvent.click(screen.getAllByLabelText('deleteProperty')[0]);

    expect(textBoxes.length).toEqual(1);
  });
});
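One test is removed outright here: 'submits form with ctrl+enter on KSQL editor', whose v13 '{ctrl}{enter}' descriptor has no one-to-one equivalent after the upgrade. If it were ported rather than dropped, v14's keyboard syntax would look roughly like this sketch (not code from the commit):

const editor = within(screen.getByLabelText('KSQL')).getByRole('textbox');
editor.focus();
await userEvent.paste('show tables;');
// v14 modifier syntax: "{Control>}" holds the key, "{/Control}" releases it.
await userEvent.keyboard('{Control>}{Enter}{/Control}');
expect(submitFn).toHaveBeenCalledTimes(1);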
@@ -6,7 +6,6 @@ import Query, {
import { screen } from '@testing-library/dom';
import fetchMock from 'fetch-mock';
import { clusterKsqlDbQueryPath } from 'lib/paths';
import { act } from '@testing-library/react';
import userEvent from '@testing-library/user-event';

const clusterName = 'testLocal';
@@ -41,10 +40,10 @@ describe('Query', () => {
    });
    const inputs = screen.getAllByRole('textbox');
    const textAreaElement = inputs[0] as HTMLTextAreaElement;
    await act(() => {
      userEvent.paste(textAreaElement, 'show tables;');
      userEvent.click(screen.getByRole('button', { name: 'Execute' }));
    });

    textAreaElement.focus();
    await userEvent.paste('show tables;');
    await userEvent.click(screen.getByRole('button', { name: 'Execute' }));

    expect(mock.calls().length).toBe(1);
  });
@@ -59,18 +58,20 @@ describe('Query', () => {
    Object.defineProperty(window, 'EventSource', {
      value: EventSourceMock,
    });
    await act(() => {
      const inputs = screen.getAllByRole('textbox');
      const textAreaElement = inputs[0] as HTMLTextAreaElement;
      userEvent.paste(textAreaElement, 'show tables;');
    });
    await act(() => {
      userEvent.paste(screen.getByLabelText('key'), 'key');
      userEvent.paste(screen.getByLabelText('value'), 'value');
    });
    await act(() => {
      userEvent.click(screen.getByRole('button', { name: 'Execute' }));
    });

    const inputs = screen.getAllByRole('textbox');
    const textAreaElement = inputs[0] as HTMLTextAreaElement;
    textAreaElement.focus();
    await userEvent.paste('show tables;');

    const key = screen.getByLabelText('key');
    key.focus();
    await userEvent.paste('key');
    const value = screen.getByLabelText('value');
    value.focus();
    await userEvent.paste('value');

    await userEvent.click(screen.getByRole('button', { name: 'Execute' }));

    expect(mock.calls().length).toBe(1);
  });
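The second Query test depends on EventSourceMock because jsdom ships no EventSource implementation, so the SSE-driven query would otherwise throw before any assertion runs. A minimal stub of the kind presumably behind that name (the real mock in the repo may differ):

class EventSourceMock {
  onopen: (() => void) | null = null;
  onmessage: ((e: MessageEvent) => void) | null = null;
  close = jest.fn();
  constructor(public url: string) {}
}

// Injected before render so the component picks the stub up from window.
Object.defineProperty(window, 'EventSource', { value: EventSourceMock });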
Some files were not shown because too many files have changed in this diff.