commit fcfdc69b45: merged with master
146 changed files with 1756 additions and 3240 deletions
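Most of the diff repeats one migration: the example docker-compose files drop their cp-zookeeper services and move the brokers from confluentinc/cp-kafka:5.3.1 to 7.2.1 in KRaft mode. Assembled from the added lines in the hunks below (quoting style varies per file), the broker definition the files converge on looks like this:

```yaml
  kafka0:
    image: confluentinc/cp-kafka:7.2.1
    hostname: kafka0
    container_name: kafka0
    ports:
      - "9092:9092"
      - "9997:9997"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_NODE_ID: 1
      # KRaft: the same node acts as broker and controller, no zookeeper service
      KAFKA_PROCESS_ROLES: 'broker,controller'
      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
      KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
      KAFKA_JMX_PORT: 9997
      KAFKA_JMX_HOSTNAME: localhost
    volumes:
      - ./scripts/update_run.sh:/tmp/update_run.sh
    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
```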
.github/workflows/frontend.yaml (vendored, 4 changes)

@@ -20,11 +20,11 @@ jobs:
           # Disabling shallow clone is recommended for improving relevancy of reporting
           fetch-depth: 0
           ref: ${{ github.event.pull_request.head.sha }}
-      - uses: pnpm/action-setup@v2.2.3
+      - uses: pnpm/action-setup@v2.2.4
         with:
           version: 7.4.0
       - name: Install node
-        uses: actions/setup-node@v3.4.1
+        uses: actions/setup-node@v3.5.1
         with:
           node-version: "16.15.0"
           cache: "pnpm"
.github/workflows/helm.yaml (vendored, 25 changes)

@@ -12,9 +12,18 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Helm tool installer
-        uses: Azure/setup-helm@v1
+        uses: Azure/setup-helm@v3
       - name: Setup Kubeval
         uses: lra/setup-kubeval@v1.0.1
+      #check, was helm version increased in Chart.yaml?
+      - name: Check version
+        shell: bash
+        run: |
+          helm_version_new=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
+          helm_version_old=$(curl -s https://raw.githubusercontent.com/provectus/kafka-ui/master/charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}' )
+          echo $helm_version_old
+          echo $helm_version_new
+          if [[ "$helm_version_new" > "$helm_version_old" ]]; then exit 0 ; else exit 1 ; fi
       - name: Run kubeval
         shell: bash
         run: |
@@ -27,17 +36,3 @@ jobs:
             echo $version;
             helm template --kube-version $version --set ingress.enabled=true charts/kafka-ui -f charts/kafka-ui/values.yaml | kubeval --additional-schema-locations https://raw.githubusercontent.com/yannh/kubernetes-json-schema/master --strict -v $version;
           done
-      #check, was helm version increased in Chart.yaml?
-      - name: Check version
-        shell: bash
-        run: |
-          git fetch
-          git checkout master
-          helm_version_old=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
-          git checkout $GITHUB_HEAD_REF
-          helm_version_new=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
-          echo $helm_version_old
-          echo $helm_version_new
-          if [[ "$helm_version_new" > "$helm_version_old" ]]; then exit 0 ; else exit 1 ; fi
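The relocated check now diffs the PR's chart version against master's via curl instead of git checkouts, and passes only when the new version compares greater. A sketch of the comparison it relies on, with hypothetical values (not part of the diff):

```yaml
# illustration only; mirrors the logic of the step added above
- name: Check version (sketch)
  shell: bash
  run: |
    helm_version_old=0.4.3   # hypothetical: version on master
    helm_version_new=0.4.4   # hypothetical: version in the PR
    # [[ a > b ]] compares lexicographically: fine for 0.4.3 vs 0.4.4,
    # but it would rank 0.4.10 below 0.4.9; a sort -V based check avoids that.
    if [[ "$helm_version_new" > "$helm_version_old" ]]; then exit 0; else exit 1; fi
```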
.github/workflows/release-helm.yaml (vendored, 11 changes)

@@ -19,19 +19,20 @@ jobs:
           git config user.name github-actions
           git config user.email github-actions@github.com

-      - uses: azure/setup-helm@v1
+      - uses: azure/setup-helm@v3

       - name: add chart #realse helm with new version
         run: |
-          echo "VERSION=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')" >> $GITHUB_ENV
-          MSG=$(helm package charts/kafka-ui)
+          VERSION=$(cat charts/kafka-ui/Chart.yaml | grep version | awk '{print $2}')
+          echo "HELM_VERSION=$(echo ${VERSION})" >> $GITHUB_ENV
+          MSG=$(helm package charts/kafka-ui)
           git fetch origin
           git stash
           git checkout -b gh-pages origin/gh-pages
           helm repo index .
           git add -f ${MSG##*/} index.yaml
-          git commit -m "release ${{ env.VERSION }}"
+          git commit -m "release ${VERSION}"
           git push
       - uses: rickstaa/action-create-tag@v1 #create new tag
         with:
-          tag: "charts/kafka-ui-${{ env.VERSION }}"
+          tag: "charts/kafka-ui-${{ env.HELM_VERSION }}"
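The rework leans on a GitHub Actions rule: values appended to $GITHUB_ENV become env.* only in later steps, never in the step that wrote them. Hence the plain shell variable ${VERSION} for the commit message inside the same step, and env.HELM_VERSION for the tag step that runs afterwards. A minimal sketch with hypothetical step names and value:

```yaml
- name: write value
  run: |
    VERSION=0.4.4                                  # hypothetical value
    echo "HELM_VERSION=${VERSION}" >> $GITHUB_ENV  # not visible as env.* yet
    echo "same step must use ${VERSION}"           # shell variable still works here
- name: read value
  run: echo "later steps see ${{ env.HELM_VERSION }}"
```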
.github/workflows/stale.yaml (vendored, 2 changes)

@@ -7,7 +7,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@v5
+      - uses: actions/stale@v6
        with:
          days-before-issue-stale: 7
          days-before-issue-close: 3
charts/kafka-ui/Chart.yaml

@@ -2,6 +2,6 @@ apiVersion: v2
 name: kafka-ui
 description: A Helm chart for kafka-UI
 type: application
-version: 0.4.3
-appVersion: latest
+version: 0.4.4
+appVersion: v0.4.0
 icon: https://github.com/provectus/kafka-ui/raw/master/documentation/images/kafka-ui-logo.png
charts/kafka-ui/templates/deployment.yaml

@@ -18,6 +18,7 @@ spec:
         {{- toYaml . | nindent 8 }}
         {{- end }}
         checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }}
+        checksum/configFromValues: {{ include (print $.Template.BasePath "/configmap_fromValues.yaml") . | sha256sum }}
         checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }}
       labels:
         {{- include "kafka-ui.selectorLabels" . | nindent 8 }}
@@ -17,7 +17,6 @@ services:
     environment:
       - KAFKA_CLUSTERS_0_NAME=local
       - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
-      - KAFKA_CLUSTERS_0_ZOOKEEPER=localhost:2181
 ```

 * If you prefer UI for Apache Kafka in read only mode
@@ -34,7 +33,6 @@ services:
     environment:
       - KAFKA_CLUSTERS_0_NAME=local
       - KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS=kafka:9092
-      - KAFKA_CLUSTERS_0_ZOOKEEPER=localhost:2181
       - KAFKA_CLUSTERS_0_READONLY=true
 ```
@@ -13,3 +13,4 @@
 11. [kafka-ui-traefik-proxy.yaml](./kafka-ui-traefik-proxy.yaml) - Traefik specific proxy configuration.
 12. [oauth-cognito.yaml](./oauth-cognito.yaml) - OAuth2 with Cognito
 13. [kafka-ui-with-jmx-exporter.yaml](./kafka-ui-with-jmx-exporter.yaml) - A configuration with 2 kafka clusters with enabled prometheus jmx exporters instead of jmx.
+14. [kafka-with-zookeeper.yaml](./kafka-with-zookeeper.yaml) - An example for using kafka with zookeeper
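Usage is uniform across the listed compose files; for the newly added example, a sketch (run from the directory holding these files):

```sh
# bring the zookeeper-based example up in the background
docker-compose -f kafka-with-zookeeper.yaml up -d
```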
@@ -8,24 +8,13 @@ services:
     ports:
       - 8080:8080
     depends_on:
-      - zookeeper0
       - kafka0
       - schemaregistry0
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
-      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
       KAFKA_CLUSTERS_0_METRICS_PORT: 9997
       KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
-      KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
-      KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
-      KAFKA_CLUSTERS_1_NAME: secondLocal
-      KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
-      KAFKA_CLUSTERS_1_ZOOKEEPER: zookeeper1:2181
-      KAFKA_CLUSTERS_1_METRICS_PORT: 9998
-      KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
-      KAFKA_CLUSTERS_1_KAFKACONNECT_0_NAME: first
-      KAFKA_CLUSTERS_1_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
       AUTH_TYPE: "LDAP"
       SPRING_LDAP_URLS: "ldap://ldap:10389"
       SPRING_LDAP_DN_PATTERN: "cn={0},ou=people,dc=planetexpress,dc=com"
@@ -47,41 +36,43 @@ services:
     image: rroemhild/test-openldap:latest
     hostname: "ldap"

-  zookeeper0:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2181:2181
-
   kafka0:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper0
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka0
+    container_name: kafka0
     ports:
-      - 9092:9092
-      - 9997:9997
+      - "9092:9092"
+      - "9997:9997"
     environment:
       KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      JMX_PORT: 9997
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
       KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+      KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+    volumes:
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

   schemaregistry0:
-    image: confluentinc/cp-schema-registry:5.5.0
+    image: confluentinc/cp-schema-registry:7.2.1
     ports:
       - 8085:8085
     depends_on:
-      - zookeeper0
       - kafka0
     environment:
       SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
-      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
       SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
       SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
       SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
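Every migrated broker mounts a scripts/update_run.sh that this diff does not include. Confluent's canonical KRaft workaround script for cp-kafka images of this era looks roughly like the sketch below; this is an assumption, the repo's actual script may differ:

```sh
#!/bin/sh
# Sketch of a typical update_run.sh (assumed, not from this diff).
# Docker workaround: remove the check for KAFKA_ZOOKEEPER_CONNECT.
sed -i '/KAFKA_ZOOKEEPER_CONNECT/d' /etc/confluent/docker/configure
# Docker workaround: skip the "cub zk-ready" wait, there is no zookeeper.
sed -i 's/cub zk-ready/echo ignore zk-ready/' /etc/confluent/docker/ensure
# KRaft requires formatted storage: format the log dirs with a fresh cluster ID.
echo "kafka-storage format --ignore-formatted -t $(kafka-storage random-uuid) -c /etc/kafka/kafka.properties" >> /etc/confluent/docker/ensure
```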
@@ -8,57 +8,55 @@ services:
     ports:
       - 8080:8080
     depends_on:
-      - zookeeper0
       - kafka0
       - schemaregistry0
       - kafka-connect0
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
-      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
       KAFKA_CLUSTERS_0_METRICS_PORT: 9997
       KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
       KAFKA_CLUSTERS_0_KSQLDBSERVER: http://ksqldb:8088

-  zookeeper0:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2181:2181
-
   kafka0:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper0
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka0
+    container_name: kafka0
     ports:
-      - 9092:9092
-      - 9997:9997
+      - "9092:9092"
+      - "9997:9997"
     environment:
       KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
       KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
       KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
-      JMX_PORT: 9997
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
       KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
+      KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+    volumes:
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

   schemaregistry0:
-    image: confluentinc/cp-schema-registry:5.5.0
+    image: confluentinc/cp-schema-registry:7.2.1
     ports:
       - 8085:8085
     depends_on:
-      - zookeeper0
       - kafka0
     environment:
       SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
-      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
       SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
       SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
       SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@@ -98,17 +96,16 @@ services:
       # AWS_SECRET_ACCESS_KEY: ""

   kafka-init-topics:
-    image: confluentinc/cp-kafka:5.3.1
+    image: confluentinc/cp-kafka:7.2.1
     volumes:
       - ./message.json:/data/message.json
     depends_on:
       - kafka0
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
-      cub kafka-ready -b kafka1:29092 1 30 && \
-      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
-      kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
-      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
-      kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
+      cub kafka-ready -b kafka0:29092 1 30 && \
+      kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+      kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+      kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"

   postgres-db:
     build:
@@ -2,43 +2,44 @@
 ---
-version: '2'
+version: "2"
 services:

-  zookeeper1:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2182:2181
-
   kafka1:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper1
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka1
+    container_name: kafka1
+    ports:
+      - "9092:9092"
+      - "9997:9997"
     environment:
       KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9092'
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      JMX_PORT: 9998
-      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9998
-    ports:
-      - 9093:9093
-      - 9998:9998
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka1:29093'
+      KAFKA_LISTENERS: 'PLAINTEXT://kafka1:29092,CONTROLLER://kafka1:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
+    volumes:
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

   schemaregistry1:
-    image: confluentinc/cp-schema-registry:5.5.0
+    image: confluentinc/cp-schema-registry:7.2.1
     ports:
       - 18085:8085
     depends_on:
-      - zookeeper1
       - kafka1
     volumes:
       - ./jaas:/conf
     environment:
       SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
-      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
       SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
       SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
       SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
@@ -54,13 +55,29 @@ services:
       SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

   kafka-init-topics:
-    image: confluentinc/cp-kafka:5.3.1
+    image: confluentinc/cp-kafka:7.2.1
     volumes:
       - ./message.json:/data/message.json
     depends_on:
       - kafka1
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
       cub kafka-ready -b kafka1:29092 1 30 && \
-      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
-      kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
-      kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
+      kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
+      kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
+      kafka-console-producer --bootstrap-server kafka1:29092 --topic users < /data/message.json'"
+
+  kafka-ui:
+    container_name: kafka-ui
+    image: provectuslabs/kafka-ui:latest
+    ports:
+      - 8080:8080
+    depends_on:
+      - kafka1
+      - schemaregistry1
+    environment:
+      KAFKA_CLUSTERS_0_NAME: local
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka1:29092
+      KAFKA_CLUSTERS_0_METRICS_PORT: 9997
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry1:8085
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_USERNAME: admin
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRYAUTH_PASSWORD: letmein
@@ -1,83 +1,41 @@
 ---
-version: '2'
+version: "2"
 services:

-  zookeeper0:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2181:2181
-
   kafka0:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper0
-    environment:
-      KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
-      JMX_PORT: 9997
-      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9997
-    ports:
-      - 9092:9092
-      - 9997:9997
-
-  kafka01:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper0
-    environment:
-      KAFKA_BROKER_ID: 2
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka01:29092,PLAINTEXT_HOST://localhost:9094
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT,PLAIN:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
-      JMX_PORT: 9999
-      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9999
-    ports:
-      - 9094:9094
-      - 9999:9999
-
-  zookeeper1:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2182:2181
-
-  kafka1:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper1
-    environment:
-      KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
-      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      JMX_PORT: 9998
-      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=localhost -Dcom.sun.management.jmxremote.rmi.port=9998
-    ports:
-      - 9093:9093
-      - 9998:9998
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka0
+    container_name: kafka0
+    ports:
+      - "9092:9092"
+      - "9997:9997"
+    environment:
+      KAFKA_BROKER_ID: 1
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
+      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092"
+      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
+      KAFKA_PROCESS_ROLES: "broker,controller"
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka0:29093"
+      KAFKA_LISTENERS: "PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092"
+      KAFKA_INTER_BROKER_LISTENER_NAME: "PLAINTEXT"
+      KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+      KAFKA_LOG_DIRS: "/tmp/kraft-combined-logs"
+    volumes:
+      - ./scripts/update_run_cluster.sh:/tmp/update_run.sh
+      - ./scripts/clusterID:/tmp/clusterID
+    command: 'bash -c ''if [ ! -f /tmp/update_run.sh ]; then echo "ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'''

   schemaregistry0:
-    image: confluentinc/cp-schema-registry:5.5.0
+    image: confluentinc/cp-schema-registry:7.2.1
     depends_on:
-      - zookeeper0
       - kafka0
-      - kafka01
     environment:
-      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092,PLAINTEXT://kafka01:29092
-      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
+      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
       SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
       SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
       SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@@ -86,28 +44,10 @@ services:
       SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
       SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
     ports:
       - 8085:8085

-  schemaregistry1:
-    image: confluentinc/cp-schema-registry:5.5.0
-    ports:
-      - 18085:8085
-    depends_on:
-      - zookeeper1
-      - kafka1
-    environment:
-      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
-      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
-      SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
-      SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
-      SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
-
-      SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
-      SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
-      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
-
   kafka-connect0:
-    image: confluentinc/cp-kafka-connect:6.0.1
+    image: confluentinc/cp-kafka-connect:7.2.1
     ports:
       - 8083:8083
     depends_on:
@@ -131,16 +71,14 @@ services:
       CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
       CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"

   kafka-init-topics:
-    image: confluentinc/cp-kafka:5.3.1
+    image: confluentinc/cp-kafka:7.2.1
     volumes:
       - ./message.json:/data/message.json
     depends_on:
-      - kafka1
+      - kafka0
     command: "bash -c 'echo Waiting for Kafka to be ready... && \
-      cub kafka-ready -b kafka1:29092 1 30 && \
-      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
-      kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
-      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
-      kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
+      cub kafka-ready -b kafka0:29092 1 30 && \
+      kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+      kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
+      kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"

   postgres-db:
     build:
@@ -7,13 +7,11 @@ services:
     ports:
       - 8080:8080
     depends_on:
-      - zookeeper0
-      - kafka0
+      - kafka
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_PROPERTIES_SECURITY_PROTOCOL: SSL
-      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092 # SSL LISTENER!
-      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092 # SSL LISTENER!
       KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_LOCATION: /kafka.truststore.jks
       KAFKA_CLUSTERS_0_PROPERTIES_SSL_TRUSTSTORE_PASSWORD: secret
       KAFKA_CLUSTERS_0_PROPERTIES_SSL_KEYSTORE_LOCATION: /kafka.keystore.jks
@@ -23,28 +21,30 @@ services:
       - ./ssl/kafka.truststore.jks:/kafka.truststore.jks
       - ./ssl/kafka.keystore.jks:/kafka.keystore.jks

-  zookeeper0:
-    image: confluentinc/cp-zookeeper:6.0.1
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2181:2181
-
-  kafka0:
-    image: confluentinc/cp-kafka:6.0.1
-    hostname: kafka0
-    depends_on:
-      - zookeeper0
+  kafka:
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka
+    container_name: kafka
     ports:
-      - '9092:9092'
+      - "9092:9092"
+      - "9997:9997"
     environment:
       KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,SSL:SSL,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'SSL://kafka:29092,PLAINTEXT_HOST://localhost:9092'
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      KAFKA_ADVERTISED_LISTENERS: SSL://kafka0:29092,PLAINTEXT_HOST://localhost:9092
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: SSL:SSL,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: SSL
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
+      KAFKA_LISTENERS: 'SSL://kafka:29092,CONTROLLER://kafka:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'SSL'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
       KAFKA_SECURITY_PROTOCOL: SSL
       KAFKA_SSL_ENABLED_MECHANISMS: PLAIN,SSL
       KAFKA_SSL_KEYSTORE_FILENAME: kafka.keystore.jks
@@ -56,6 +56,8 @@ services:
       KAFKA_SSL_CLIENT_AUTH: 'requested'
       KAFKA_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM: '' # COMMON NAME VERIFICATION IS DISABLED SERVER-SIDE
     volumes:
+      - ./scripts/update_run.sh:/tmp/update_run.sh
       - ./ssl/creds:/etc/kafka/secrets/creds
       - ./ssl/kafka.truststore.jks:/etc/kafka/secrets/kafka.truststore.jks
       - ./ssl/kafka.keystore.jks:/etc/kafka/secrets/kafka.keystore.jks
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
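Both the UI and the broker mount keystores from ./ssl; a quick way to sanity-check those files before bringing the stack up, as a sketch assuming a JDK's keytool on the host (the password comes from the compose file above):

```sh
# list the entries in the mounted truststore; should print the broker's CA/cert
keytool -list -keystore ssl/kafka.truststore.jks -storepass secret
```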
@@ -1,5 +1,3 @@
-# This compose file uses kafka cluster without zookeeper
-# Kafka without zookeeper is supported after image tag 6.2.0
 # ARM64 supported images for kafka can be found here
 # https://hub.docker.com/r/confluentinc/cp-kafka/tags?page=1&name=arm64
 ---
@@ -12,18 +10,18 @@ services:
       - 8080:8080
     depends_on:
       - kafka0
-      - schemaregistry0
+      - schema-registry0
       - kafka-connect0
     environment:
       KAFKA_CLUSTERS_0_NAME: local
       KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
-      KAFKA_CLUSTERS_0_JMXPORT: 9997
-      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
+      KAFKA_CLUSTERS_0_METRICS_PORT: 9997
+      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schema-registry0:8085
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
       KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083

   kafka0:
-    image: confluentinc/cp-kafka:7.0.5.arm64
+    image: confluentinc/cp-kafka:7.2.1.arm64
     hostname: kafka0
     container_name: kafka0
     ports:
@@ -44,14 +42,14 @@ services:
       KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
       KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
       KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
-      JMX_PORT: 9997
+      KAFKA_JMX_PORT: 9997
       KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
     volumes:
       - ./scripts/update_run.sh:/tmp/update_run.sh
     command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

-  schemaregistry0:
-    image: confluentinc/cp-schema-registry:7.0.5.arm64
+  schema-registry0:
+    image: confluentinc/cp-schema-registry:7.2.1.arm64
     ports:
       - 8085:8085
     depends_on:
@@ -59,20 +57,20 @@ services:
     environment:
       SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
       SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
-      SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
-      SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
+      SCHEMA_REGISTRY_HOST_NAME: schema-registry0
+      SCHEMA_REGISTRY_LISTENERS: http://schema-registry0:8085

       SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
       SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
       SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

   kafka-connect0:
-    image: confluentinc/cp-kafka-connect:7.0.5.arm64
+    image: confluentinc/cp-kafka-connect:7.2.1.arm64
     ports:
       - 8083:8083
     depends_on:
       - kafka0
-      - schemaregistry0
+      - schema-registry0
     environment:
       CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
       CONNECT_GROUP_ID: compose-connect-group
@@ -83,16 +81,16 @@ services:
       CONNECT_STATUS_STORAGE_TOPIC: _connect_status
       CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
       CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
-      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
+      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry0:8085
       CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
-      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
+      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry0:8085
       CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
       CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
       CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
       CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"

   kafka-init-topics:
-    image: confluentinc/cp-kafka:7.0.5.arm64
+    image: confluentinc/cp-kafka:7.2.1.arm64
     volumes:
       - ./message.json:/data/message.json
     depends_on:
@@ -102,4 +100,4 @@ services:
       kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
       kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
       kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
-      kafka-console-producer --broker-list kafka0:29092 -topic second.users < /data/message.json'"
+      kafka-console-producer --bootstrap-server kafka0:29092 --topic second.users < /data/message.json'"
@@ -8,52 +8,40 @@ services:
     ports:
       - 8080:8080
     depends_on:
-      - zookeeper0
-      - kafka0
+      - kafka
     environment:
       KAFKA_CLUSTERS_0_NAME: local
-      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
-      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
       KAFKA_CLUSTERS_0_METRICS_PORT: 9997
       SERVER_SERVLET_CONTEXT_PATH: /kafkaui
       AUTH_TYPE: "LOGIN_FORM"
       SPRING_SECURITY_USER_NAME: admin
       SPRING_SECURITY_USER_PASSWORD: pass

-  zookeeper0:
-    image: confluentinc/cp-zookeeper:5.2.4
-    environment:
-      ZOOKEEPER_CLIENT_PORT: 2181
-      ZOOKEEPER_TICK_TIME: 2000
-    ports:
-      - 2181:2181
-
-  kafka0:
-    image: confluentinc/cp-kafka:5.3.1
-    depends_on:
-      - zookeeper0
+  kafka:
+    image: confluentinc/cp-kafka:7.2.1
+    hostname: kafka
+    container_name: kafka
     ports:
-      - 9092:9092
-      - 9997:9997
+      - "9092:9092"
+      - "9997:9997"
     environment:
       KAFKA_BROKER_ID: 1
-      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
-      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
+      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
+      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092'
       KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
-      JMX_PORT: 9997
-      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
-
-  kafka-init-topics:
-    image: confluentinc/cp-kafka:5.3.1
+      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
+      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
+      KAFKA_JMX_PORT: 9997
+      KAFKA_JMX_HOSTNAME: localhost
+      KAFKA_PROCESS_ROLES: 'broker,controller'
+      KAFKA_NODE_ID: 1
+      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka:29093'
+      KAFKA_LISTENERS: 'PLAINTEXT://kafka:29092,CONTROLLER://kafka:29093,PLAINTEXT_HOST://0.0.0.0:9092'
+      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
+      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
     volumes:
-      - ./message.json:/data/message.json
-    depends_on:
-      - kafka0
-    command: "bash -c 'echo Waiting for Kafka to be ready... && \
-      cub kafka-ready -b kafka0:29092 1 30 && \
-      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
-      kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
-      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
-      kafka-console-producer --broker-list kafka0:29092 -topic second.users < /data/message.json'"
+      - ./scripts/update_run.sh:/tmp/update_run.sh
+    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
@ -1,68 +1,62 @@
|
||||||
---
|
---
|
||||||
version: '2'
|
version: "2"
|
||||||
services:
|
services:
|
||||||
|
|
||||||
kafka-ui:
|
kafka-ui:
|
||||||
container_name: kafka-ui
|
container_name: kafka-ui
|
||||||
image: provectuslabs/kafka-ui:latest
|
image: provectuslabs/kafka-ui:latest
|
||||||
ports:
|
ports:
|
||||||
- 8080:8080
|
- 8080:8080
|
||||||
depends_on:
|
depends_on:
|
||||||
- zookeeper0
|
|
||||||
- zookeeper1
|
|
||||||
- kafka0
|
- kafka0
|
||||||
- kafka1
|
|
||||||
- schemaregistry0
|
- schemaregistry0
|
||||||
- kafka-connect0
|
- kafka-connect0
|
||||||
environment:
|
environment:
|
||||||
KAFKA_CLUSTERS_0_NAME: local
|
KAFKA_CLUSTERS_0_NAME: local
|
||||||
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
|
KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
|
||||||
KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
|
|
||||||
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
|
KAFKA_CLUSTERS_0_METRICS_PORT: 9997
|
||||||
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
|
KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
|
||||||
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
|
KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
|
||||||
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
|
KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
|
||||||
KAFKA_CLUSTERS_0_KAFKACONNECT_0_USERNAME: admin
|
KAFKA_CLUSTERS_0_KAFKACONNECT_0_USERNAME: admin
|
||||||
KAFKA_CLUSTERS_0_KAFKACONNECT_0_PASSWORD: admin-secret
|
KAFKA_CLUSTERS_0_KAFKACONNECT_0_PASSWORD: admin-secret
|
||||||
KAFKA_CLUSTERS_0_KSQLDBSERVER: http://ksqldb:8088
|
|
||||||
|
|
||||||
zookeeper0:
|
|
||||||
image: confluentinc/cp-zookeeper:5.2.4
|
|
||||||
environment:
|
|
||||||
ZOOKEEPER_CLIENT_PORT: 2181
|
|
||||||
ZOOKEEPER_TICK_TIME: 2000
|
|
||||||
ports:
|
|
||||||
- 2181:2181
|
|
||||||
|
|
||||||
kafka0:
|
kafka0:
|
||||||
image: confluentinc/cp-kafka:5.3.1
|
image: confluentinc/cp-kafka:7.2.1
|
||||||
depends_on:
|
hostname: kafka0
|
||||||
- zookeeper0
|
container_name: kafka0
|
||||||
ports:
|
ports:
|
||||||
- 9092:9092
|
- "9092:9092"
|
||||||
- 9997:9997
|
- "9997:9997"
|
||||||
environment:
|
environment:
|
||||||
KAFKA_BROKER_ID: 1
|
KAFKA_BROKER_ID: 1
|
||||||
KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
|
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: "CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT"
|
||||||
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
|
KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092"
|
||||||
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
|
|
||||||
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
|
|
||||||
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
|
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
|
||||||
|
KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
|
||||||
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
|
KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
|
||||||
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
|
KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
|
||||||
JMX_PORT: 9997
|
KAFKA_JMX_PORT: 9997
|
||||||
|
KAFKA_JMX_HOSTNAME: localhost
|
||||||
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
|
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
|
||||||
|
KAFKA_PROCESS_ROLES: "broker,controller"
|
||||||
|
KAFKA_NODE_ID: 1
|
||||||
|
KAFKA_CONTROLLER_QUORUM_VOTERS: "1@kafka0:29093"
|
||||||
|
KAFKA_LISTENERS: "PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092"
|
||||||
|
KAFKA_INTER_BROKER_LISTENER_NAME: "PLAINTEXT"
|
||||||
|
KAFKA_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
|
||||||
|
KAFKA_LOG_DIRS: "/tmp/kraft-combined-logs"
|
||||||
|
volumes:
|
||||||
|
- ./scripts/update_run.sh:/tmp/update_run.sh
|
||||||
|
command: 'bash -c ''if [ ! -f /tmp/update_run.sh ]; then echo "ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'''
|
||||||
|
|
||||||
schemaregistry0:
|
schemaregistry0:
|
||||||
image: confluentinc/cp-schema-registry:5.5.0
|
image: confluentinc/cp-schema-registry:7.2.1
|
||||||
ports:
|
ports:
|
||||||
- 8085:8085
|
- 8085:8085
|
||||||
depends_on:
|
depends_on:
|
||||||
- zookeeper0
|
|
||||||
- kafka0
|
- kafka0
|
||||||
environment:
|
environment:
|
||||||
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
|
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
|
||||||
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
|
|
||||||
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
|
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
|
||||||
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
|
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
|
||||||
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
|
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
|
||||||
|
@ -71,7 +65,6 @@ services:
|
||||||
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
|
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
|
||||||
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
|
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
|
||||||
|
|
||||||
|
|
||||||
kafka-connect0:
|
kafka-connect0:
|
||||||
build:
|
build:
|
||||||
context: ./kafka-connect
|
context: ./kafka-connect
|
||||||
|
@ -105,47 +98,17 @@ services:
|
||||||
CONNECT_REST_EXTENSION_CLASSES: "org.apache.kafka.connect.rest.basic.auth.extension.BasicAuthSecurityRestExtension"
|
CONNECT_REST_EXTENSION_CLASSES: "org.apache.kafka.connect.rest.basic.auth.extension.BasicAuthSecurityRestExtension"
|
||||||
KAFKA_OPTS: "-Djava.security.auth.login.config=/conf/kafka_connect.jaas"
|
KAFKA_OPTS: "-Djava.security.auth.login.config=/conf/kafka_connect.jaas"
|
||||||
|
|
||||||
# AWS_ACCESS_KEY_ID: ""
|
# AWS_ACCESS_KEY_ID: ""
|
||||||
# AWS_SECRET_ACCESS_KEY: ""
|
# AWS_SECRET_ACCESS_KEY: ""
|
||||||
|
|
||||||
  kafka-init-topics:
    image: confluentinc/cp-kafka:5.3.1
    image: confluentinc/cp-kafka:7.2.1
    volumes:
      - ./message.json:/data/message.json
    depends_on:
      - kafka1
      - kafka0
    command: "bash -c 'echo Waiting for Kafka to be ready... && \
      cub kafka-ready -b kafka1:29092 1 30 && \
      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
      kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
      kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
    command: "bash -c 'echo Waiting for Kafka to be ready... && \
      cub kafka-ready -b kafka0:29092 1 30 && \
      kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
      kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
      kafka-console-producer --bootstrap-server kafka0:29092 --topic users < /data/message.json'"

  create-connectors:
    image: ellerbrock/alpine-bash-curl-ssl
    depends_on:
      - postgres-db
      - kafka-connect0
    volumes:
      - ./connectors:/connectors
    command: bash -c '/connectors/start.sh'

  ksqldb:
    image: confluentinc/ksqldb-server:0.18.0
    depends_on:
      - kafka0
      - kafka-connect0
      - schemaregistry0
    ports:
      - 8088:8088
    environment:
      KSQL_CUB_KAFKA_TIMEOUT: 120
      KSQL_LISTENERS: http://0.0.0.0:8088
      KSQL_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
      KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: "true"
      KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: "true"
      KSQL_KSQL_CONNECT_URL: http://kafka-connect0:8083
      KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
      KSQL_KSQL_SERVICE_ID: my_ksql_1
      KSQL_KSQL_HIDDEN_TOPICS: '^_.*'
      KSQL_CACHE_MAX_BYTES_BUFFERING: 0
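The kafka-init-topics container above seeds topics through the Kafka CLI. For readers scripting the same bootstrap from code, here is a minimal AdminClient sketch; the topic names, partition counts and replication factor mirror the compose command, while the localhost:9092 bootstrap address is an assumption for a client running on the Docker host (the advertised PLAINTEXT_HOST listener):

import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;

public class InitTopics {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    // host-side clients reach the broker via the advertised PLAINTEXT_HOST listener (assumption)
    props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    try (AdminClient admin = AdminClient.create(props)) {
      // same partition counts / replication factor as the compose command above
      admin.createTopics(List.of(
          new NewTopic("users", 3, (short) 1),
          new NewTopic("messages", 2, (short) 1)
      )).all().get();
    }
  }
}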
@ -9,14 +9,12 @@ services:
      - 8080:8080
      - 5005:5005
    depends_on:
      - zookeeper0
      - kafka0
      - schemaregistry0
      - kafka-connect0
    environment:
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
@ -34,29 +32,29 @@ services:
      - ./jmx/clienttruststore:/jmx/clienttruststore
      - ./jmx/clientkeystore:/jmx/clientkeystore

  zookeeper0:
    image: confluentinc/cp-zookeeper:5.2.4
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    ports:
      - 2181:2181

  kafka0:
    image: confluentinc/cp-kafka:5.3.1
    image: confluentinc/cp-kafka:7.2.1
    depends_on:
      - zookeeper0
    hostname: kafka0
    container_name: kafka0
    ports:
      - 9092:9092
      - 9997:9997
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      JMX_PORT: 9997
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: 9997
      KAFKA_PROCESS_ROLES: 'broker,controller'
      KAFKA_NODE_ID: 1
      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
      KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
      # CHMOD 700 FOR JMXREMOTE.* FILES
      KAFKA_JMX_OPTS: >-
        -Dcom.sun.management.jmxremote
@ -75,21 +73,21 @@ services:
        -Djava.rmi.server.logCalls=true
        # -Djavax.net.debug=ssl:handshake
    volumes:
      - ./jmx/serverkeystore:/jmx/serverkeystore
      - ./jmx/servertruststore:/jmx/servertruststore
      - ./jmx/jmxremote.password:/jmx/jmxremote.password
      - ./jmx/jmxremote.access:/jmx/jmxremote.access
      - ./scripts/update_run.sh:/tmp/update_run.sh
    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"
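To check the secured JMX endpoint from outside the container, a plain JMX client works. A minimal sketch assuming the client trust store mounted above, SSL-protected RMI, and a user/password pair from jmxremote.password; the trust store password and credentials below are assumptions, not values from this repository:

import java.util.HashMap;
import java.util.Map;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;
import javax.rmi.ssl.SslRMIClientSocketFactory;

public class JmxSslCheck {
  public static void main(String[] args) throws Exception {
    // trust material mirrors the ./jmx client stores from this compose file
    System.setProperty("javax.net.ssl.trustStore", "jmx/clienttruststore");
    System.setProperty("javax.net.ssl.trustStorePassword", "changeit"); // assumed password

    Map<String, Object> env = new HashMap<>();
    // RMI over SSL needs an SSL socket factory on the client side
    env.put("com.sun.jndi.rmi.factory.socket", new SslRMIClientSocketFactory());
    // credentials must match an entry in jmxremote.password (assumed values)
    env.put(JMXConnector.CREDENTIALS, new String[] {"jmxuser", "jmxpassword"});

    JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:9997/jmxrmi");
    try (JMXConnector connector = JMXConnectorFactory.connect(url, env)) {
      System.out.println("MBean count: " + connector.getMBeanServerConnection().getMBeanCount());
    }
  }
}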
  schemaregistry0:
    image: confluentinc/cp-schema-registry:5.5.0
    image: confluentinc/cp-schema-registry:7.2.1
    ports:
      - 8085:8085
    depends_on:
      - zookeeper0
      - kafka0
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
      SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
      SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
      SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@ -99,7 +97,7 @@ services:
      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

  kafka-connect0:
    image: confluentinc/cp-kafka-connect:6.0.1
    image: confluentinc/cp-kafka-connect:7.2.1
    ports:
      - 8083:8083
    depends_on:
@ -124,13 +122,13 @@ services:
      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"

  kafka-init-topics:
    image: confluentinc/cp-kafka:5.3.1
    image: confluentinc/cp-kafka:7.2.1
    volumes:
      - ./message.json:/data/message.json
    depends_on:
      - kafka0
    command: "bash -c 'echo Waiting for Kafka to be ready... && \
      cub kafka-ready -b kafka0:29092 1 30 && \
      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
      kafka-console-producer --broker-list kafka0:29092 -topic second.users < /data/message.json'"
      kafka-console-producer --bootstrap-server kafka0:29092 -topic second.users < /data/message.json'"
@ -2,33 +2,33 @@
version: '2'
services:

  zookeeper0:
    image: confluentinc/cp-zookeeper:5.2.4
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    ports:
      - 2181:2181

  kafka0:
    image: confluentinc/cp-kafka:5.3.1
    image: confluentinc/cp-kafka:7.2.1
    # downloading jmx_exporter javaagent and starting kafka
    command: "/usr/share/jmx_exporter/kafka-prepare-and-run"
    hostname: kafka0
    container_name: kafka0
    depends_on:
      - zookeeper0
    ports:
      - "9092:9092"
      - "11001:11001"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_PROCESS_ROLES: 'broker,controller'
      KAFKA_NODE_ID: 1
      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
      KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
      KAFKA_OPTS: -javaagent:/usr/share/jmx_exporter/jmx_prometheus_javaagent.jar=11001:/usr/share/jmx_exporter/kafka-broker.yml
    ports:
      - 9092:9092
      - 11001:11001
    volumes:
      - ./jmx-exporter:/usr/share/jmx_exporter/
      - ./scripts/update_run.sh:/tmp/update_run.sh
    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /usr/share/jmx_exporter/kafka-prepare-and-run ; fi'"

  kafka-ui:
    container_name: kafka-ui
@ -36,7 +36,6 @@ services:
    ports:
      - 8080:8080
    depends_on:
      - zookeeper0
      - kafka0
    environment:
      KAFKA_CLUSTERS_0_NAME: local
@ -8,86 +8,89 @@ services:
    ports:
      - 8080:8080
    depends_on:
      - zookeeper0
      - zookeeper1
      - kafka0
      - kafka1
      - schemaregistry0
      - schemaregistry1
      - kafka-connect0
    environment:
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
      KAFKA_CLUSTERS_0_METRICS_PORT: 9997
      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
      KAFKA_CLUSTERS_1_NAME: secondLocal
      KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
      KAFKA_CLUSTERS_1_ZOOKEEPER: zookeeper1:2181
      KAFKA_CLUSTERS_0_METRICS_PORT: 9998
      KAFKA_CLUSTERS_1_METRICS_PORT: 9998
      KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry1:8085
      KAFKA_CLUSTERS_1_KAFKACONNECT_0_NAME: first
      KAFKA_CLUSTERS_1_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083

  zookeeper0:
    image: confluentinc/cp-zookeeper:5.2.4
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    ports:
      - 2181:2181

  kafka0:
    image: confluentinc/cp-kafka:5.3.1
    image: confluentinc/cp-kafka:7.2.1
    depends_on:
      - zookeeper0
    hostname: kafka0
    container_name: kafka0
    ports:
      - 9092:9092
      - "9092:9092"
      - 9997:9997
      - "9997:9997"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      JMX_PORT: 9997
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: 9997
      KAFKA_JMX_HOSTNAME: localhost
      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
      KAFKA_PROCESS_ROLES: 'broker,controller'
      KAFKA_NODE_ID: 1
      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka0:29093'
      KAFKA_LISTENERS: 'PLAINTEXT://kafka0:29092,CONTROLLER://kafka0:29093,PLAINTEXT_HOST://0.0.0.0:9092'
      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
    volumes:
      - ./scripts/update_run.sh:/tmp/update_run.sh
    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

  zookeeper1:
    image: confluentinc/cp-zookeeper:5.2.4
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000

  kafka1:
    image: confluentinc/cp-kafka:5.3.1
    image: confluentinc/cp-kafka:7.2.1
    depends_on:
      - zookeeper1
    hostname: kafka1
    container_name: kafka1
    ports:
      - 9093:9093
      - "9093:9092"
      - 9998:9998
      - "9998:9998"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT'
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9092'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      JMX_PORT: 9998
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka1 -Dcom.sun.management.jmxremote.rmi.port=9998
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: 9998
      KAFKA_JMX_HOSTNAME: localhost
      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9998
      KAFKA_PROCESS_ROLES: 'broker,controller'
      KAFKA_NODE_ID: 1
      KAFKA_CONTROLLER_QUORUM_VOTERS: '1@kafka1:29093'
      KAFKA_LISTENERS: 'PLAINTEXT://kafka1:29092,CONTROLLER://kafka1:29093,PLAINTEXT_HOST://0.0.0.0:9092'
      KAFKA_INTER_BROKER_LISTENER_NAME: 'PLAINTEXT'
      KAFKA_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
      KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs'
    volumes:
      - ./scripts/update_run.sh:/tmp/update_run.sh
    command: "bash -c 'if [ ! -f /tmp/update_run.sh ]; then echo \"ERROR: Did you forget the update_run.sh file that came with this docker-compose.yml file?\" && exit 1 ; else /tmp/update_run.sh && /etc/confluent/docker/run ; fi'"

  schemaregistry0:
    image: confluentinc/cp-schema-registry:5.5.0
    image: confluentinc/cp-schema-registry:7.2.1
    ports:
      - 8085:8085
    depends_on:
      - zookeeper0
      - kafka0
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
      SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
      SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
      SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
@ -97,15 +100,13 @@ services:
      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

  schemaregistry1:
    image: confluentinc/cp-schema-registry:5.5.0
    image: confluentinc/cp-schema-registry:7.2.1
    ports:
      - 18085:8085
    depends_on:
      - zookeeper1
      - kafka1
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
      SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
      SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
      SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085
@ -140,14 +141,14 @@ services:
      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"

  kafka-init-topics:
    image: confluentinc/cp-kafka:5.3.1
    image: confluentinc/cp-kafka:7.2.1
    volumes:
      - ./message.json:/data/message.json
    depends_on:
      - kafka1
    command: "bash -c 'echo Waiting for Kafka to be ready... && \
      cub kafka-ready -b kafka1:29092 1 30 && \
      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
      kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
      kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
      kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka1:29092 && \
      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
      kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka0:29092 && \
      kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"
      kafka-console-producer --bootstrap-server kafka1:29092 -topic second.users < /data/message.json'"
48
documentation/compose/kafka-with-zookeeper.yaml
Normal file
@ -0,0 +1,48 @@
---
version: '2'
services:

  zookeeper:
    image: confluentinc/cp-zookeeper:7.2.1
    hostname: zookeeper
    container_name: zookeeper
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000

  kafka:
    image: confluentinc/cp-server:7.2.1
    hostname: kafka
    container_name: kafka
    depends_on:
      - zookeeper
    ports:
      - "9092:9092"
      - "9997:9997"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
      KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
      KAFKA_JMX_PORT: 9997
      KAFKA_JMX_HOSTNAME: kafka

  kafka-init-topics:
    image: confluentinc/cp-kafka:7.2.1
    volumes:
      - ./message.json:/data/message.json
    depends_on:
      - kafka
    command: "bash -c 'echo Waiting for Kafka to be ready... && \
      cub kafka-ready -b kafka:29092 1 30 && \
      kafka-topics --create --topic users --partitions 3 --replication-factor 1 --if-not-exists --bootstrap-server kafka:29092 && \
      kafka-topics --create --topic messages --partitions 2 --replication-factor 1 --if-not-exists --bootstrap-server kafka:29092 && \
      kafka-console-producer --bootstrap-server kafka:29092 --topic users < /data/message.json'"
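Once this stack is up, the seeded topic can be read from the host through the advertised PLAINTEXT_HOST listener. A minimal consumer sketch; the group id and string deserializers are assumptions, the topic is the one created by kafka-init-topics above:

import java.time.Duration;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ReadSeededTopic {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "readers"); // assumed group id
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
      consumer.subscribe(List.of("users")); // topic created by kafka-init-topics
      for (ConsumerRecord<String, String> record : consumer.poll(Duration.ofSeconds(5))) {
        System.out.printf("partition=%d offset=%d value=%s%n",
            record.partition(), record.offset(), record.value());
      }
    }
  }
}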
1
documentation/compose/scripts/clusterID
Normal file
@ -0,0 +1 @@
zlFiTJelTOuhnklFwLWixw
1
documentation/compose/scripts/create_cluster_id.sh
Normal file
@ -0,0 +1 @@
kafka-storage random-uuid > /workspace/kafka-ui/documentation/compose/clusterID
11
documentation/compose/scripts/update_run_cluster.sh
Normal file
@ -0,0 +1,11 @@
# This script is required to run kafka cluster (without zookeeper)
#!/bin/sh

# Docker workaround: Remove check for KAFKA_ZOOKEEPER_CONNECT parameter
sed -i '/KAFKA_ZOOKEEPER_CONNECT/d' /etc/confluent/docker/configure

# Docker workaround: Ignore cub zk-ready
sed -i 's/cub zk-ready/echo ignore zk-ready/' /etc/confluent/docker/ensure

# KRaft required step: Format the storage directory with a new cluster ID
echo "kafka-storage format --ignore-formatted -t $(cat /tmp/clusterID) -c /etc/kafka/kafka.properties" >> /etc/confluent/docker/ensure
@ -58,7 +58,6 @@ For Azure AD (Office365) OAUTH2 you'll want to add additional environment variab
docker run -p 8080:8080 \
    -e KAFKA_CLUSTERS_0_NAME="${cluster_name}"\
    -e KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS="${kafka_listeners}" \
    -e KAFKA_CLUSTERS_0_ZOOKEEPER="${zookeeper_servers}" \
    -e KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS="${kafka_connect_servers}"
    -e AUTH_TYPE=OAUTH2 \
    -e SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_AUTH0_CLIENTID=uhvaPKIHU4ZF8Ne4B6PGvF0hWW6OcUSB \
@ -173,6 +173,12 @@
      <version>${mockito.version}</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>net.bytebuddy</groupId>
      <artifactId>byte-buddy</artifactId>
      <version>${byte-buddy.version}</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>org.assertj</groupId>
      <artifactId>assertj-core</artifactId>
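Byte Buddy is the bytecode generator Mockito uses to create mock classes at runtime, which is why it is commonly pinned as an explicit test dependency so its version keeps up with newer JDKs. A trivial smoke test of the pair; the mocked type and stubbed value are illustrative only:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.List;

public class MockitoSmokeTest {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    // mock class generation goes through Byte Buddy under the hood
    List<String> mocked = mock(List.class);
    when(mocked.get(0)).thenReturn("stubbed");
    System.out.println(mocked.get(0)); // prints "stubbed"
  }
}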
@ -1,53 +0,0 @@
package com.provectus.kafka.ui.client;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.service.ksql.KsqlApiClient;
import com.provectus.kafka.ui.strategy.ksql.statement.BaseStrategy;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.HttpStatus;
import org.springframework.http.MediaType;
import org.springframework.stereotype.Service;
import org.springframework.web.reactive.function.BodyInserters;
import org.springframework.web.reactive.function.client.ClientResponse;
import org.springframework.web.reactive.function.client.WebClient;
import reactor.core.publisher.Mono;

@Service
@RequiredArgsConstructor
@Slf4j
public class KsqlClient {
  private final WebClient webClient;
  private final ObjectMapper mapper;

  public Mono<KsqlCommandResponseDTO> execute(BaseStrategy ksqlStatement, KafkaCluster cluster) {
    return webClient.post()
        .uri(ksqlStatement.getUri())
        .headers(httpHeaders -> KsqlApiClient.setBasicAuthIfEnabled(httpHeaders, cluster))
        .accept(new MediaType("application", "vnd.ksql.v1+json"))
        .body(BodyInserters.fromValue(ksqlStatement.getKsqlCommand()))
        .retrieve()
        .onStatus(HttpStatus::isError, this::getErrorMessage)
        .bodyToMono(byte[].class)
        .map(this::toJson)
        .map(ksqlStatement::serializeResponse);
  }

  private Mono<Throwable> getErrorMessage(ClientResponse response) {
    return response
        .bodyToMono(byte[].class)
        .map(this::toJson)
        .map(jsonNode -> jsonNode.get("message").asText())
        .flatMap(error -> Mono.error(new UnprocessableEntityException(error)));
  }

  @SneakyThrows
  private JsonNode toJson(byte[] content) {
    return this.mapper.readTree(content);
  }
}
@ -53,17 +53,6 @@ public class ConsumerGroupsController extends AbstractController implements Cons
        .map(ResponseEntity::ok);
  }

  @Override
  public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getConsumerGroups(String clusterName,
                                                                        ServerWebExchange exchange) {
    return consumerGroupService.getAllConsumerGroups(getCluster(clusterName))
        .map(Flux::fromIterable)
        .map(f -> f.map(ConsumerGroupMapper::toDto))
        .map(ResponseEntity::ok)
        .switchIfEmpty(Mono.just(ResponseEntity.notFound().build()));
  }

  @Override
  public Mono<ResponseEntity<Flux<ConsumerGroupDTO>>> getTopicConsumerGroups(
      String clusterName, String topicName, ServerWebExchange exchange) {
@ -17,9 +17,16 @@ public class InfoController extends AbstractController implements TimeStampForma

  @Value("${timestamp.format:dd.MM.YYYY HH:mm:ss}")
  private String timeStampFormat;
  @Value("${timestamp.format:DD.MM.YYYY HH:mm:ss}")
  private String timeStampFormatIso;

  @Override
  public Mono<ResponseEntity<TimeStampFormatDTO>> getTimeStampFormat(ServerWebExchange exchange) {
    return Mono.just(ResponseEntity.ok(new TimeStampFormatDTO().timeStampFormat(timeStampFormat)));
  }

  @Override
  public Mono<ResponseEntity<TimeStampFormatDTO>> getTimeStampFormatISO(ServerWebExchange exchange) {
    return Mono.just(ResponseEntity.ok(new TimeStampFormatDTO().timeStampFormat(timeStampFormatIso)));
  }
}
@ -1,15 +1,12 @@
package com.provectus.kafka.ui.controller;

import com.provectus.kafka.ui.api.KsqlApi;
import com.provectus.kafka.ui.model.KsqlCommandDTO;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.KsqlCommandV2DTO;
import com.provectus.kafka.ui.model.KsqlCommandV2ResponseDTO;
import com.provectus.kafka.ui.model.KsqlResponseDTO;
import com.provectus.kafka.ui.model.KsqlStreamDescriptionDTO;
import com.provectus.kafka.ui.model.KsqlTableDescriptionDTO;
import com.provectus.kafka.ui.model.KsqlTableResponseDTO;
import com.provectus.kafka.ui.service.KsqlService;
import com.provectus.kafka.ui.service.ksql.KsqlServiceV2;
import java.util.List;
import java.util.Map;
@ -27,17 +24,8 @@ import reactor.core.publisher.Mono;
@RequiredArgsConstructor
@Slf4j
public class KsqlController extends AbstractController implements KsqlApi {
  private final KsqlService ksqlService;
  private final KsqlServiceV2 ksqlServiceV2;

  @Override
  public Mono<ResponseEntity<KsqlCommandResponseDTO>> executeKsqlCommand(String clusterName,
                                                                         Mono<KsqlCommandDTO> ksqlCommand,
                                                                         ServerWebExchange exchange) {
    return ksqlService.executeKsqlCommand(getCluster(clusterName), ksqlCommand)
        .map(ResponseEntity::ok);
  }

  @Override
  public Mono<ResponseEntity<KsqlCommandV2ResponseDTO>> executeKsql(String clusterName,
@ -5,6 +5,7 @@ import static com.provectus.kafka.ui.serde.api.Serde.Target.VALUE;
import static java.util.stream.Collectors.toMap;

import com.provectus.kafka.ui.api.MessagesApi;
import com.provectus.kafka.ui.exception.ValidationException;
import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.CreateTopicMessageDTO;
import com.provectus.kafka.ui.model.MessageFilterTypeDTO;
@ -18,6 +19,7 @@ import com.provectus.kafka.ui.service.MessagesService;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import javax.annotation.Nullable;
import javax.validation.Valid;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
@ -63,18 +65,22 @@ public class MessagesController extends AbstractController implements MessagesAp
      String keySerde,
      String valueSerde,
      ServerWebExchange exchange) {
    seekType = seekType != null ? seekType : SeekTypeDTO.BEGINNING;
    seekDirection = seekDirection != null ? seekDirection : SeekDirectionDTO.FORWARD;
    filterQueryType = filterQueryType != null ? filterQueryType : MessageFilterTypeDTO.STRING_CONTAINS;
    int recordsLimit =
        Optional.ofNullable(limit).map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT)).orElse(DEFAULT_LOAD_RECORD_LIMIT);

    var positions = new ConsumerPosition(
        seekType != null ? seekType : SeekTypeDTO.BEGINNING,
        seekType,
        parseSeekTo(topicName, seekTo),
        topicName,
        seekDirection
        parseSeekTo(topicName, seekType, seekTo)
    );
    int recordsLimit = Optional.ofNullable(limit)
        .map(s -> Math.min(s, MAX_LOAD_RECORD_LIMIT))
        .orElse(DEFAULT_LOAD_RECORD_LIMIT);
    return Mono.just(
        ResponseEntity.ok(
            messagesService.loadMessages(
                getCluster(clusterName), topicName, positions, q, filterQueryType, recordsLimit, keySerde, valueSerde)
                getCluster(clusterName), topicName, positions, q, filterQueryType,
                recordsLimit, seekDirection, keySerde, valueSerde)
        )
    );
  }
@ -92,9 +98,13 @@ public class MessagesController extends AbstractController implements MessagesAp
   * The format is [partition]::[offset] for specifying offsets
   * or [partition]::[timestamp in millis] for specifying timestamps.
   */
  private Map<TopicPartition, Long> parseSeekTo(String topic, List<String> seekTo) {
  @Nullable
  private Map<TopicPartition, Long> parseSeekTo(String topic, SeekTypeDTO seekType, List<String> seekTo) {
    if (seekTo == null || seekTo.isEmpty()) {
      return Map.of();
      if (seekType == SeekTypeDTO.LATEST || seekType == SeekTypeDTO.BEGINNING) {
        return null;
      }
      throw new ValidationException("seekTo should be set if seekType is " + seekType);
    }
    return seekTo.stream()
        .map(p -> {
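The javadoc above documents the seekTo wire format. A standalone illustration of how [partition]::[offset-or-timestamp] strings map onto TopicPartition keys; this is a simplified sketch, the real controller additionally validates the seek type as shown in the diff:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.common.TopicPartition;

public class SeekToFormatExample {
  static Map<TopicPartition, Long> parse(String topic, List<String> seekTo) {
    return seekTo.stream()
        .map(s -> s.split("::")) // "0::42" -> ["0", "42"]
        .collect(Collectors.toMap(
            parts -> new TopicPartition(topic, Integer.parseInt(parts[0])),
            parts -> Long.parseLong(parts[1])));
  }

  public static void main(String[] args) {
    // partition 0 at offset 42, partition 1 at a millisecond timestamp
    System.out.println(parse("users", List.of("0::42", "1::1667260800000")));
  }
}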
@ -1,21 +1,18 @@
package com.provectus.kafka.ui.emitter;

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.util.OffsetsSeekBackward;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
@ -29,80 +26,68 @@ public class BackwardRecordEmitter

  private static final Duration POLL_TIMEOUT = Duration.ofMillis(200);

  private final Function<Map<String, Object>, KafkaConsumer<Bytes, Bytes>> consumerSupplier;
  private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
  private final OffsetsSeekBackward offsetsSeek;
  private final ConsumerPosition consumerPosition;
  private final int messagesPerPage;

  public BackwardRecordEmitter(
      Function<Map<String, Object>, KafkaConsumer<Bytes, Bytes>> consumerSupplier,
      Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
      OffsetsSeekBackward offsetsSeek,
      ConsumerPosition consumerPosition,
      int messagesPerPage,
      ConsumerRecordDeserializer recordDeserializer) {
    super(recordDeserializer);
    this.offsetsSeek = offsetsSeek;
    this.consumerPosition = consumerPosition;
    this.messagesPerPage = messagesPerPage;
    this.consumerSupplier = consumerSupplier;
  }

  @Override
  public void accept(FluxSink<TopicMessageEventDTO> sink) {
    try (KafkaConsumer<Bytes, Bytes> configConsumer = consumerSupplier.apply(Map.of())) {
      final List<TopicPartition> requestedPartitions =
          offsetsSeek.getRequestedPartitions(configConsumer);
      sendPhase(sink, "Request partitions");
      final int msgsPerPartition = offsetsSeek.msgsPerPartition(requestedPartitions.size());
      try (KafkaConsumer<Bytes, Bytes> consumer =
               consumerSupplier.apply(
                   Map.of(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, msgsPerPartition)
               )
      ) {
        sendPhase(sink, "Created consumer");

        SortedMap<TopicPartition, Long> readUntilOffsets =
            new TreeMap<>(Comparator.comparingInt(TopicPartition::partition));
        readUntilOffsets.putAll(offsetsSeek.getPartitionsOffsets(consumer));

        sendPhase(sink, "Requested partitions offsets");
        log.debug("partition offsets: {}", readUntilOffsets);
        var waitingOffsets =
            offsetsSeek.waitingOffsets(consumer, readUntilOffsets.keySet());
        log.debug("waiting offsets {} {}",
            waitingOffsets.getBeginOffsets(),
            waitingOffsets.getEndOffsets()
        );

        while (!sink.isCancelled() && !waitingOffsets.beginReached()) {
          new TreeMap<>(readUntilOffsets).forEach((tp, readToOffset) -> {
            long lowestOffset = waitingOffsets.getBeginOffsets().get(tp.partition());
            long readFromOffset = Math.max(lowestOffset, readToOffset - msgsPerPartition);

            partitionPollIteration(tp, readFromOffset, readToOffset, consumer, sink)
                .stream()
                .filter(r -> !sink.isCancelled())
                .forEach(r -> sendMessage(sink, r));

            waitingOffsets.markPolled(tp.partition(), readFromOffset);
            if (waitingOffsets.getBeginOffsets().get(tp.partition()) == null) {
              // we fully read this partition -> removing it from polling iterations
              readUntilOffsets.remove(tp);
            } else {
              readUntilOffsets.put(tp, readFromOffset);
            }
          });

          if (waitingOffsets.beginReached()) {
            log.debug("begin reached after partitions poll iteration");
          } else if (sink.isCancelled()) {
            log.debug("sink is cancelled after partitions poll iteration");
          }
        }
        sink.complete();
        log.debug("Polling finished");
      }
    } catch (Exception e) {
      log.error("Error occurred while consuming records", e);
      sink.error(e);
    }
  }
    try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
      sendPhase(sink, "Created consumer");
      var seekOperations = SeekOperations.create(consumer, consumerPosition);
      var readUntilOffsets = new TreeMap<TopicPartition, Long>(Comparator.comparingInt(TopicPartition::partition));
      readUntilOffsets.putAll(seekOperations.getOffsetsForSeek());

      int msgsToPollPerPartition = (int) Math.ceil((double) messagesPerPage / readUntilOffsets.size());
      log.debug("'Until' offsets for polling: {}", readUntilOffsets);

      while (!sink.isCancelled() && !readUntilOffsets.isEmpty()) {
        new TreeMap<>(readUntilOffsets).forEach((tp, readToOffset) -> {
          if (sink.isCancelled()) {
            return; //fast return in case of sink cancellation
          }
          long beginOffset = seekOperations.getBeginOffsets().get(tp);
          long readFromOffset = Math.max(beginOffset, readToOffset - msgsToPollPerPartition);

          partitionPollIteration(tp, readFromOffset, readToOffset, consumer, sink)
              .stream()
              .filter(r -> !sink.isCancelled())
              .forEach(r -> sendMessage(sink, r));

          if (beginOffset == readFromOffset) {
            // we fully read this partition -> removing it from polling iterations
            readUntilOffsets.remove(tp);
          } else {
            // updating 'to' offset for next polling iteration
            readUntilOffsets.put(tp, readFromOffset);
          }
        });
        if (readUntilOffsets.isEmpty()) {
          log.debug("begin reached after partitions poll iteration");
        } else if (sink.isCancelled()) {
          log.debug("sink is cancelled after partitions poll iteration");
        }
      }
      sink.complete();
      log.debug("Polling finished");
    } catch (Exception e) {
      log.error("Error occurred while consuming records", e);
      sink.error(e);
    }
  }

  private List<ConsumerRecord<Bytes, Bytes>> partitionPollIteration(
      TopicPartition tp,
      long fromOffset,
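The core of the new backward emitter is the per-partition paging arithmetic: each iteration reads the window [readFromOffset, readToOffset) and then slides the 'until' offset down to 'from' until the beginning offset is reached. A small self-contained illustration with assumed numbers:

public class BackwardPagingExample {
  public static void main(String[] args) {
    long beginOffset = 10;           // earliest available offset (from getBeginOffsets())
    int msgsToPollPerPartition = 50; // messagesPerPage spread across partitions
    long readTo = 120;               // start at the 'until' offset
    while (readTo > beginOffset) {
      long readFrom = Math.max(beginOffset, readTo - msgsToPollPerPartition);
      System.out.printf("poll window [%d, %d)%n", readFrom, readTo); // [70,120) [20,70) [10,20)
      readTo = readFrom;             // same update as readUntilOffsets.put(tp, readFromOffset)
    }
    // once readFrom equals beginOffset the partition is removed from readUntilOffsets
  }
}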
@ -1,8 +1,8 @@
package com.provectus.kafka.ui.emitter;

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.TopicMessageEventDTO;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.util.OffsetsSeek;
import java.util.function.Supplier;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
@ -17,34 +17,38 @@ public class ForwardRecordEmitter
    implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {

  private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
  private final OffsetsSeek offsetsSeek;
  private final ConsumerPosition position;

  public ForwardRecordEmitter(
      Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
      OffsetsSeek offsetsSeek,
      ConsumerPosition position,
      ConsumerRecordDeserializer recordDeserializer) {
    super(recordDeserializer);
    this.position = position;
    this.consumerSupplier = consumerSupplier;
    this.offsetsSeek = offsetsSeek;
  }

  @Override
  public void accept(FluxSink<TopicMessageEventDTO> sink) {
    try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
      sendPhase(sink, "Assigning partitions");
      var waitingOffsets = offsetsSeek.assignAndSeek(consumer);
      var seekOperations = SeekOperations.create(consumer, position);
      seekOperations.assignAndSeekNonEmptyPartitions();

      // we use empty polls counting to verify that topic was fully read
      int emptyPolls = 0;
      while (!sink.isCancelled() && !waitingOffsets.endReached() && emptyPolls < NO_MORE_DATA_EMPTY_POLLS_COUNT) {
      while (!sink.isCancelled()
          && !seekOperations.assignedPartitionsFullyPolled()
          && emptyPolls < NO_MORE_DATA_EMPTY_POLLS_COUNT) {

        sendPhase(sink, "Polling");
        ConsumerRecords<Bytes, Bytes> records = poll(sink, consumer);
        log.info("{} records polled", records.count());
        emptyPolls = records.isEmpty() ? emptyPolls + 1 : 0;

        for (ConsumerRecord<Bytes, Bytes> msg : records) {
          if (!sink.isCancelled() && !waitingOffsets.endReached()) {
          if (!sink.isCancelled()) {
            sendMessage(sink, msg);
            waitingOffsets.markPolled(msg);
          } else {
            break;
          }
        }
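The emitters in this package implement java.util.function.Consumer<FluxSink<TopicMessageEventDTO>>, so they plug directly into reactor's Flux.create. A toy stand-in emitter shows the wiring; reactor-core on the classpath is assumed:

import java.util.function.Consumer;
import reactor.core.publisher.Flux;
import reactor.core.publisher.FluxSink;

public class EmitterWiringExample {
  public static void main(String[] args) {
    // stand-in for ForwardRecordEmitter/BackwardRecordEmitter: push items, then complete
    Consumer<FluxSink<String>> emitter = sink -> {
      for (int i = 0; i < 3 && !sink.isCancelled(); i++) {
        sink.next("event-" + i);
      }
      sink.complete();
    };
    Flux.create(emitter).subscribe(System.out::println);
  }
}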
@ -0,0 +1,59 @@
|
||||||
|
package com.provectus.kafka.ui.emitter;
|
||||||
|
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import java.util.Collection;
|
||||||
|
import java.util.HashSet;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.Set;
|
||||||
|
import java.util.stream.Collectors;
|
||||||
|
import lombok.Getter;
|
||||||
|
import lombok.extern.slf4j.Slf4j;
|
||||||
|
import org.apache.kafka.clients.consumer.Consumer;
|
||||||
|
import org.apache.kafka.common.TopicPartition;
|
||||||
|
|
||||||
|
@Slf4j
|
||||||
|
@Getter
|
||||||
|
public class OffsetsInfo {
|
||||||
|
|
||||||
|
private final Consumer<?, ?> consumer;
|
||||||
|
|
||||||
|
private final Map<TopicPartition, Long> beginOffsets;
|
||||||
|
private final Map<TopicPartition, Long> endOffsets;
|
||||||
|
|
||||||
|
private final Set<TopicPartition> nonEmptyPartitions = new HashSet<>();
|
||||||
|
private final Set<TopicPartition> emptyPartitions = new HashSet<>();
|
||||||
|
|
||||||
|
public OffsetsInfo(Consumer<?, ?> consumer, String topic) {
|
||||||
|
this(consumer,
|
||||||
|
consumer.partitionsFor(topic).stream()
|
||||||
|
.map(pi -> new TopicPartition(topic, pi.partition()))
|
||||||
|
.collect(Collectors.toList())
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
public OffsetsInfo(Consumer<?, ?> consumer,
|
||||||
|
Collection<TopicPartition> targetPartitions) {
|
||||||
|
this.consumer = consumer;
|
||||||
|
this.beginOffsets = consumer.beginningOffsets(targetPartitions);
|
||||||
|
this.endOffsets = consumer.endOffsets(targetPartitions);
|
||||||
|
endOffsets.forEach((tp, endOffset) -> {
|
||||||
|
var beginningOffset = beginOffsets.get(tp);
|
||||||
|
if (endOffset > beginningOffset) {
|
||||||
|
nonEmptyPartitions.add(tp);
|
||||||
|
} else {
|
||||||
|
emptyPartitions.add(tp);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
public boolean assignedPartitionsFullyPolled() {
|
||||||
|
for (var tp: consumer.assignment()) {
|
||||||
|
Preconditions.checkArgument(endOffsets.containsKey(tp));
|
||||||
|
if (endOffsets.get(tp) > consumer.position(tp)) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,111 @@
|
||||||
|
package com.provectus.kafka.ui.emitter;
|
||||||
|
|
||||||
|
import com.google.common.annotations.VisibleForTesting;
|
||||||
|
import com.google.common.base.Preconditions;
|
||||||
|
import com.provectus.kafka.ui.model.ConsumerPosition;
|
||||||
|
import com.provectus.kafka.ui.model.SeekTypeDTO;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.Map;
|
||||||
|
import java.util.stream.Collectors;
|
||||||
|
import javax.annotation.Nullable;
|
||||||
|
import lombok.AccessLevel;
|
||||||
|
import lombok.RequiredArgsConstructor;
|
||||||
|
import org.apache.kafka.clients.consumer.Consumer;
|
||||||
|
import org.apache.kafka.common.TopicPartition;
|
||||||
|
|
||||||
@RequiredArgsConstructor(access = AccessLevel.PACKAGE)
class SeekOperations {

  private final Consumer<?, ?> consumer;
  private final OffsetsInfo offsetsInfo;
  private final Map<TopicPartition, Long> offsetsForSeek; // only contains non-empty partitions!

  static SeekOperations create(Consumer<?, ?> consumer, ConsumerPosition consumerPosition) {
    OffsetsInfo offsetsInfo;
    if (consumerPosition.getSeekTo() == null) {
      offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getTopic());
    } else {
      offsetsInfo = new OffsetsInfo(consumer, consumerPosition.getSeekTo().keySet());
    }
    return new SeekOperations(
        consumer,
        offsetsInfo,
        getOffsetsForSeek(consumer, offsetsInfo, consumerPosition.getSeekType(), consumerPosition.getSeekTo())
    );
  }

  void assignAndSeekNonEmptyPartitions() {
    consumer.assign(offsetsForSeek.keySet());
    offsetsForSeek.forEach(consumer::seek);
  }

  Map<TopicPartition, Long> getBeginOffsets() {
    return offsetsInfo.getBeginOffsets();
  }

  Map<TopicPartition, Long> getEndOffsets() {
    return offsetsInfo.getEndOffsets();
  }

  boolean assignedPartitionsFullyPolled() {
    return offsetsInfo.assignedPartitionsFullyPolled();
  }

  // Get offsets to seek to. NOTE: offsets do not contain empty partitions' offsets.
  Map<TopicPartition, Long> getOffsetsForSeek() {
    return offsetsForSeek;
  }

  /**
   * Finds offsets for ConsumerPosition. Note: will return an empty map if no offsets were found for the desired criteria.
   */
  @VisibleForTesting
  static Map<TopicPartition, Long> getOffsetsForSeek(Consumer<?, ?> consumer,
                                                     OffsetsInfo offsetsInfo,
                                                     SeekTypeDTO seekType,
                                                     @Nullable Map<TopicPartition, Long> seekTo) {
    switch (seekType) {
      case LATEST:
        return consumer.endOffsets(offsetsInfo.getNonEmptyPartitions());
      case BEGINNING:
        return consumer.beginningOffsets(offsetsInfo.getNonEmptyPartitions());
      case OFFSET:
        Preconditions.checkNotNull(seekTo); // seekTo is required for OFFSET seeks
        return fixOffsets(offsetsInfo, seekTo);
      case TIMESTAMP:
        Preconditions.checkNotNull(seekTo); // seekTo holds the target timestamps here
        return offsetsForTimestamp(consumer, offsetsInfo, seekTo);
      default:
        throw new IllegalStateException();
    }
  }

  private static Map<TopicPartition, Long> fixOffsets(OffsetsInfo offsetsInfo, Map<TopicPartition, Long> offsets) {
    offsets = new HashMap<>(offsets);
    offsets.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());

    Map<TopicPartition, Long> result = new HashMap<>();
    offsets.forEach((tp, targetOffset) -> {
      long endOffset = offsetsInfo.getEndOffsets().get(tp);
      long beginningOffset = offsetsInfo.getBeginOffsets().get(tp);
      // fixing offsets with min - max bounds
      if (targetOffset > endOffset) {
        targetOffset = endOffset;
      } else if (targetOffset < beginningOffset) {
        targetOffset = beginningOffset;
      }
      result.put(tp, targetOffset);
    });
    return result;
  }

  private static Map<TopicPartition, Long> offsetsForTimestamp(Consumer<?, ?> consumer, OffsetsInfo offsetsInfo,
                                                               Map<TopicPartition, Long> timestamps) {
    timestamps = new HashMap<>(timestamps);
    timestamps.keySet().retainAll(offsetsInfo.getNonEmptyPartitions());

    return consumer.offsetsForTimes(timestamps).entrySet().stream()
        .filter(e -> e.getValue() != null)
        .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));
  }
}
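Taken together, SeekOperations gives the emitters a single entry point for positioning a consumer. For orientation, a minimal usage sketch (a hypothetical caller, not part of the commit; assumes a configured consumer and the ConsumerPosition model from this changeset):

// Hypothetical caller showing the intended call sequence.
static void pollNonEmptyPartitions(Consumer<Bytes, Bytes> consumer, ConsumerPosition position) {
  var seekOperations = SeekOperations.create(consumer, position);
  seekOperations.assignAndSeekNonEmptyPartitions(); // empty partitions are never assigned
  while (!seekOperations.assignedPartitionsFullyPolled()) {
    consumer.poll(java.time.Duration.ofMillis(500))
        .forEach(rec -> { /* hand the record to a deserializer/sink */ });
  }
}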
@@ -1,8 +1,9 @@
 package com.provectus.kafka.ui.emitter;

+import com.provectus.kafka.ui.model.ConsumerPosition;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
-import com.provectus.kafka.ui.util.OffsetsSeek;
+import java.util.HashMap;
 import java.util.function.Supplier;
 import lombok.extern.slf4j.Slf4j;
 import org.apache.kafka.clients.consumer.KafkaConsumer;
@@ -15,21 +16,21 @@ public class TailingEmitter extends AbstractEmitter
     implements java.util.function.Consumer<FluxSink<TopicMessageEventDTO>> {

   private final Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier;
-  private final OffsetsSeek offsetsSeek;
+  private final ConsumerPosition consumerPosition;

-  public TailingEmitter(ConsumerRecordDeserializer recordDeserializer,
-                        Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
-                        OffsetsSeek offsetsSeek) {
+  public TailingEmitter(Supplier<KafkaConsumer<Bytes, Bytes>> consumerSupplier,
+                        ConsumerPosition consumerPosition,
+                        ConsumerRecordDeserializer recordDeserializer) {
     super(recordDeserializer);
     this.consumerSupplier = consumerSupplier;
-    this.offsetsSeek = offsetsSeek;
+    this.consumerPosition = consumerPosition;
   }

   @Override
   public void accept(FluxSink<TopicMessageEventDTO> sink) {
     try (KafkaConsumer<Bytes, Bytes> consumer = consumerSupplier.get()) {
       log.debug("Starting topic tailing");
-      offsetsSeek.assignAndSeek(consumer);
+      assignAndSeek(consumer);
       while (!sink.isCancelled()) {
         sendPhase(sink, "Polling");
         var polled = poll(sink, consumer);
@@ -40,9 +41,17 @@ public class TailingEmitter extends AbstractEmitter
     } catch (InterruptException kafkaInterruptException) {
       sink.complete();
     } catch (Exception e) {
-      log.error("Error consuming {}", offsetsSeek.getConsumerPosition(), e);
+      log.error("Error consuming {}", consumerPosition, e);
       sink.error(e);
     }
   }
+
+  private void assignAndSeek(KafkaConsumer<Bytes, Bytes> consumer) {
+    var seekOperations = SeekOperations.create(consumer, consumerPosition);
+    var seekOffsets = new HashMap<>(seekOperations.getEndOffsets()); // defaulting offsets to topic end
+    seekOffsets.putAll(seekOperations.getOffsetsForSeek()); // this will only set non-empty partitions
+    consumer.assign(seekOffsets.keySet());
+    seekOffsets.forEach(consumer::seek);
+  }
 }
@@ -1,12 +1,14 @@
 package com.provectus.kafka.ui.model;

 import java.util.Map;
+import javax.annotation.Nullable;
 import lombok.Value;
 import org.apache.kafka.common.TopicPartition;

 @Value
 public class ConsumerPosition {
   SeekTypeDTO seekType;
-  Map<TopicPartition, Long> seekTo;
-  SeekDirectionDTO seekDirection;
+  String topic;
+  @Nullable
+  Map<TopicPartition, Long> seekTo; // null if positioning should apply to all tps
 }
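With the reshaped value object, "read every partition of a topic" is now expressed by passing a null seekTo. A one-line construction sketch ("orders" is a made-up topic name):

// Lombok @Value generates the all-args constructor in field order: seekType, topic, seekTo
var fromBeginning = new ConsumerPosition(SeekTypeDTO.BEGINNING, "orders", null);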
@@ -13,12 +13,12 @@ public class InternalPartition {
   private final int inSyncReplicasCount;
   private final int replicasCount;

-  private final long offsetMin;
-  private final long offsetMax;
+  private final Long offsetMin;
+  private final Long offsetMax;

   // from log dir
-  private final long segmentSize;
-  private final long segmentCount;
+  private final Long segmentSize;
+  private final Integer segmentCount;


 }
@@ -42,9 +42,7 @@ public class InternalTopic {
                                        Metrics metrics,
                                        InternalLogDirStats logDirInfo) {
     var topic = InternalTopic.builder();
-    topic.internal(
-        topicDescription.isInternal() || topicDescription.name().startsWith("_")
-    );
+    topic.internal(topicDescription.isInternal());
     topic.name(topicDescription.name());

     List<InternalPartition> partitions = topicDescription.partitions().stream()
@@ -49,8 +49,8 @@ public class ConsumerGroupService {
     var tpsFromGroupOffsets = groupOffsetsMap.values().stream()
         .flatMap(v -> v.keySet().stream())
         .collect(Collectors.toSet());
-    // 2. getting end offsets for partitions with in committed offsets
-    return ac.listOffsets(tpsFromGroupOffsets, OffsetSpec.latest())
+    // 2. getting end offsets for partitions with committed offsets
+    return ac.listOffsets(tpsFromGroupOffsets, OffsetSpec.latest(), false)
         .map(endOffsets ->
             descriptions.stream()
                 .map(desc -> {
@@ -64,18 +64,11 @@ public class ConsumerGroupService {
         });
   }

-  @Deprecated // need to migrate to pagination
-  public Mono<List<InternalConsumerGroup>> getAllConsumerGroups(KafkaCluster cluster) {
-    return adminClientService.get(cluster)
-        .flatMap(ac -> describeConsumerGroups(ac, null)
-            .flatMap(descriptions -> getConsumerGroups(ac, descriptions)));
-  }
-
   public Mono<List<InternalTopicConsumerGroup>> getConsumerGroupsForTopic(KafkaCluster cluster,
                                                                           String topic) {
     return adminClientService.get(cluster)
         // 1. getting topic's end offsets
-        .flatMap(ac -> ac.listOffsets(topic, OffsetSpec.latest())
+        .flatMap(ac -> ac.listTopicOffsets(topic, OffsetSpec.latest(), false)
             .flatMap(endOffsets -> {
               var tps = new ArrayList<>(endOffsets.keySet());
               // 2. getting all consumer groups
@@ -1,47 +0,0 @@
package com.provectus.kafka.ui.service;

import com.provectus.kafka.ui.client.KsqlClient;
import com.provectus.kafka.ui.exception.ClusterNotFoundException;
import com.provectus.kafka.ui.exception.KsqlDbNotFoundException;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.KsqlCommandDTO;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.strategy.ksql.statement.BaseStrategy;
import java.util.List;
import lombok.RequiredArgsConstructor;
import org.springframework.stereotype.Service;
import reactor.core.publisher.Mono;

@Service
@RequiredArgsConstructor
public class KsqlService {
  private final KsqlClient ksqlClient;
  private final List<BaseStrategy> ksqlStatementStrategies;

  public Mono<KsqlCommandResponseDTO> executeKsqlCommand(KafkaCluster cluster,
                                                         Mono<KsqlCommandDTO> ksqlCommand) {
    return Mono.justOrEmpty(cluster)
        .map(KafkaCluster::getKsqldbServer)
        .onErrorResume(e -> {
          Throwable throwable =
              e instanceof ClusterNotFoundException ? e : new KsqlDbNotFoundException();
          return Mono.error(throwable);
        })
        .flatMap(ksqlServer -> getStatementStrategyForKsqlCommand(ksqlCommand)
            .map(statement -> statement.host(ksqlServer.getUrl()))
        )
        .flatMap(baseStrategy -> ksqlClient.execute(baseStrategy, cluster));
  }

  private Mono<BaseStrategy> getStatementStrategyForKsqlCommand(
      Mono<KsqlCommandDTO> ksqlCommand) {
    return ksqlCommand
        .map(command -> ksqlStatementStrategies.stream()
            .filter(s -> s.test(command.getKsql()))
            .map(s -> s.ksqlCommand(command))
            .findFirst())
        .flatMap(Mono::justOrEmpty)
        .switchIfEmpty(Mono.error(new UnprocessableEntityException("Invalid sql")));
  }
}
@@ -14,12 +14,9 @@ import com.provectus.kafka.ui.model.SeekDirectionDTO;
 import com.provectus.kafka.ui.model.TopicMessageEventDTO;
 import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
 import com.provectus.kafka.ui.serdes.ProducerRecordCreator;
-import com.provectus.kafka.ui.util.OffsetsSeekBackward;
-import com.provectus.kafka.ui.util.OffsetsSeekForward;
 import com.provectus.kafka.ui.util.ResultSizeLimiter;
 import java.util.List;
 import java.util.Map;
-import java.util.Optional;
 import java.util.Properties;
 import java.util.concurrent.CompletableFuture;
 import java.util.function.Predicate;
@@ -68,8 +65,8 @@ public class MessagesService {
   private Mono<Map<TopicPartition, Long>> offsetsForDeletion(KafkaCluster cluster, String topicName,
                                                              List<Integer> partitionsToInclude) {
     return adminClientService.get(cluster).flatMap(ac ->
-        ac.listOffsets(topicName, OffsetSpec.earliest())
-            .zipWith(ac.listOffsets(topicName, OffsetSpec.latest()),
+        ac.listTopicOffsets(topicName, OffsetSpec.earliest(), true)
+            .zipWith(ac.listTopicOffsets(topicName, OffsetSpec.latest(), true),
                 (start, end) ->
                     end.entrySet().stream()
                         .filter(e -> partitionsToInclude.isEmpty()
@@ -129,58 +126,62 @@ public class MessagesService {
   }

   public Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster, String topic,
-                                                 ConsumerPosition consumerPosition, String query,
+                                                 ConsumerPosition consumerPosition,
+                                                 @Nullable String query,
                                                  MessageFilterTypeDTO filterQueryType,
                                                  int limit,
+                                                 SeekDirectionDTO seekDirection,
                                                  @Nullable String keySerde,
                                                  @Nullable String valueSerde) {
     return withExistingTopic(cluster, topic)
         .flux()
         .flatMap(td -> loadMessagesImpl(cluster, topic, consumerPosition, query,
-            filterQueryType, limit, keySerde, valueSerde));
+            filterQueryType, limit, seekDirection, keySerde, valueSerde));
   }

   private Flux<TopicMessageEventDTO> loadMessagesImpl(KafkaCluster cluster,
                                                       String topic,
                                                       ConsumerPosition consumerPosition,
-                                                      String query,
+                                                      @Nullable String query,
                                                       MessageFilterTypeDTO filterQueryType,
                                                       int limit,
+                                                      SeekDirectionDTO seekDirection,
                                                       @Nullable String keySerde,
                                                       @Nullable String valueSerde) {

     java.util.function.Consumer<? super FluxSink<TopicMessageEventDTO>> emitter;
     ConsumerRecordDeserializer recordDeserializer =
         deserializationService.deserializerFor(cluster, topic, keySerde, valueSerde);
-    if (consumerPosition.getSeekDirection().equals(SeekDirectionDTO.FORWARD)) {
+    if (seekDirection.equals(SeekDirectionDTO.FORWARD)) {
       emitter = new ForwardRecordEmitter(
           () -> consumerGroupService.createConsumer(cluster),
-          new OffsetsSeekForward(topic, consumerPosition),
+          consumerPosition,
           recordDeserializer
       );
-    } else if (consumerPosition.getSeekDirection().equals(SeekDirectionDTO.BACKWARD)) {
+    } else if (seekDirection.equals(SeekDirectionDTO.BACKWARD)) {
       emitter = new BackwardRecordEmitter(
-          (Map<String, Object> props) -> consumerGroupService.createConsumer(cluster, props),
-          new OffsetsSeekBackward(topic, consumerPosition, limit),
+          () -> consumerGroupService.createConsumer(cluster),
+          consumerPosition,
+          limit,
           recordDeserializer
       );
     } else {
       emitter = new TailingEmitter(
-          recordDeserializer,
           () -> consumerGroupService.createConsumer(cluster),
-          new OffsetsSeekForward(topic, consumerPosition)
+          consumerPosition,
+          recordDeserializer
       );
     }
     return Flux.create(emitter)
         .filter(getMsgFilter(query, filterQueryType))
-        .takeWhile(createTakeWhilePredicate(consumerPosition, limit))
+        .takeWhile(createTakeWhilePredicate(seekDirection, limit))
         .subscribeOn(Schedulers.boundedElastic())
         .share();
   }

   private Predicate<TopicMessageEventDTO> createTakeWhilePredicate(
-      ConsumerPosition consumerPosition, int limit) {
-    return consumerPosition.getSeekDirection() == SeekDirectionDTO.TAILING
+      SeekDirectionDTO seekDirection, int limit) {
+    return seekDirection == SeekDirectionDTO.TAILING
         ? evt -> true // no limit for tailing
         : new ResultSizeLimiter(limit);
   }
@@ -189,8 +190,6 @@ public class MessagesService {
     if (StringUtils.isEmpty(query)) {
       return evt -> true;
     }
-    filterQueryType = Optional.ofNullable(filterQueryType)
-        .orElse(MessageFilterTypeDTO.STRING_CONTAINS);
     var messageFilter = MessageFilters.createMsgFilter(query, filterQueryType);
     return evt -> {
       // we only apply filter for message events
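The non-tailing branches above cap results with ResultSizeLimiter, whose source is not part of this diff. A sketch of the stateful take-while predicate the code relies on (an assumption about its shape, not the committed implementation):

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Predicate;

// Assumed behavior: only MESSAGE events count towards the limit; phase/stats events pass through.
class ResultSizeLimiterSketch implements Predicate<TopicMessageEventDTO> {
  private final AtomicInteger processed = new AtomicInteger();
  private final int limit;

  ResultSizeLimiterSketch(int limit) {
    this.limit = limit;
  }

  @Override
  public boolean test(TopicMessageEventDTO event) {
    if (TopicMessageEventDTO.TypeEnum.MESSAGE.equals(event.getType())) {
      return processed.incrementAndGet() <= limit;
    }
    return true;
  }
}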
@@ -47,11 +47,12 @@ public class OffsetsResetService {
                                                              @Nullable Collection<Integer> partitions,
                                                              OffsetSpec spec) {
     if (partitions == null) {
-      return client.listOffsets(topic, spec);
+      return client.listTopicOffsets(topic, spec, true);
     }
     return client.listOffsets(
         partitions.stream().map(idx -> new TopicPartition(topic, idx)).collect(toSet()),
-        spec
+        spec,
+        true
     );
   }
@@ -84,9 +85,9 @@ public class OffsetsResetService {
         .collect(toMap(e -> new TopicPartition(topic, e.getKey()), Map.Entry::getValue));
     return checkGroupCondition(cluster, group).flatMap(
         ac ->
-            ac.listOffsets(partitionOffsets.keySet(), OffsetSpec.earliest())
+            ac.listOffsets(partitionOffsets.keySet(), OffsetSpec.earliest(), true)
                 .flatMap(earliest ->
-                    ac.listOffsets(partitionOffsets.keySet(), OffsetSpec.latest())
+                    ac.listOffsets(partitionOffsets.keySet(), OffsetSpec.latest(), true)
                         .map(latest -> editOffsetsBounds(partitionOffsets, earliest, latest))
                         .flatMap(offsetsToCommit -> resetOffsets(ac, group, offsetsToCommit)))
     );
@@ -9,11 +9,15 @@ import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterators;
 import com.provectus.kafka.ui.exception.IllegalEntityStateException;
 import com.provectus.kafka.ui.exception.NotFoundException;
+import com.provectus.kafka.ui.exception.ValidationException;
 import com.provectus.kafka.ui.util.MapUtil;
 import com.provectus.kafka.ui.util.NumberUtil;
+import com.provectus.kafka.ui.util.annotations.KafkaClientInternalsDependant;
 import java.io.Closeable;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -25,6 +29,7 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.BiFunction;
 import java.util.function.Function;
+import java.util.function.Predicate;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import javax.annotation.Nullable;
@@ -51,6 +56,7 @@ import org.apache.kafka.common.KafkaException;
 import org.apache.kafka.common.KafkaFuture;
 import org.apache.kafka.common.Node;
 import org.apache.kafka.common.TopicPartition;
+import org.apache.kafka.common.TopicPartitionInfo;
 import org.apache.kafka.common.TopicPartitionReplica;
 import org.apache.kafka.common.acl.AclBinding;
 import org.apache.kafka.common.acl.AclBindingFilter;
@@ -422,21 +428,81 @@ public class ReactiveAdminClient implements Closeable {
         .all());
   }

-  public Mono<Map<TopicPartition, Long>> listOffsets(String topic,
-                                                     OffsetSpec offsetSpec) {
-    return topicPartitions(topic).flatMap(tps -> listOffsets(tps, offsetSpec));
+  /**
+   * List offsets for the topic's partitions and OffsetSpec.
+   * @param failOnUnknownLeader true - throw exception in case of no-leader partitions,
+   *                            false - skip partitions with no leader
+   */
+  public Mono<Map<TopicPartition, Long>> listTopicOffsets(String topic,
+                                                          OffsetSpec offsetSpec,
+                                                          boolean failOnUnknownLeader) {
+    return describeTopic(topic)
+        .map(td -> filterPartitionsWithLeaderCheck(List.of(td), p -> true, failOnUnknownLeader))
+        .flatMap(partitions -> listOffsetsUnsafe(partitions, offsetSpec));
   }

+  /**
+   * List offsets for the specified partitions and OffsetSpec.
+   * @param failOnUnknownLeader true - throw exception in case of no-leader partitions,
+   *                            false - skip partitions with no leader
+   */
   public Mono<Map<TopicPartition, Long>> listOffsets(Collection<TopicPartition> partitions,
-                                                     OffsetSpec offsetSpec) {
-    //TODO: need to split this into multiple calls if number of target partitions is big
-    return toMono(
-        client.listOffsets(partitions.stream().collect(toMap(tp -> tp, tp -> offsetSpec))).all())
-        .map(offsets -> offsets.entrySet()
-            .stream()
-            // filtering partitions for which offsets were not found
-            .filter(e -> e.getValue().offset() >= 0)
-            .collect(toMap(Map.Entry::getKey, e -> e.getValue().offset())));
+                                                     OffsetSpec offsetSpec,
+                                                     boolean failOnUnknownLeader) {
+    return filterPartitionsWithLeaderCheck(partitions, failOnUnknownLeader)
+        .flatMap(parts -> listOffsetsUnsafe(parts, offsetSpec));
+  }
+
+  private Mono<Collection<TopicPartition>> filterPartitionsWithLeaderCheck(Collection<TopicPartition> partitions,
+                                                                           boolean failOnUnknownLeader) {
+    var targetTopics = partitions.stream().map(TopicPartition::topic).collect(Collectors.toSet());
+    return describeTopicsImpl(targetTopics)
+        .map(descriptions ->
+            filterPartitionsWithLeaderCheck(
+                descriptions.values(), partitions::contains, failOnUnknownLeader));
+  }
+
+  private Set<TopicPartition> filterPartitionsWithLeaderCheck(Collection<TopicDescription> topicDescriptions,
+                                                              Predicate<TopicPartition> partitionPredicate,
+                                                              boolean failOnUnknownLeader) {
+    var goodPartitions = new HashSet<TopicPartition>();
+    for (TopicDescription description : topicDescriptions) {
+      for (TopicPartitionInfo partitionInfo : description.partitions()) {
+        TopicPartition topicPartition = new TopicPartition(description.name(), partitionInfo.partition());
+        if (!partitionPredicate.test(topicPartition)) {
+          continue;
+        }
+        if (partitionInfo.leader() != null) {
+          goodPartitions.add(topicPartition);
+        } else if (failOnUnknownLeader) {
+          throw new ValidationException(String.format("Topic partition %s has no leader", topicPartition));
+        }
+      }
+    }
+    return goodPartitions;
+  }
+
+  // 1. NOTE(!): should only apply for partitions with existing leader,
+  // otherwise AdminClient will try to fetch topic metadata, fail and retry infinitely (until timeout)
+  // 2. TODO: check if it is a bug that AdminClient never throws LeaderNotAvailableException and just retries instead
+  @KafkaClientInternalsDependant
+  public Mono<Map<TopicPartition, Long>> listOffsetsUnsafe(Collection<TopicPartition> partitions,
+                                                           OffsetSpec offsetSpec) {
+
+    Function<Collection<TopicPartition>, Mono<Map<TopicPartition, Long>>> call =
+        parts -> toMono(
+            client.listOffsets(parts.stream().collect(toMap(tp -> tp, tp -> offsetSpec))).all())
+            .map(offsets -> offsets.entrySet().stream()
+                // filtering partitions for which offsets were not found
+                .filter(e -> e.getValue().offset() >= 0)
+                .collect(toMap(Map.Entry::getKey, e -> e.getValue().offset())));
+
+    return partitionCalls(
+        partitions,
+        200,
+        call,
+        (m1, m2) -> ImmutableMap.<TopicPartition, Long>builder().putAll(m1).putAll(m2).build()
+    );
   }

   public Mono<Collection<AclBinding>> listAcls() {
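listOffsetsUnsafe delegates batching to partitionCalls(..), which is outside this hunk. A sketch of the pattern it implies, assuming the Guava Iterators helper already imported above (a hypothetical stand-in, not the committed helper):

// Runs `call` over fixed-size chunks of `items` and merges the partial results pairwise.
static <I, R> Mono<R> partitionCallsSketch(Collection<I> items,
                                           int partitionSize,
                                           Function<Collection<I>, Mono<R>> call,
                                           BiFunction<R, R, R> merger) {
  if (items.size() <= partitionSize) {
    return call.apply(items); // small enough for a single AdminClient call
  }
  Iterable<List<I>> chunks = () -> Iterators.partition(items.iterator(), partitionSize);
  return Flux.fromIterable(chunks).concatMap(call).reduce(merger);
}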
@@ -455,17 +521,6 @@ public class ReactiveAdminClient implements Closeable {
     return toMono(client.deleteAcls(filters).all()).then();
   }

-  private Mono<Set<TopicPartition>> topicPartitions(String topic) {
-    return toMono(client.describeTopics(List.of(topic)).all())
-        .map(r -> r.values().stream()
-            .findFirst()
-            .stream()
-            .flatMap(d -> d.partitions().stream())
-            .map(p -> new TopicPartition(topic, p.partition()))
-            .collect(Collectors.toSet())
-        );
-  }
-
   public Mono<Void> updateBrokerConfigByName(Integer brokerId, String name, String value) {
     ConfigResource cr = new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(brokerId));
     AlterConfigOp op = new AlterConfigOp(new ConfigEntry(name, value), AlterConfigOp.OpType.SET);
@@ -138,11 +138,15 @@ public class TopicsService {
                                                                      ReactiveAdminClient ac) {
     var topicPartitions = descriptions.values().stream()
         .flatMap(desc ->
-            desc.partitions().stream().map(p -> new TopicPartition(desc.name(), p.partition())))
+            desc.partitions().stream()
+                // list offsets should only be applied to partitions with existing leader
+                // (see ReactiveAdminClient.listOffsetsUnsafe(..) docs)
+                .filter(tp -> tp.leader() != null)
+                .map(p -> new TopicPartition(desc.name(), p.partition())))
         .collect(toList());

-    return ac.listOffsets(topicPartitions, OffsetSpec.earliest())
-        .zipWith(ac.listOffsets(topicPartitions, OffsetSpec.latest()),
+    return ac.listOffsetsUnsafe(topicPartitions, OffsetSpec.earliest())
+        .zipWith(ac.listOffsetsUnsafe(topicPartitions, OffsetSpec.latest()),
             (earliest, latest) ->
                 topicPartitions.stream()
                     .filter(tp -> earliest.containsKey(tp) && latest.containsKey(tp))
@@ -2,12 +2,12 @@ package com.provectus.kafka.ui.service.analyze;

 import static com.provectus.kafka.ui.emitter.AbstractEmitter.NO_MORE_DATA_EMPTY_POLLS_COUNT;

+import com.provectus.kafka.ui.emitter.OffsetsInfo;
 import com.provectus.kafka.ui.exception.TopicAnalysisException;
 import com.provectus.kafka.ui.model.KafkaCluster;
 import com.provectus.kafka.ui.model.TopicAnalysisDTO;
 import com.provectus.kafka.ui.service.ConsumerGroupService;
 import com.provectus.kafka.ui.service.TopicsService;
-import com.provectus.kafka.ui.util.OffsetsSeek.WaitingOffsets;
 import java.io.Closeable;
 import java.time.Duration;
 import java.time.Instant;
@@ -119,14 +119,14 @@ public class TopicAnalysisService {
       consumer.assign(topicPartitions);
       consumer.seekToBeginning(topicPartitions);

-      var waitingOffsets = new WaitingOffsets(topicId.topicName, consumer, topicPartitions);
-      for (int emptyPolls = 0; !waitingOffsets.endReached() && emptyPolls < NO_MORE_DATA_EMPTY_POLLS_COUNT;) {
+      var offsetsInfo = new OffsetsInfo(consumer, topicId.topicName);
+      for (int emptyPolls = 0; !offsetsInfo.assignedPartitionsFullyPolled()
+          && emptyPolls < NO_MORE_DATA_EMPTY_POLLS_COUNT;) {
         var polled = consumer.poll(Duration.ofSeconds(3));
         emptyPolls = polled.isEmpty() ? emptyPolls + 1 : 0;
         polled.forEach(r -> {
           totalStats.apply(r);
           partitionStats.get(r.partition()).apply(r);
-          waitingOffsets.markPolled(r);
         });
         updateProgress();
       }
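OffsetsInfo itself is introduced elsewhere in this commit; the loop above only needs its fully-polled check. A sketch of the semantics that check must have (an assumption based on usage, not the committed code):

// For every assigned partition: fully polled iff the consumer position reached the end offset.
static boolean assignedPartitionsFullyPolledSketch(Consumer<?, ?> consumer,
                                                   Map<TopicPartition, Long> endOffsets) {
  for (TopicPartition tp : consumer.assignment()) {
    if (consumer.position(tp) < endOffsets.get(tp)) {
      return false; // records remain that were not polled yet
    }
  }
  return true;
}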
@@ -1,166 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandDTO;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Spliterator;
import java.util.Spliterators;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import java.util.stream.StreamSupport;

public abstract class BaseStrategy {
  protected static final String KSQL_REQUEST_PATH = "/ksql";
  protected static final String QUERY_REQUEST_PATH = "/query";
  private static final String MAPPING_EXCEPTION_ERROR = "KSQL DB response mapping error";
  protected String host = null;
  protected KsqlCommandDTO ksqlCommand = null;

  public String getUri() {
    if (this.host != null) {
      return this.host + this.getRequestPath();
    }
    throw new UnprocessableEntityException("Strategy doesn't have host");
  }

  public boolean test(String sql) {
    return sql.trim().toLowerCase().matches(getTestRegExp());
  }

  public BaseStrategy host(String host) {
    this.host = host;
    return this;
  }

  public KsqlCommandDTO getKsqlCommand() {
    return ksqlCommand;
  }

  public BaseStrategy ksqlCommand(KsqlCommandDTO ksqlCommand) {
    this.ksqlCommand = ksqlCommand;
    return this;
  }

  protected String getRequestPath() {
    return BaseStrategy.KSQL_REQUEST_PATH;
  }

  protected KsqlCommandResponseDTO serializeTableResponse(JsonNode response, String key) {
    JsonNode item = getResponseFirstItemValue(response, key);
    TableDTO table = item.isArray() ? getTableFromArray(item) : getTableFromObject(item);
    return (new KsqlCommandResponseDTO()).data(table);
  }

  protected KsqlCommandResponseDTO serializeMessageResponse(JsonNode response, String key) {
    JsonNode item = getResponseFirstItemValue(response, key);
    return (new KsqlCommandResponseDTO()).message(getMessageFromObject(item));
  }

  protected KsqlCommandResponseDTO serializeQueryResponse(JsonNode response) {
    if (response.isArray() && response.size() > 0) {
      TableDTO table = (new TableDTO())
          .headers(getQueryResponseHeader(response))
          .rows(getQueryResponseRows(response));
      return (new KsqlCommandResponseDTO()).data(table);
    }
    throw new UnprocessableEntityException(MAPPING_EXCEPTION_ERROR);
  }

  private JsonNode getResponseFirstItemValue(JsonNode response, String key) {
    if (response.isArray() && response.size() > 0) {
      JsonNode first = response.get(0);
      if (first.has(key)) {
        return first.path(key);
      }
    }
    throw new UnprocessableEntityException(MAPPING_EXCEPTION_ERROR);
  }

  private List<String> getQueryResponseHeader(JsonNode response) {
    JsonNode headerRow = response.get(0);
    if (headerRow.isObject() && headerRow.has("header")) {
      String schema = headerRow.get("header").get("schema").asText();
      return Arrays.stream(schema.split(",")).map(String::trim).collect(Collectors.toList());
    }
    return new ArrayList<>();
  }

  private List<List<String>> getQueryResponseRows(JsonNode node) {
    return getStreamForJsonArray(node)
        .filter(row -> row.has("row") && row.get("row").has("columns"))
        .map(row -> row.get("row").get("columns"))
        .map(cellNode -> getStreamForJsonArray(cellNode)
            .map(JsonNode::asText)
            .collect(Collectors.toList())
        )
        .collect(Collectors.toList());
  }

  private TableDTO getTableFromArray(JsonNode node) {
    TableDTO table = new TableDTO();
    table.headers(new ArrayList<>()).rows(new ArrayList<>());
    if (node.size() > 0) {
      List<String> keys = getJsonObjectKeys(node.get(0));
      List<List<String>> rows = getTableRows(node, keys);
      table.headers(keys).rows(rows);
    }
    return table;
  }

  private TableDTO getTableFromObject(JsonNode node) {
    List<String> keys = getJsonObjectKeys(node);
    List<String> values = getJsonObjectValues(node);
    List<List<String>> rows = IntStream
        .range(0, keys.size())
        .mapToObj(i -> List.of(keys.get(i), values.get(i)))
        .collect(Collectors.toList());
    return (new TableDTO()).headers(List.of("key", "value")).rows(rows);
  }

  private String getMessageFromObject(JsonNode node) {
    if (node.isObject() && node.has("message")) {
      return node.get("message").asText();
    }
    throw new UnprocessableEntityException(MAPPING_EXCEPTION_ERROR);
  }

  private List<List<String>> getTableRows(JsonNode node, List<String> keys) {
    return getStreamForJsonArray(node)
        .map(row -> keys.stream()
            .map(header -> row.get(header).asText())
            .collect(Collectors.toList())
        )
        .collect(Collectors.toList());
  }

  private Stream<JsonNode> getStreamForJsonArray(JsonNode node) {
    if (node.isArray() && node.size() > 0) {
      return StreamSupport.stream(node.spliterator(), false);
    }
    throw new UnprocessableEntityException(MAPPING_EXCEPTION_ERROR);
  }

  private List<String> getJsonObjectKeys(JsonNode node) {
    if (node.isObject()) {
      return StreamSupport.stream(
          Spliterators.spliteratorUnknownSize(node.fieldNames(), Spliterator.ORDERED), false
      ).collect(Collectors.toList());
    }
    throw new UnprocessableEntityException(MAPPING_EXCEPTION_ERROR);
  }

  private List<String> getJsonObjectValues(JsonNode node) {
    return getJsonObjectKeys(node).stream().map(key -> node.get(key).asText())
        .collect(Collectors.toList());
  }

  public abstract KsqlCommandResponseDTO serializeResponse(JsonNode response);

  protected abstract String getTestRegExp();
}
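For context on the removed design: each strategy claimed a statement via test(), and KsqlService (deleted above) dispatched to the first match. A tiny illustration using CreateStrategy, whose pattern appears in the next file below:

BaseStrategy strategy = new CreateStrategy();
strategy.test("CREATE STREAM s WITH (kafka_topic='s');"); // true  -> this strategy handles it
strategy.test("DROP STREAM s;");                          // false -> the next strategy is tried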
@@ -1,20 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
public class CreateStrategy extends BaseStrategy {
  private static final String RESPONSE_VALUE_KEY = "commandStatus";

  @Override
  public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
    return serializeMessageResponse(response, RESPONSE_VALUE_KEY);
  }

  @Override
  protected String getTestRegExp() {
    return "create (table|stream)(.*)(with|as select(.*)from)(.*);";
  }
}
@@ -1,20 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
public class DescribeStrategy extends BaseStrategy {
  private static final String RESPONSE_VALUE_KEY = "sourceDescription";

  @Override
  public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
    return serializeTableResponse(response, RESPONSE_VALUE_KEY);
  }

  @Override
  protected String getTestRegExp() {
    return "describe (.*);";
  }
}
@@ -1,20 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
public class DropStrategy extends BaseStrategy {
  private static final String RESPONSE_VALUE_KEY = "commandStatus";

  @Override
  public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
    return serializeMessageResponse(response, RESPONSE_VALUE_KEY);
  }

  @Override
  protected String getTestRegExp() {
    return "drop (table|stream) (.*);";
  }
}
@@ -1,20 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
public class ExplainStrategy extends BaseStrategy {
  private static final String RESPONSE_VALUE_KEY = "queryDescription";

  @Override
  public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
    return serializeTableResponse(response, RESPONSE_VALUE_KEY);
  }

  @Override
  protected String getTestRegExp() {
    return "explain (.*);";
  }
}
@@ -1,24 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
public class SelectStrategy extends BaseStrategy {

  @Override
  public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
    return serializeQueryResponse(response);
  }

  @Override
  protected String getRequestPath() {
    return BaseStrategy.QUERY_REQUEST_PATH;
  }

  @Override
  protected String getTestRegExp() {
    return "select (.*) from (.*);";
  }
}
@@ -1,67 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandDTO;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import java.util.List;
import java.util.Optional;
import org.springframework.stereotype.Component;

@Component
public class ShowStrategy extends BaseStrategy {
  private static final List<String> SHOW_STATEMENTS =
      List.of("functions", "topics", "streams", "tables", "queries", "properties");
  private static final List<String> LIST_STATEMENTS =
      List.of("functions", "topics", "streams", "tables");
  private String responseValueKey = "";

  @Override
  public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
    return serializeTableResponse(response, responseValueKey);
  }

  @Override
  public boolean test(String sql) {
    Optional<String> statement = SHOW_STATEMENTS.stream()
        .filter(s -> testSql(sql, getShowRegExp(s)) || testSql(sql, getListRegExp(s)))
        .findFirst();
    if (statement.isPresent()) {
      setResponseValueKey(statement.get());
      return true;
    }
    return false;
  }

  @Override
  protected String getTestRegExp() {
    return "";
  }

  @Override
  public BaseStrategy ksqlCommand(KsqlCommandDTO ksqlCommand) {
    // return new instance to avoid conflicts for parallel requests
    ShowStrategy clone = new ShowStrategy();
    clone.setResponseValueKey(responseValueKey);
    clone.ksqlCommand = ksqlCommand;
    return clone;
  }

  protected String getShowRegExp(String key) {
    return "show " + key + ";";
  }

  protected String getListRegExp(String key) {
    if (LIST_STATEMENTS.contains(key)) {
      return "list " + key + ";";
    }
    return "";
  }

  private void setResponseValueKey(String path) {
    responseValueKey = path;
  }

  private boolean testSql(String sql, String pattern) {
    return sql.trim().toLowerCase().matches(pattern);
  }
}
@@ -1,20 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import com.fasterxml.jackson.databind.JsonNode;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import org.springframework.stereotype.Component;

@Component
public class TerminateStrategy extends BaseStrategy {
  private static final String RESPONSE_VALUE_KEY = "commandStatus";

  @Override
  public KsqlCommandResponseDTO serializeResponse(JsonNode response) {
    return serializeMessageResponse(response, RESPONSE_VALUE_KEY);
  }

  @Override
  protected String getTestRegExp() {
    return "terminate (.*);";
  }
}
@@ -1,143 +0,0 @@
package com.provectus.kafka.ui.util;

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;

@Slf4j
public abstract class OffsetsSeek {
  protected final String topic;
  protected final ConsumerPosition consumerPosition;

  protected OffsetsSeek(String topic, ConsumerPosition consumerPosition) {
    this.topic = topic;
    this.consumerPosition = consumerPosition;
  }

  public ConsumerPosition getConsumerPosition() {
    return consumerPosition;
  }

  public Map<TopicPartition, Long> getPartitionsOffsets(Consumer<Bytes, Bytes> consumer) {
    SeekTypeDTO seekType = consumerPosition.getSeekType();
    List<TopicPartition> partitions = getRequestedPartitions(consumer);
    log.info("Positioning consumer for topic {} with {}", topic, consumerPosition);
    Map<TopicPartition, Long> offsets;
    switch (seekType) {
      case OFFSET:
        offsets = offsetsFromPositions(consumer, partitions);
        break;
      case TIMESTAMP:
        offsets = offsetsForTimestamp(consumer);
        break;
      case BEGINNING:
        offsets = offsetsFromBeginning(consumer, partitions);
        break;
      case LATEST:
        offsets = endOffsets(consumer, partitions);
        break;
      default:
        throw new IllegalArgumentException("Unknown seekType: " + seekType);
    }
    return offsets;
  }

  public WaitingOffsets waitingOffsets(Consumer<Bytes, Bytes> consumer,
                                       Collection<TopicPartition> partitions) {
    return new WaitingOffsets(topic, consumer, partitions);
  }

  public WaitingOffsets assignAndSeek(Consumer<Bytes, Bytes> consumer) {
    final Map<TopicPartition, Long> partitionsOffsets = getPartitionsOffsets(consumer);
    consumer.assign(partitionsOffsets.keySet());
    partitionsOffsets.forEach(consumer::seek);
    log.info("Assignment: {}", consumer.assignment());
    return waitingOffsets(consumer, partitionsOffsets.keySet());
  }

  public List<TopicPartition> getRequestedPartitions(Consumer<Bytes, Bytes> consumer) {
    Map<TopicPartition, Long> partitionPositions = consumerPosition.getSeekTo();
    return consumer.partitionsFor(topic).stream()
        .filter(
            p -> partitionPositions.isEmpty()
                || partitionPositions.containsKey(new TopicPartition(p.topic(), p.partition()))
        ).map(p -> new TopicPartition(p.topic(), p.partition()))
        .collect(Collectors.toList());
  }

  protected Map<TopicPartition, Long> endOffsets(
      Consumer<Bytes, Bytes> consumer, List<TopicPartition> partitions) {
    return consumer.endOffsets(partitions);
  }

  protected abstract Map<TopicPartition, Long> offsetsFromBeginning(
      Consumer<Bytes, Bytes> consumer, List<TopicPartition> partitions);

  protected abstract Map<TopicPartition, Long> offsetsForTimestamp(
      Consumer<Bytes, Bytes> consumer);

  protected abstract Map<TopicPartition, Long> offsetsFromPositions(
      Consumer<Bytes, Bytes> consumer, List<TopicPartition> partitions);

  public static class WaitingOffsets {
    private final Map<Integer, Long> endOffsets; // partition number -> offset
    private final Map<Integer, Long> beginOffsets; // partition number -> offset

    public WaitingOffsets(String topic, Consumer<?, ?> consumer,
                          Collection<TopicPartition> partitions) {
      var allBeginningOffsets = consumer.beginningOffsets(partitions);
      var allEndOffsets = consumer.endOffsets(partitions);

      this.endOffsets = allEndOffsets.entrySet().stream()
          .filter(entry -> !allBeginningOffsets.get(entry.getKey()).equals(entry.getValue()))
          .map(e -> Tuples.of(e.getKey().partition(), e.getValue() - 1))
          .collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));

      this.beginOffsets = this.endOffsets.keySet().stream()
          .map(p -> Tuples.of(p, allBeginningOffsets.get(new TopicPartition(topic, p))))
          .collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));
    }

    public void markPolled(ConsumerRecord<?, ?> rec) {
      markPolled(rec.partition(), rec.offset());
    }

    public void markPolled(int partition, long offset) {
      Long endWaiting = endOffsets.get(partition);
      if (endWaiting != null && endWaiting <= offset) {
        endOffsets.remove(partition);
      }
      Long beginWaiting = beginOffsets.get(partition);
      if (beginWaiting != null && beginWaiting >= offset) {
        beginOffsets.remove(partition);
      }
    }

    public boolean endReached() {
      return endOffsets.isEmpty();
    }

    public boolean beginReached() {
      return beginOffsets.isEmpty();
    }

    public Map<Integer, Long> getEndOffsets() {
      return endOffsets;
    }

    public Map<Integer, Long> getBeginOffsets() {
      return beginOffsets;
    }
  }
}
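The removed WaitingOffsets tracked completion against endOffset - 1, the offset of the last record that actually exists. A worked example of that arithmetic:

// beginningOffset = 5, endOffset = 12  ->  records 5..11 exist
long endOffset = 12;
long lastExisting = endOffset - 1; // 11: the offset markPolled waits for
boolean done = 11 >= lastExisting; // true once a record at offset 11 is polled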
@ -1,120 +0,0 @@
|
||||||
package com.provectus.kafka.ui.util;
|
|
||||||
|
|
||||||
import com.provectus.kafka.ui.model.ConsumerPosition;
|
|
||||||
import java.util.Collection;
|
|
||||||
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuples;

@Slf4j
public class OffsetsSeekBackward extends OffsetsSeek {

  private final int maxMessages;

  public OffsetsSeekBackward(String topic,
                             ConsumerPosition consumerPosition, int maxMessages) {
    super(topic, consumerPosition);
    this.maxMessages = maxMessages;
  }

  public int msgsPerPartition(int partitionsSize) {
    return msgsPerPartition(maxMessages, partitionsSize);
  }

  public int msgsPerPartition(long awaitingMessages, int partitionsSize) {
    return (int) Math.ceil((double) awaitingMessages / partitionsSize);
  }

  protected Map<TopicPartition, Long> offsetsFromPositions(Consumer<Bytes, Bytes> consumer,
                                                           List<TopicPartition> partitions) {
    return findOffsetsInt(consumer, consumerPosition.getSeekTo(), partitions);
  }

  protected Map<TopicPartition, Long> offsetsFromBeginning(Consumer<Bytes, Bytes> consumer,
                                                           List<TopicPartition> partitions) {
    return findOffsets(consumer, Map.of(), partitions);
  }

  protected Map<TopicPartition, Long> offsetsForTimestamp(Consumer<Bytes, Bytes> consumer) {
    Map<TopicPartition, Long> timestampsToSearch =
        consumerPosition.getSeekTo().entrySet().stream()
            .collect(Collectors.toMap(
                Map.Entry::getKey,
                Map.Entry::getValue
            ));
    Map<TopicPartition, Long> offsetsForTimestamps = consumer.offsetsForTimes(timestampsToSearch)
        .entrySet().stream()
        .filter(e -> e.getValue() != null)
        .map(v -> Tuples.of(v.getKey(), v.getValue().offset()))
        .collect(Collectors.toMap(Tuple2::getT1, Tuple2::getT2));

    if (offsetsForTimestamps.isEmpty()) {
      throw new IllegalArgumentException("No offsets were found for requested timestamps");
    }

    log.info("Timestamps: {} to offsets: {}", timestampsToSearch, offsetsForTimestamps);

    return findOffsets(consumer, offsetsForTimestamps, offsetsForTimestamps.keySet());
  }

  protected Map<TopicPartition, Long> findOffsetsInt(
      Consumer<Bytes, Bytes> consumer, Map<TopicPartition, Long> seekTo,
      List<TopicPartition> partitions) {
    return findOffsets(consumer, seekTo, partitions);
  }

  protected Map<TopicPartition, Long> findOffsets(
      Consumer<Bytes, Bytes> consumer, Map<TopicPartition, Long> seekTo,
      Collection<TopicPartition> partitions) {

    final Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(partitions);
    final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);

    final Map<TopicPartition, Long> seekMap = new HashMap<>();
    final Set<TopicPartition> emptyPartitions = new HashSet<>();

    for (Map.Entry<TopicPartition, Long> entry : seekTo.entrySet()) {
      final Long endOffset = endOffsets.get(entry.getKey());
      final Long beginningOffset = beginningOffsets.get(entry.getKey());
      if (beginningOffset != null
          && endOffset != null
          && beginningOffset < endOffset
          && entry.getValue() > beginningOffset
      ) {
        final Long value;
        if (entry.getValue() > endOffset) {
          value = endOffset;
        } else {
          value = entry.getValue();
        }

        seekMap.put(entry.getKey(), value);
      } else {
        emptyPartitions.add(entry.getKey());
      }
    }

    Set<TopicPartition> waiting = new HashSet<>(partitions);
    waiting.removeAll(emptyPartitions);
    waiting.removeAll(seekMap.keySet());

    for (TopicPartition topicPartition : waiting) {
      seekMap.put(topicPartition, endOffsets.get(topicPartition));
    }

    return seekMap;
  }

}
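The clamping rule buried in findOffsets above is worth restating on its own: a requested backward seek is honored only when the partition actually holds data (beginning < end) and the request lies past the beginning; a request beyond the end is pulled back to the end offset, and everything else marks the partition as empty. A minimal standalone sketch of that rule, with a hypothetical helper name not taken from the source:

import java.util.OptionalLong;

final class BackwardSeekBounds {

  // Clamp a requested seek offset into (beginning, end]; an empty result
  // mirrors the "emptyPartitions" branch in findOffsets above.
  static OptionalLong clamp(long beginning, long end, long requested) {
    if (beginning >= end || requested <= beginning) {
      return OptionalLong.empty();
    }
    return OptionalLong.of(Math.min(requested, end));
  }
}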
@@ -1,61 +0,0 @@
package com.provectus.kafka.ui.util;

import com.provectus.kafka.ui.model.ConsumerPosition;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;

@Slf4j
public class OffsetsSeekForward extends OffsetsSeek {

  public OffsetsSeekForward(String topic, ConsumerPosition consumerPosition) {
    super(topic, consumerPosition);
  }

  protected Map<TopicPartition, Long> offsetsFromPositions(Consumer<Bytes, Bytes> consumer,
                                                           List<TopicPartition> partitions) {
    final Map<TopicPartition, Long> offsets =
        offsetsFromBeginning(consumer, partitions);

    final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(offsets.keySet());
    final Set<TopicPartition> set = new HashSet<>(consumerPosition.getSeekTo().keySet());
    final Map<TopicPartition, Long> collect = consumerPosition.getSeekTo().entrySet().stream()
        .filter(e -> e.getValue() < endOffsets.get(e.getKey()))
        .filter(e -> endOffsets.get(e.getKey()) > offsets.get(e.getKey()))
        .collect(Collectors.toMap(
            Map.Entry::getKey,
            Map.Entry::getValue
        ));
    offsets.putAll(collect);
    set.removeAll(collect.keySet());
    set.forEach(offsets::remove);

    return offsets;
  }

  protected Map<TopicPartition, Long> offsetsForTimestamp(Consumer<Bytes, Bytes> consumer) {
    Map<TopicPartition, Long> offsetsForTimestamps =
        consumer.offsetsForTimes(consumerPosition.getSeekTo())
            .entrySet().stream()
            .filter(e -> e.getValue() != null)
            .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));

    if (offsetsForTimestamps.isEmpty()) {
      throw new IllegalArgumentException("No offsets were found for requested timestamps");
    }

    return offsetsForTimestamps;
  }

  protected Map<TopicPartition, Long> offsetsFromBeginning(Consumer<Bytes, Bytes> consumer,
                                                           List<TopicPartition> partitions) {
    return consumer.beginningOffsets(partitions);
  }

}
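The forward variant applies the mirror-image rule: every assigned partition starts at its beginning offset, a requested offset survives only if it still lies strictly before the partition's end, and a requested partition that cannot be honored is dropped entirely. A compact restatement under the same caveats as the sketch above (helper name is hypothetical; partitions without an explicit request simply keep their beginning offset):

import java.util.OptionalLong;

final class ForwardSeekBounds {

  // A forward seek request is kept only when the partition has data and the
  // request is strictly before the end offset; otherwise the partition is skipped.
  static OptionalLong resolve(long beginning, long end, long requested) {
    if (end <= beginning || requested >= end) {
      return OptionalLong.empty();
    }
    return OptionalLong.of(requested);
  }
}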
@@ -0,0 +1,53 @@
package com.provectus.kafka.ui.emitter;

import static org.assertj.core.api.Assertions.assertThat;

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

class OffsetsInfoTest {

  final String topic = "test";
  final TopicPartition tp0 = new TopicPartition(topic, 0); //offsets: start 0, end 0
  final TopicPartition tp1 = new TopicPartition(topic, 1); //offsets: start 10, end 10
  final TopicPartition tp2 = new TopicPartition(topic, 2); //offsets: start 0, end 20
  final TopicPartition tp3 = new TopicPartition(topic, 3); //offsets: start 25, end 30

  MockConsumer<Bytes, Bytes> consumer;

  @BeforeEach
  void initMockConsumer() {
    consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    consumer.updatePartitions(
        topic,
        Stream.of(tp0, tp1, tp2, tp3)
            .map(tp -> new PartitionInfo(topic, tp.partition(), null, null, null, null))
            .collect(Collectors.toList()));
    consumer.updateBeginningOffsets(Map.of(tp0, 0L, tp1, 10L, tp2, 0L, tp3, 25L));
    consumer.updateEndOffsets(Map.of(tp0, 0L, tp1, 10L, tp2, 20L, tp3, 30L));
  }

  @Test
  void fillsInnerFieldsAccordingToTopicState() {
    var offsets = new OffsetsInfo(consumer, List.of(tp0, tp1, tp2, tp3));

    assertThat(offsets.getBeginOffsets()).containsEntry(tp0, 0L).containsEntry(tp1, 10L).containsEntry(tp2, 0L)
        .containsEntry(tp3, 25L);

    assertThat(offsets.getEndOffsets()).containsEntry(tp0, 0L).containsEntry(tp1, 10L).containsEntry(tp2, 20L)
        .containsEntry(tp3, 30L);

    assertThat(offsets.getEmptyPartitions()).contains(tp0, tp1);
    assertThat(offsets.getNonEmptyPartitions()).contains(tp2, tp3);
  }

}
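The assertions above pin down how OffsetsInfo classifies partitions: a partition counts as empty exactly when its beginning and end offsets coincide (tp0 at 0/0 and tp1 at 10/10), regardless of how large those offsets are. A one-line restatement of that predicate, assuming OffsetsInfo is fed the consumer's beginningOffsets and endOffsets maps (the helper below is illustrative, not the class's real internals):

import java.util.Map;
import org.apache.kafka.common.TopicPartition;

final class PartitionEmptiness {

  // "Empty" means no pollable records: the next offset to read equals the end offset.
  static boolean isEmpty(Map<TopicPartition, Long> begin,
                         Map<TopicPartition, Long> end,
                         TopicPartition tp) {
    return begin.get(tp).equals(end.get(tp));
  }
}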
@@ -0,0 +1,88 @@
package com.provectus.kafka.ui.emitter;

import static org.assertj.core.api.Assertions.assertThat;

import com.provectus.kafka.ui.model.SeekTypeDTO;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;

class SeekOperationsTest {

  final String topic = "test";
  final TopicPartition tp0 = new TopicPartition(topic, 0); //offsets: start 0, end 0
  final TopicPartition tp1 = new TopicPartition(topic, 1); //offsets: start 10, end 10
  final TopicPartition tp2 = new TopicPartition(topic, 2); //offsets: start 0, end 20
  final TopicPartition tp3 = new TopicPartition(topic, 3); //offsets: start 25, end 30

  MockConsumer<Bytes, Bytes> consumer;

  @BeforeEach
  void initMockConsumer() {
    consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    consumer.updatePartitions(
        topic,
        Stream.of(tp0, tp1, tp2, tp3)
            .map(tp -> new PartitionInfo(topic, tp.partition(), null, null, null, null))
            .collect(Collectors.toList()));
    consumer.updateBeginningOffsets(Map.of(tp0, 0L, tp1, 10L, tp2, 0L, tp3, 25L));
    consumer.updateEndOffsets(Map.of(tp0, 0L, tp1, 10L, tp2, 20L, tp3, 30L));
  }

  @Nested
  class GetOffsetsForSeek {

    @Test
    void latest() {
      var offsets = SeekOperations.getOffsetsForSeek(
          consumer,
          new OffsetsInfo(consumer, topic),
          SeekTypeDTO.LATEST,
          null
      );
      assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 30L));
    }

    @Test
    void beginning() {
      var offsets = SeekOperations.getOffsetsForSeek(
          consumer,
          new OffsetsInfo(consumer, topic),
          SeekTypeDTO.BEGINNING,
          null
      );
      assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 0L, tp3, 25L));
    }

    @Test
    void offsets() {
      var offsets = SeekOperations.getOffsetsForSeek(
          consumer,
          new OffsetsInfo(consumer, topic),
          SeekTypeDTO.OFFSET,
          Map.of(tp1, 10L, tp2, 10L, tp3, 26L)
      );
      assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 10L, tp3, 26L));
    }

    @Test
    void offsetsWithBoundsFixing() {
      var offsets = SeekOperations.getOffsetsForSeek(
          consumer,
          new OffsetsInfo(consumer, topic),
          SeekTypeDTO.OFFSET,
          Map.of(tp1, 10L, tp2, 21L, tp3, 24L)
      );
      assertThat(offsets).containsExactlyInAnyOrderEntriesOf(Map.of(tp2, 20L, tp3, 25L));
    }
  }

}
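Taken together, these cases spell out the contract of SeekOperations.getOffsetsForSeek: empty partitions are always dropped, LATEST resolves to end offsets, BEGINNING to begin offsets, and explicit offsets are clamped into the valid range (21 on tp2 becomes 20, 24 on tp3 becomes 25). A hedged usage sketch outside the test harness; applying the result via assign/seek is an assumption about the surrounding code, not something this diff shows:

// Sketch: resolve concrete offsets for a topic, then position the consumer.
// "orders" is a placeholder topic name.
var info = new OffsetsInfo(consumer, "orders");
var offsets = SeekOperations.getOffsetsForSeek(consumer, info, SeekTypeDTO.BEGINNING, null);
consumer.assign(offsets.keySet());
offsets.forEach(consumer::seek);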
@@ -111,10 +111,11 @@ class TailingEmitterTest extends AbstractIntegrationTest {

    return applicationContext.getBean(MessagesService.class)
        .loadMessages(cluster, topicName,
-            new ConsumerPosition(SeekTypeDTO.LATEST, Map.of(), SeekDirectionDTO.TAILING),
+            new ConsumerPosition(SeekTypeDTO.LATEST, topic, null),
            query,
            MessageFilterTypeDTO.STRING_CONTAINS,
            0,
+            SeekDirectionDTO.TAILING,
            "String",
            "String");
  }
@@ -137,7 +138,7 @@ class TailingEmitterTest extends AbstractIntegrationTest {
    Awaitility.await()
        .pollInSameThread()
        .pollDelay(Duration.ofMillis(100))
-        .atMost(Duration.ofSeconds(10))
+        .atMost(Duration.ofSeconds(200))
        .until(() -> fluxOutput.stream()
            .anyMatch(msg -> msg.getType() == TopicMessageEventDTO.TypeEnum.CONSUMING));
  }
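The change running through this file and the test diffs below is a signature migration: ConsumerPosition no longer carries a seek direction; it now takes the seek type, the topic name, and an optional per-partition map, while the direction travels as a separate loadMessages argument. A plausible reconstruction of the new shape, inferred purely from the call sites in this diff (field names are guesses, not the project's declaration; SeekTypeDTO is the project's own enum):

import java.util.Map;
import org.apache.kafka.common.TopicPartition;

// Guessed from call sites such as: new ConsumerPosition(SeekTypeDTO.LATEST, topic, null)
record ConsumerPosition(SeekTypeDTO seekType,
                        String topic,
                        Map<TopicPartition, Long> seekTo /* nullable */) {
}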
@@ -1,104 +0,0 @@
package com.provectus.kafka.ui.service;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import com.provectus.kafka.ui.client.KsqlClient;
import com.provectus.kafka.ui.exception.KsqlDbNotFoundException;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.InternalKsqlServer;
import com.provectus.kafka.ui.model.KafkaCluster;
import com.provectus.kafka.ui.model.KsqlCommandDTO;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.strategy.ksql.statement.BaseStrategy;
import com.provectus.kafka.ui.strategy.ksql.statement.DescribeStrategy;
import com.provectus.kafka.ui.strategy.ksql.statement.ShowStrategy;
import java.util.List;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.junit.jupiter.MockitoExtension;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;

@ExtendWith(MockitoExtension.class)
class KsqlServiceTest {
  private KsqlService ksqlService;
  private BaseStrategy baseStrategy;
  private BaseStrategy alternativeStrategy;

  @Mock
  private ClustersStorage clustersStorage;
  @Mock
  private KsqlClient ksqlClient;

  @BeforeEach
  public void setUp() {
    this.baseStrategy = new ShowStrategy();
    this.alternativeStrategy = new DescribeStrategy();
    this.ksqlService = new KsqlService(
        this.ksqlClient,
        List.of(baseStrategy, alternativeStrategy)
    );
  }

  @Test
  void shouldThrowKsqlDbNotFoundExceptionOnExecuteKsqlCommand() {
    KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("show streams;");
    KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
    when(kafkaCluster.getKsqldbServer()).thenReturn(null);

    StepVerifier.create(ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)))
        .verifyError(KsqlDbNotFoundException.class);
  }

  @Test
  void shouldThrowUnprocessableEntityExceptionOnExecuteKsqlCommand() {
    KsqlCommandDTO command =
        (new KsqlCommandDTO()).ksql("CREATE STREAM users WITH (KAFKA_TOPIC='users');");
    KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
    when(kafkaCluster.getKsqldbServer()).thenReturn(InternalKsqlServer.builder().url("localhost:8088").build());

    StepVerifier.create(ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)))
        .verifyError(UnprocessableEntityException.class);

    StepVerifier.create(ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)))
        .verifyErrorMessage("Invalid sql");
  }

  @Test
  void shouldSetHostToStrategy() {
    String host = "localhost:8088";
    KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("describe streams;");
    KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);

    when(kafkaCluster.getKsqldbServer()).thenReturn(InternalKsqlServer.builder().url(host).build());
    when(ksqlClient.execute(any(), any())).thenReturn(Mono.just(new KsqlCommandResponseDTO()));

    ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)).block();
    assertThat(alternativeStrategy.getUri()).isEqualTo(host + "/ksql");
  }

  @Test
  void shouldCallClientAndReturnResponse() {
    KsqlCommandDTO command = (new KsqlCommandDTO()).ksql("describe streams;");
    KafkaCluster kafkaCluster = Mockito.mock(KafkaCluster.class);
    KsqlCommandResponseDTO response = new KsqlCommandResponseDTO().message("success");

    when(kafkaCluster.getKsqldbServer()).thenReturn(InternalKsqlServer.builder().url("host").build());
    when(ksqlClient.execute(any(), any())).thenReturn(Mono.just(response));

    KsqlCommandResponseDTO receivedResponse =
        ksqlService.executeKsqlCommand(kafkaCluster, Mono.just(command)).block();
    verify(ksqlClient, times(1)).execute(eq(alternativeStrategy), any());
    assertThat(receivedResponse).isEqualTo(response);
  }
}
@@ -45,7 +45,7 @@ class MessagesServiceTest extends AbstractIntegrationTest {
  @Test
  void loadMessagesReturnsExceptionWhenTopicNotFound() {
    StepVerifier.create(messagesService
-        .loadMessages(cluster, NON_EXISTING_TOPIC, null, null, null, 1, "String", "String"))
+        .loadMessages(cluster, NON_EXISTING_TOPIC, null, null, null, 1, null, "String", "String"))
        .expectError(TopicNotFoundException.class)
        .verify();
  }
@@ -1,8 +1,7 @@
package com.provectus.kafka.ui.service;

-import static com.provectus.kafka.ui.model.SeekDirectionDTO.BACKWARD;
-import static com.provectus.kafka.ui.model.SeekDirectionDTO.FORWARD;
import static com.provectus.kafka.ui.model.SeekTypeDTO.BEGINNING;
+import static com.provectus.kafka.ui.model.SeekTypeDTO.LATEST;
import static com.provectus.kafka.ui.model.SeekTypeDTO.OFFSET;
import static com.provectus.kafka.ui.model.SeekTypeDTO.TIMESTAMP;
import static org.assertj.core.api.Assertions.assertThat;
@@ -17,8 +16,6 @@ import com.provectus.kafka.ui.serde.api.Serde;
import com.provectus.kafka.ui.serdes.ConsumerRecordDeserializer;
import com.provectus.kafka.ui.serdes.PropertyResolverImpl;
import com.provectus.kafka.ui.serdes.builtin.StringSerde;
-import com.provectus.kafka.ui.util.OffsetsSeekBackward;
-import com.provectus.kafka.ui.util.OffsetsSeekForward;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
@@ -112,18 +109,15 @@ class RecordEmitterTest extends AbstractIntegrationTest {
  void pollNothingOnEmptyTopic() {
    var forwardEmitter = new ForwardRecordEmitter(
        this::createConsumer,
-        new OffsetsSeekForward(EMPTY_TOPIC,
-            new ConsumerPosition(BEGINNING, Map.of(), FORWARD)
-        ), RECORD_DESERIALIZER
+        new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
+        RECORD_DESERIALIZER
    );

    var backwardEmitter = new BackwardRecordEmitter(
        this::createConsumer,
-        new OffsetsSeekBackward(
-            EMPTY_TOPIC,
-            new ConsumerPosition(BEGINNING, Map.of(), BACKWARD),
-            100
-        ), RECORD_DESERIALIZER
+        new ConsumerPosition(BEGINNING, EMPTY_TOPIC, null),
+        100,
+        RECORD_DESERIALIZER
    );

    StepVerifier.create(
@@ -143,17 +137,15 @@ class RecordEmitterTest extends AbstractIntegrationTest {
  void pollFullTopicFromBeginning() {
    var forwardEmitter = new ForwardRecordEmitter(
        this::createConsumer,
-        new OffsetsSeekForward(TOPIC,
-            new ConsumerPosition(BEGINNING, Map.of(), FORWARD)
-        ), RECORD_DESERIALIZER
+        new ConsumerPosition(BEGINNING, TOPIC, null),
+        RECORD_DESERIALIZER
    );

    var backwardEmitter = new BackwardRecordEmitter(
        this::createConsumer,
-        new OffsetsSeekBackward(TOPIC,
-            new ConsumerPosition(BEGINNING, Map.of(), BACKWARD),
-            PARTITIONS * MSGS_PER_PARTITION
-        ), RECORD_DESERIALIZER
+        new ConsumerPosition(LATEST, TOPIC, null),
+        PARTITIONS * MSGS_PER_PARTITION,
+        RECORD_DESERIALIZER
    );

    List<String> expectedValues = SENT_RECORDS.stream().map(Record::getValue).collect(Collectors.toList());
@@ -172,17 +164,15 @@ class RecordEmitterTest extends AbstractIntegrationTest {

    var forwardEmitter = new ForwardRecordEmitter(
        this::createConsumer,
-        new OffsetsSeekForward(TOPIC,
-            new ConsumerPosition(OFFSET, targetOffsets, FORWARD)
-        ), RECORD_DESERIALIZER
+        new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
+        RECORD_DESERIALIZER
    );

    var backwardEmitter = new BackwardRecordEmitter(
        this::createConsumer,
-        new OffsetsSeekBackward(TOPIC,
-            new ConsumerPosition(OFFSET, targetOffsets, BACKWARD),
-            PARTITIONS * MSGS_PER_PARTITION
-        ), RECORD_DESERIALIZER
+        new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
+        PARTITIONS * MSGS_PER_PARTITION,
+        RECORD_DESERIALIZER
    );

    var expectedValues = SENT_RECORDS.stream()
@@ -217,17 +207,15 @@ class RecordEmitterTest extends AbstractIntegrationTest {

    var forwardEmitter = new ForwardRecordEmitter(
        this::createConsumer,
-        new OffsetsSeekForward(TOPIC,
-            new ConsumerPosition(TIMESTAMP, targetTimestamps, FORWARD)
-        ), RECORD_DESERIALIZER
+        new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
+        RECORD_DESERIALIZER
    );

    var backwardEmitter = new BackwardRecordEmitter(
        this::createConsumer,
-        new OffsetsSeekBackward(TOPIC,
-            new ConsumerPosition(TIMESTAMP, targetTimestamps, BACKWARD),
-            PARTITIONS * MSGS_PER_PARTITION
-        ), RECORD_DESERIALIZER
+        new ConsumerPosition(TIMESTAMP, TOPIC, targetTimestamps),
+        PARTITIONS * MSGS_PER_PARTITION,
+        RECORD_DESERIALIZER
    );

    var expectedValues = SENT_RECORDS.stream()
@@ -255,10 +243,9 @@ class RecordEmitterTest extends AbstractIntegrationTest {

    var backwardEmitter = new BackwardRecordEmitter(
        this::createConsumer,
-        new OffsetsSeekBackward(TOPIC,
-            new ConsumerPosition(OFFSET, targetOffsets, BACKWARD),
-            numMessages
-        ), RECORD_DESERIALIZER
+        new ConsumerPosition(OFFSET, TOPIC, targetOffsets),
+        numMessages,
+        RECORD_DESERIALIZER
    );

    var expectedValues = SENT_RECORDS.stream()
@@ -281,10 +268,9 @@ class RecordEmitterTest extends AbstractIntegrationTest {

    var backwardEmitter = new BackwardRecordEmitter(
        this::createConsumer,
-        new OffsetsSeekBackward(TOPIC,
-            new ConsumerPosition(OFFSET, offsets, BACKWARD),
-            100
-        ), RECORD_DESERIALIZER
+        new ConsumerPosition(OFFSET, TOPIC, offsets),
+        100,
+        RECORD_DESERIALIZER
    );

    expectEmitter(backwardEmitter,
@@ -331,7 +317,7 @@ class RecordEmitterTest extends AbstractIntegrationTest {
    final Map<String, ? extends Serializable> map = Map.of(
        ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka.getBootstrapServers(),
        ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString(),
-        ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 20, // to check multiple polls
+        ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 19, // to check multiple polls
        ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class,
        ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class
    );
@@ -502,12 +502,13 @@ public class SendAndReadTests extends AbstractIntegrationTest {
        topic,
        new ConsumerPosition(
            SeekTypeDTO.BEGINNING,
-            Map.of(new TopicPartition(topic, 0), 0L),
-            SeekDirectionDTO.FORWARD
+            topic,
+            Map.of(new TopicPartition(topic, 0), 0L)
        ),
        null,
        null,
        1,
+        SeekDirectionDTO.FORWARD,
        msgToSend.getKeySerde().get(),
        msgToSend.getValueSerde().get()
    ).filter(e -> e.getType().equals(TopicMessageEventDTO.TypeEnum.MESSAGE))
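Reading the updated call sites in this file, TailingEmitterTest, and MessagesServiceTest together gives the apparent new parameter order of loadMessages, with the seek direction inserted between the message limit and the serde names. This is reconstructed from call sites only and is not the service's authoritative declaration:

import reactor.core.publisher.Flux;

// Reconstructed shape; parameter names are guesses based on this diff alone.
interface MessagesServiceShape {
  Flux<TopicMessageEventDTO> loadMessages(KafkaCluster cluster,
                                          String topic,
                                          ConsumerPosition position,   // may be null
                                          String query,                // may be null
                                          MessageFilterTypeDTO filterType,
                                          int limit,
                                          SeekDirectionDTO direction,  // new position in the list
                                          String keySerde,
                                          String valueSerde);
}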
@@ -1,85 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class CreateStrategyTest {
  private final ObjectMapper mapper = new ObjectMapper();
  private CreateStrategy strategy;

  @BeforeEach
  void setUp() {
    strategy = new CreateStrategy();
  }

  @Test
  void shouldReturnUri() {
    strategy.host("ksqldb-server:8088");
    assertThat(strategy.getUri()).isEqualTo("ksqldb-server:8088/ksql");
  }

  @Test
  void shouldReturnTrueInTest() {
    assertTrue(strategy.test("CREATE STREAM stream WITH (KAFKA_TOPIC='topic');"));
    assertTrue(strategy.test("CREATE STREAM stream"
        + " AS SELECT users.id AS userid FROM users EMIT CHANGES;"
    ));
    assertTrue(strategy.test(
        "CREATE TABLE table (id VARCHAR) WITH (KAFKA_TOPIC='table');"
    ));
    assertTrue(strategy.test(
        "CREATE TABLE pageviews_regions WITH (KEY_FORMAT='JSON')"
            + " AS SELECT gender, COUNT(*) AS numbers"
            + " FROM pageviews EMIT CHANGES;"
    ));
  }

  @Test
  void shouldReturnFalseInTest() {
    assertFalse(strategy.test("show streams;"));
    assertFalse(strategy.test("show tables;"));
    assertFalse(strategy.test("CREATE TABLE test;"));
    assertFalse(strategy.test("CREATE STREAM test;"));
  }

  @Test
  void shouldSerializeResponse() {
    String message = "updated successful";
    JsonNode node = getResponseWithMessage(message);
    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
    assertThat(serializedResponse.getMessage()).isEqualTo(message);
  }

  @Test
  void shouldSerializeWithException() {
    JsonNode commandStatusNode = mapper.createObjectNode().put("commandStatus", "nodeWithMessage");
    JsonNode node = mapper.createArrayNode().add(mapper.valueToTree(commandStatusNode));
    Exception exception = assertThrows(
        UnprocessableEntityException.class,
        () -> strategy.serializeResponse(node)
    );

    assertThat(exception.getMessage()).isEqualTo("KSQL DB response mapping error");
  }

  @SneakyThrows
  private JsonNode getResponseWithMessage(String message) {
    JsonNode nodeWithMessage = mapper.createObjectNode().put("message", message);
    JsonNode commandStatusNode = mapper.createObjectNode().set("commandStatus", nodeWithMessage);
    return mapper.createArrayNode().add(mapper.valueToTree(commandStatusNode));
  }
}
@@ -1,76 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.List;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class DescribeStrategyTest {
  private final ObjectMapper mapper = new ObjectMapper();
  private DescribeStrategy strategy;

  @BeforeEach
  void setUp() {
    strategy = new DescribeStrategy();
  }

  @Test
  void shouldReturnUri() {
    strategy.host("ksqldb-server:8088");
    assertThat(strategy.getUri()).isEqualTo("ksqldb-server:8088/ksql");
  }

  @Test
  void shouldReturnTrueInTest() {
    assertTrue(strategy.test("DESCRIBE users;"));
    assertTrue(strategy.test("DESCRIBE EXTENDED users;"));
  }

  @Test
  void shouldReturnFalseInTest() {
    assertFalse(strategy.test("list streams;"));
    assertFalse(strategy.test("show tables;"));
  }

  @Test
  void shouldSerializeResponse() {
    JsonNode node = getResponseWithObjectNode();
    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
    TableDTO table = serializedResponse.getData();
    assertThat(table.getHeaders()).isEqualTo(List.of("key", "value"));
    assertThat(table.getRows()).isEqualTo(List.of(List.of("name", "kafka")));
  }

  @Test
  void shouldSerializeWithException() {
    JsonNode sourceDescriptionNode =
        mapper.createObjectNode().put("sourceDescription", "nodeWithMessage");
    JsonNode node = mapper.createArrayNode().add(mapper.valueToTree(sourceDescriptionNode));
    Exception exception = assertThrows(
        UnprocessableEntityException.class,
        () -> strategy.serializeResponse(node)
    );

    assertThat(exception.getMessage()).isEqualTo("KSQL DB response mapping error");
  }

  @SneakyThrows
  private JsonNode getResponseWithObjectNode() {
    JsonNode nodeWithMessage = mapper.createObjectNode().put("name", "kafka");
    JsonNode nodeWithResponse = mapper.createObjectNode().set("sourceDescription", nodeWithMessage);
    return mapper.createArrayNode().add(mapper.valueToTree(nodeWithResponse));
  }
}
@@ -1,75 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class DropStrategyTest {
  private final ObjectMapper mapper = new ObjectMapper();
  private DropStrategy strategy;

  @BeforeEach
  void setUp() {
    strategy = new DropStrategy();
  }

  @Test
  void shouldReturnUri() {
    strategy.host("ksqldb-server:8088");
    assertThat(strategy.getUri()).isEqualTo("ksqldb-server:8088/ksql");
  }

  @Test
  void shouldReturnTrueInTest() {
    assertTrue(strategy.test("drop table table1;"));
    assertTrue(strategy.test("drop stream stream2;"));
  }

  @Test
  void shouldReturnFalseInTest() {
    assertFalse(strategy.test("show streams;"));
    assertFalse(strategy.test("show tables;"));
    assertFalse(strategy.test("create table test;"));
    assertFalse(strategy.test("create stream test;"));
  }

  @Test
  void shouldSerializeResponse() {
    String message = "updated successful";
    JsonNode node = getResponseWithMessage(message);
    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
    assertThat(serializedResponse.getMessage()).isEqualTo(message);
  }

  @Test
  void shouldSerializeWithException() {
    JsonNode commandStatusNode = mapper.createObjectNode().put("commandStatus", "nodeWithMessage");
    JsonNode node = mapper.createArrayNode().add(mapper.valueToTree(commandStatusNode));
    Exception exception = assertThrows(
        UnprocessableEntityException.class,
        () -> strategy.serializeResponse(node)
    );

    assertThat(exception.getMessage()).isEqualTo("KSQL DB response mapping error");
  }

  @SneakyThrows
  private JsonNode getResponseWithMessage(String message) {
    JsonNode nodeWithMessage = mapper.createObjectNode().put("message", message);
    JsonNode commandStatusNode = mapper.createObjectNode().set("commandStatus", nodeWithMessage);
    return mapper.createArrayNode().add(mapper.valueToTree(commandStatusNode));
  }
}
@@ -1,74 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.List;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class ExplainStrategyTest {
  private final ObjectMapper mapper = new ObjectMapper();
  private ExplainStrategy strategy;

  @BeforeEach
  void setUp() {
    strategy = new ExplainStrategy();
  }

  @Test
  void shouldReturnUri() {
    strategy.host("ksqldb-server:8088");
    assertThat(strategy.getUri()).isEqualTo("ksqldb-server:8088/ksql");
  }

  @Test
  void shouldReturnTrueInTest() {
    assertTrue(strategy.test("explain users_query_id;"));
  }

  @Test
  void shouldReturnFalseInTest() {
    assertFalse(strategy.test("show queries;"));
  }

  @Test
  void shouldSerializeResponse() {
    JsonNode node = getResponseWithObjectNode();
    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
    TableDTO table = serializedResponse.getData();
    assertThat(table.getHeaders()).isEqualTo(List.of("key", "value"));
    assertThat(table.getRows()).isEqualTo(List.of(List.of("name", "kafka")));
  }

  @Test
  void shouldSerializeWithException() {
    JsonNode sourceDescriptionNode =
        mapper.createObjectNode().put("sourceDescription", "nodeWithMessage");
    JsonNode node = mapper.createArrayNode().add(mapper.valueToTree(sourceDescriptionNode));
    Exception exception = assertThrows(
        UnprocessableEntityException.class,
        () -> strategy.serializeResponse(node)
    );

    assertThat(exception.getMessage()).isEqualTo("KSQL DB response mapping error");
  }

  @SneakyThrows
  private JsonNode getResponseWithObjectNode() {
    JsonNode nodeWithMessage = mapper.createObjectNode().put("name", "kafka");
    JsonNode nodeWithResponse = mapper.createObjectNode().set("queryDescription", nodeWithMessage);
    return mapper.createArrayNode().add(mapper.valueToTree(nodeWithResponse));
  }
}
@@ -1,79 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.List;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class SelectStrategyTest {
  private final ObjectMapper mapper = new ObjectMapper();
  private SelectStrategy strategy;

  @BeforeEach
  void setUp() {
    strategy = new SelectStrategy();
  }

  @Test
  void shouldReturnUri() {
    strategy.host("ksqldb-server:8088");
    assertThat(strategy.getUri()).isEqualTo("ksqldb-server:8088/query");
  }

  @Test
  void shouldReturnTrueInTest() {
    assertTrue(strategy.test("select * from users;"));
  }

  @Test
  void shouldReturnFalseInTest() {
    assertFalse(strategy.test("show streams;"));
    assertFalse(strategy.test("select *;"));
  }

  @Test
  void shouldSerializeResponse() {
    JsonNode node = getResponseWithData();
    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
    TableDTO table = serializedResponse.getData();
    assertThat(table.getHeaders()).isEqualTo(List.of("header1", "header2"));
    assertThat(table.getRows()).isEqualTo(List.of(List.of("value1", "value2")));
  }

  @Test
  void shouldSerializeWithException() {
    JsonNode node = mapper.createObjectNode();
    Exception exception = assertThrows(
        UnprocessableEntityException.class,
        () -> strategy.serializeResponse(node)
    );

    assertThat(exception.getMessage()).isEqualTo("KSQL DB response mapping error");
  }

  @SneakyThrows
  private JsonNode getResponseWithData() {
    JsonNode headerNode = mapper.createObjectNode().set(
        "header", mapper.createObjectNode().put("schema", "header1, header2")
    );
    JsonNode row = mapper.createObjectNode().set(
        "row", mapper.createObjectNode().set(
            "columns", mapper.createArrayNode().add("value1").add("value2")
        )
    );
    return mapper.createArrayNode().add(headerNode).add(row);
  }
}
@@ -1,102 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import com.provectus.kafka.ui.model.TableDTO;
import java.util.List;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DynamicTest;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestFactory;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class ShowStrategyTest {
  private final ObjectMapper mapper = new ObjectMapper();
  private ShowStrategy strategy;

  @BeforeEach
  void setUp() {
    strategy = new ShowStrategy();
  }

  @Test
  void shouldReturnUri() {
    strategy.host("ksqldb-server:8088");
    assertThat(strategy.getUri()).isEqualTo("ksqldb-server:8088/ksql");
  }

  @Test
  void shouldReturnTrueInTest() {
    assertTrue(strategy.test("SHOW STREAMS;"));
    assertTrue(strategy.test("SHOW TABLES;"));
    assertTrue(strategy.test("SHOW TOPICS;"));
    assertTrue(strategy.test("SHOW QUERIES;"));
    assertTrue(strategy.test("SHOW PROPERTIES;"));
    assertTrue(strategy.test("SHOW FUNCTIONS;"));
    assertTrue(strategy.test("LIST STREAMS;"));
    assertTrue(strategy.test("LIST TABLES;"));
    assertTrue(strategy.test("LIST TOPICS;"));
    assertTrue(strategy.test("LIST FUNCTIONS;"));
  }

  @Test
  void shouldReturnFalseInTest() {
    assertFalse(strategy.test("LIST QUERIES;"));
    assertFalse(strategy.test("LIST PROPERTIES;"));
  }

  @TestFactory
  public Iterable<DynamicTest> shouldSerialize() {
    return List.of(
        shouldSerializeGenerate("streams", "show streams;"),
        shouldSerializeGenerate("tables", "show tables;"),
        shouldSerializeGenerate("topics", "show topics;"),
        shouldSerializeGenerate("properties", "show properties;"),
        shouldSerializeGenerate("functions", "show functions;"),
        shouldSerializeGenerate("queries", "show queries;")
    );
  }

  public DynamicTest shouldSerializeGenerate(final String key, final String sql) {
    return DynamicTest.dynamicTest("Should serialize " + key,
        () -> {
          JsonNode node = getResponseWithData(key);
          strategy.test(sql);
          KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
          TableDTO table = serializedResponse.getData();
          assertThat(table.getHeaders()).isEqualTo(List.of("header"));
          assertThat(table.getRows()).isEqualTo(List.of(List.of("value")));
        }
    );
  }

  @Test
  void shouldSerializeWithException() {
    JsonNode node = getResponseWithData("streams");
    strategy.test("show tables;");
    Exception exception = assertThrows(
        UnprocessableEntityException.class,
        () -> strategy.serializeResponse(node)
    );

    assertThat(exception.getMessage()).isEqualTo("KSQL DB response mapping error");
  }

  @SneakyThrows
  private JsonNode getResponseWithData(String key) {
    JsonNode nodeWithDataItem = mapper.createObjectNode().put("header", "value");
    JsonNode nodeWithData = mapper.createArrayNode().add(nodeWithDataItem);
    JsonNode nodeWithResponse = mapper.createObjectNode().set(key, nodeWithData);
    return mapper.createArrayNode().add(mapper.valueToTree(nodeWithResponse));
  }
}
@@ -1,72 +0,0 @@
package com.provectus.kafka.ui.strategy.ksql.statement;

import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.provectus.kafka.ui.exception.UnprocessableEntityException;
import com.provectus.kafka.ui.model.KsqlCommandResponseDTO;
import lombok.SneakyThrows;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.junit.jupiter.MockitoExtension;

@ExtendWith(MockitoExtension.class)
class TerminateStrategyTest {
  private final ObjectMapper mapper = new ObjectMapper();
  private TerminateStrategy strategy;

  @BeforeEach
  void setUp() {
    strategy = new TerminateStrategy();
  }

  @Test
  void shouldReturnUri() {
    strategy.host("ksqldb-server:8088");
    assertThat(strategy.getUri()).isEqualTo("ksqldb-server:8088/ksql");
  }

  @Test
  void shouldReturnTrueInTest() {
    assertTrue(strategy.test("terminate query_id;"));
  }

  @Test
  void shouldReturnFalseInTest() {
    assertFalse(strategy.test("show streams;"));
    assertFalse(strategy.test("create table test;"));
  }

  @Test
  void shouldSerializeResponse() {
    String message = "query terminated.";
    JsonNode node = getResponseWithMessage(message);
    KsqlCommandResponseDTO serializedResponse = strategy.serializeResponse(node);
    assertThat(serializedResponse.getMessage()).isEqualTo(message);
  }

  @Test
  void shouldSerializeWithException() {
    JsonNode commandStatusNode = mapper.createObjectNode().put("commandStatus", "nodeWithMessage");
    JsonNode node = mapper.createArrayNode().add(mapper.valueToTree(commandStatusNode));
    Exception exception = assertThrows(
        UnprocessableEntityException.class,
        () -> strategy.serializeResponse(node)
    );

    assertThat(exception.getMessage()).isEqualTo("KSQL DB response mapping error");
  }

  @SneakyThrows
  private JsonNode getResponseWithMessage(String message) {
    JsonNode nodeWithMessage = mapper.createObjectNode().put("message", message);
    JsonNode commandStatusNode = mapper.createObjectNode().set("commandStatus", nodeWithMessage);
    return mapper.createArrayNode().add(mapper.valueToTree(commandStatusNode));
  }
}
@@ -1,196 +0,0 @@
package com.provectus.kafka.ui.util;

import static org.assertj.core.api.Assertions.assertThat;

import com.provectus.kafka.ui.model.ConsumerPosition;
import com.provectus.kafka.ui.model.SeekDirectionDTO;
import com.provectus.kafka.ui.model.SeekTypeDTO;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Bytes;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;

class OffsetsSeekTest {

  final String topic = "test";
  final TopicPartition tp0 = new TopicPartition(topic, 0); //offsets: start 0, end 0
  final TopicPartition tp1 = new TopicPartition(topic, 1); //offsets: start 10, end 10
  final TopicPartition tp2 = new TopicPartition(topic, 2); //offsets: start 0, end 20
  final TopicPartition tp3 = new TopicPartition(topic, 3); //offsets: start 25, end 30

  MockConsumer<Bytes, Bytes> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);

  @BeforeEach
  void initConsumer() {
    consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    consumer.updatePartitions(
        topic,
        Stream.of(tp0, tp1, tp2, tp3)
            .map(tp -> new PartitionInfo(topic, tp.partition(), null, null, null, null))
            .collect(Collectors.toList()));
    consumer.updateBeginningOffsets(Map.of(
        tp0, 0L,
        tp1, 10L,
        tp2, 0L,
        tp3, 25L
    ));
    consumer.updateEndOffsets(Map.of(
        tp0, 0L,
        tp1, 10L,
        tp2, 20L,
        tp3, 30L
    ));
  }

  @Test
  void forwardSeekToBeginningAllPartitions() {
    var seek = new OffsetsSeekForward(
        topic,
        new ConsumerPosition(
            SeekTypeDTO.BEGINNING,
            Map.of(tp0, 0L, tp1, 0L),
            SeekDirectionDTO.FORWARD
        )
    );

    seek.assignAndSeek(consumer);
    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1);
    assertThat(consumer.position(tp0)).isZero();
    assertThat(consumer.position(tp1)).isEqualTo(10L);
  }

  @Test
  void backwardSeekToBeginningAllPartitions() {
    var seek = new OffsetsSeekBackward(
        topic,
        new ConsumerPosition(
            SeekTypeDTO.BEGINNING,
            Map.of(tp2, 0L, tp3, 0L),
            SeekDirectionDTO.BACKWARD
        ),
        10
    );

    seek.assignAndSeek(consumer);
    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp2, tp3);
    assertThat(consumer.position(tp2)).isEqualTo(20L);
    assertThat(consumer.position(tp3)).isEqualTo(30L);
  }

  @Test
  void forwardSeekToBeginningWithPartitionsList() {
    var seek = new OffsetsSeekForward(
        topic,
        new ConsumerPosition(SeekTypeDTO.BEGINNING, Map.of(), SeekDirectionDTO.FORWARD));
    seek.assignAndSeek(consumer);
    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2, tp3);
    assertThat(consumer.position(tp0)).isZero();
    assertThat(consumer.position(tp1)).isEqualTo(10L);
    assertThat(consumer.position(tp2)).isZero();
    assertThat(consumer.position(tp3)).isEqualTo(25L);
  }

  @Test
  void backwardSeekToBeginningWithPartitionsList() {
    var seek = new OffsetsSeekBackward(
        topic,
        new ConsumerPosition(SeekTypeDTO.BEGINNING, Map.of(), SeekDirectionDTO.BACKWARD),
        10
    );
    seek.assignAndSeek(consumer);
    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp0, tp1, tp2, tp3);
    assertThat(consumer.position(tp0)).isZero();
    assertThat(consumer.position(tp1)).isEqualTo(10L);
    assertThat(consumer.position(tp2)).isEqualTo(20L);
    assertThat(consumer.position(tp3)).isEqualTo(30L);
  }

  @Test
  void forwardSeekToOffset() {
    var seek = new OffsetsSeekForward(
        topic,
        new ConsumerPosition(
            SeekTypeDTO.OFFSET,
            Map.of(tp0, 0L, tp1, 1L, tp2, 2L),
            SeekDirectionDTO.FORWARD
        )
    );
    seek.assignAndSeek(consumer);
    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp2);
    assertThat(consumer.position(tp2)).isEqualTo(2L);
  }

  @Test
  void backwardSeekToOffset() {
    var seek = new OffsetsSeekBackward(
        topic,
        new ConsumerPosition(
            SeekTypeDTO.OFFSET,
            Map.of(tp0, 0L, tp1, 1L, tp2, 20L),
            SeekDirectionDTO.BACKWARD
        ),
        2
    );
    seek.assignAndSeek(consumer);
    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp2);
    assertThat(consumer.position(tp2)).isEqualTo(20L);
  }

  @Test
  void backwardSeekToOffsetOnlyOnePartition() {
    var seek = new OffsetsSeekBackward(
        topic,
        new ConsumerPosition(
            SeekTypeDTO.OFFSET,
            Map.of(tp2, 20L),
            SeekDirectionDTO.BACKWARD
        ),
        20
    );
    seek.assignAndSeek(consumer);
    assertThat(consumer.assignment()).containsExactlyInAnyOrder(tp2);
    assertThat(consumer.position(tp2)).isEqualTo(20L);
  }


  @Nested
  class WaitingOffsetsTest {

    OffsetsSeekForward.WaitingOffsets offsets;

    @BeforeEach
    void assignAndCreateOffsets() {
      consumer.assign(List.of(tp0, tp1, tp2, tp3));
      offsets = new OffsetsSeek.WaitingOffsets(topic, consumer, List.of(tp0, tp1, tp2, tp3));
    }

    @Test
    void collectsSignificantOffsetsMinus1ForAssignedPartitions() {
      // offsets for partition 0 & 1 should be skipped because they
      // effectively contains no data (start offset = end offset)
      assertThat(offsets.getEndOffsets()).containsExactlyInAnyOrderEntriesOf(
          Map.of(2, 19L, 3, 29L)
      );
    }

    @Test
    void returnTrueWhenOffsetsReachedReached() {
      assertThat(offsets.endReached()).isFalse();
      offsets.markPolled(new ConsumerRecord<>(topic, 2, 19, null, null));
      assertThat(offsets.endReached()).isFalse();
      offsets.markPolled(new ConsumerRecord<>(topic, 3, 29, null, null));
      assertThat(offsets.endReached()).isTrue();
    }
  }

}
@@ -862,28 +862,6 @@ paths:
         200:
           description: OK

-  /api/clusters/{clusterName}/consumer-groups:
-    get:
-      tags:
-        - Consumer Groups
-      summary: get all ConsumerGroups
-      operationId: getConsumerGroups
-      parameters:
-        - name: clusterName
-          in: path
-          required: true
-          schema:
-            type: string
-      responses:
-        200:
-          description: OK
-          content:
-            application/json:
-              schema:
-                type: array
-                items:
-                  $ref: '#/components/schemas/ConsumerGroup'
-
   /api/clusters/{clusterName}/consumer-groups/{id}/offsets:
     post:
       tags:
@@ -1561,31 +1539,6 @@ paths:
         200:
           description: OK

-  /api/clusters/{clusterName}/ksql:
-    description: Deprecated - use ksql/v2 instead!
-    post:
-      tags:
-        - Ksql
-      summary: executeKsqlCommand
-      operationId: executeKsqlCommand
-      parameters:
-        - name: clusterName
-          in: path
-          required: true
-          schema:
-            type: string
-      requestBody:
-        content:
-          application/json:
-            schema:
-              $ref: '#/components/schemas/KsqlCommand'
-      responses:
-        200:
-          description: OK
-          content:
-            application/json:
-              schema:
-                $ref: '#/components/schemas/KsqlCommandResponse'
-
   /api/clusters/{clusterName}/ksql/v2:
     post:
@@ -1885,7 +1838,7 @@ paths:
     get:
       tags:
         - TimeStampFormat
-      summary: getTimeStampFormat
+      summary: get system default datetime format
       operationId: getTimeStampFormat
       responses:
         200:
@@ -1894,6 +1847,21 @@ paths:
           application/json:
             schema:
               $ref: '#/components/schemas/TimeStampFormat'
+
+  /api/info/timestampformat/iso:
+    get:
+      tags:
+        - TimeStampFormat
+      summary: get system default datetime format (in ISO format, for JS)
+      operationId: getTimeStampFormatISO
+      responses:
+        200:
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/TimeStampFormat'
+
 components:
   schemas:
     TopicSerdeSuggestion:
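Note on the new endpoint: it serves the same `TimeStampFormat` schema as the existing route, just expressed for JS date libraries. A minimal sketch of how a frontend might call it — the response field name `timeStampFormat` is an assumption based on the schema name, and in the real app the OpenAPI-generated client would be used instead of raw `fetch`:

```ts
// Hypothetical helper; field name and base URL are assumptions for illustration.
interface TimeStampFormat {
  timeStampFormat?: string; // assumed from the TimeStampFormat schema name
}

async function fetchIsoTimestampFormat(baseUrl = ''): Promise<string | undefined> {
  const res = await fetch(`${baseUrl}/api/info/timestampformat/iso`);
  if (!res.ok) throw new Error(`Unexpected status ${res.status}`);
  const body: TimeStampFormat = await res.json();
  return body.timeStampFormat;
}
```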
@@ -3094,18 +3062,6 @@ components:
                 items:
                   $ref: '#/components/schemas/ConnectorPluginConfig'

-    KsqlCommand:
-      type: object
-      properties:
-        ksql:
-          type: string
-        streamsProperties:
-          type: object
-          additionalProperties:
-            type: string
-      required:
-        - ksql
-
     KsqlCommandV2:
       type: object
       properties:
@@ -3152,31 +3108,6 @@ components:
         valueFormat:
           type: string

-    KsqlCommandResponse:
-      type: object
-      properties:
-        data:
-          $ref: '#/components/schemas/Table'
-        message:
-          type: string
-
-    Table:
-      type: object
-      properties:
-        headers:
-          type: array
-          items:
-            type: string
-        rows:
-          type: array
-          items:
-            type: array
-            items:
-              type: string
-      required:
-        - headers
-        - rows
-
     KsqlResponse:
       type: object
       properties:
@@ -32,7 +32,7 @@
     <maven.surefire-plugin.version>2.22.2</maven.surefire-plugin.version>
     <allure-maven.version>2.10.0</allure-maven.version>
     <kafka.version>3.0.0</kafka.version>
-    <netty.version>4.1.77.Final</netty.version>
+    <netty.version>4.1.84.Final</netty.version>
     <qase.io.version>2.1.3</qase.io.version>
   </properties>

@@ -6,5 +6,5 @@ import lombok.experimental.Accessors;
 @Data
 @Accessors(chain = true)
 public class Topic {
-  private String name, compactPolicyValue, timeToRetainData, maxSizeOnDisk, maxMessageBytes, messageKey, messageContent ;
+  private String name, cleanupPolicyValue, timeToRetainData, maxSizeOnDisk, maxMessageBytes, messageKey, messageContent ;
 }
@@ -35,7 +35,8 @@ public class KafkaConnectList {

   @Step
   public KafkaConnectList openConnector(String connectorName) {
-    $x(String.format(tabElementLocator,connectorName)).shouldBe(Condition.visible).click();
+    $x(String.format(tabElementLocator,connectorName))
+        .shouldBe(Condition.enabled).click();
     return this;
   }

@@ -30,7 +30,8 @@ public class SchemaRegistryList {

   @Step
   public SchemaRegistryList openSchema(String schemaName) {
-    $x(String.format(schemaTabElementLocator,schemaName)).shouldBe(Condition.visible).click();
+    $x(String.format(schemaTabElementLocator,schemaName))
+        .shouldBe(Condition.enabled).click();
     return this;
   }

@@ -17,6 +17,8 @@ public class TopicDetails {

   protected SelenideElement loadingSpinner = $x("//*[contains(text(),'Loading')]");
   protected SelenideElement dotMenuBtn = $$x("//button[@aria-label='Dropdown Toggle']").first();
+  protected SelenideElement dotPartitionIdMenuBtn = $(By.cssSelector("button.sc-hOqruk.eYtACj"));
+  protected SelenideElement clearMessagesBtn = $x(("//div[contains(text(), 'Clear messages')]"));
   protected SelenideElement overviewTab = $x("//a[contains(text(),'Overview')]");
   protected SelenideElement messagesTab = $x("//a[contains(text(),'Messages')]");
   protected SelenideElement editSettingsTab = $x("//li[@role][contains(text(),'Edit settings')]");
@@ -45,6 +47,18 @@ public class TopicDetails {
     return this;
   }

+  @Step
+  public TopicDetails openDotPartitionIdMenu() {
+    dotPartitionIdMenuBtn.shouldBe(Condition.visible.because("dot menu invisible")).click();
+    return this;
+  }
+
+  @Step
+  public TopicDetails clickClearMessagesBtn() {
+    clearMessagesBtn.shouldBe(Condition.visible.because("Clear Messages invisible")).click();
+    return this;
+  }
+
   @Step
   public TopicDetails deleteTopic() {
     clickByJavaScript(dotMenuBtn);
@@ -70,6 +84,11 @@ public class TopicDetails {
     return contentMessage.matches(contentMessageTab.getText().trim());
   }

+  @Step
+  public String MessageCountAmount() {
+    return $(By.xpath("//table[@class=\"sc-hiSbEG cvnuic\"]/tbody/tr/td[5]")).getText();
+  }
+
   private enum DotMenuHeaderItems {
     EDIT_SETTINGS("Edit settings"),
     CLEAR_MESSAGES("Clear messages"),
@@ -91,6 +110,26 @@ public class TopicDetails {
     }
   }

+  public enum DotPartitionIdMenu {
+    CLEAR_MESSAGES("Clear messages");
+
+
+    private final String value;
+
+    DotPartitionIdMenu(String value) {
+      this.value = value;
+    }
+
+    public String getValue() {
+      return value;
+    }
+
+    @Override
+    public String toString() {
+      return "DotPartitionIdMenuItems{" + "value='" + value + '\'' + '}';
+    }
+  }
+
   public enum TopicMenu {
     OVERVIEW("Overview"),
     MESSAGES("Messages"),
@@ -42,7 +42,8 @@ public class TopicsList {

   @Step
   public TopicsList openTopic(String topicName) {
-    $(By.linkText(topicName)).click();
+    $(By.linkText(topicName))
+        .shouldBe(Condition.enabled).click();
     return this;
   }
 }
@@ -6,6 +6,7 @@ import com.provectus.kafka.ui.pages.topic.TopicDetails;
 import com.provectus.kafka.ui.utilities.qaseIoUtils.annotations.AutomationStatus;
 import com.provectus.kafka.ui.utilities.qaseIoUtils.annotations.Suite;
 import com.provectus.kafka.ui.utilities.qaseIoUtils.enums.Status;
+import io.qameta.allure.Issue;
 import io.qase.api.annotation.CaseId;
 import org.assertj.core.api.SoftAssertions;
 import org.junit.jupiter.api.*;
@@ -14,6 +15,7 @@ import java.util.ArrayList;
 import java.util.List;

 import static com.provectus.kafka.ui.pages.NaviSideBar.SideMenuOption.TOPICS;
+import static com.provectus.kafka.ui.pages.topic.TopicDetails.DotPartitionIdMenu.CLEAR_MESSAGES;
 import static com.provectus.kafka.ui.settings.Source.CLUSTER_NAME;
 import static com.provectus.kafka.ui.utilities.FileUtils.fileToString;
@@ -23,18 +25,23 @@ public class TopicTests extends BaseTest {
   private static final String SUITE_TITLE = "Topics";
   private static final Topic TOPIC_FOR_UPDATE = new Topic()
       .setName("topic-to-update")
-      .setCompactPolicyValue("Compact")
+      .setCleanupPolicyValue("Compact")
       .setTimeToRetainData("604800001")
       .setMaxSizeOnDisk("20 GB")
       .setMaxMessageBytes("1000020")
       .setMessageKey(fileToString(System.getProperty("user.dir") + "/src/test/resources/producedkey.txt"))
       .setMessageContent(fileToString(System.getProperty("user.dir") + "/src/test/resources/testData.txt"));
+
+  private static final Topic TOPIC_FOR_MESSAGES = new Topic()
+      .setName("topic-with-clean-message-attribute")
+      .setMessageKey(fileToString(System.getProperty("user.dir") + "/src/test/resources/producedkey.txt"))
+      .setMessageContent(fileToString(System.getProperty("user.dir") + "/src/test/resources/testData.txt"));

   private static final Topic TOPIC_FOR_DELETE = new Topic().setName("topic-to-delete");
   private static final List<Topic> TOPIC_LIST = new ArrayList<>();

   @BeforeAll
   public void beforeAll() {
-    TOPIC_LIST.addAll(List.of(TOPIC_FOR_UPDATE, TOPIC_FOR_DELETE));
+    TOPIC_LIST.addAll(List.of(TOPIC_FOR_UPDATE, TOPIC_FOR_DELETE, TOPIC_FOR_MESSAGES));
     TOPIC_LIST.forEach(topic -> apiHelper.createTopic(CLUSTER_NAME, topic.getName()));
   }
@@ -81,7 +88,7 @@ public class TopicTests extends BaseTest {
         .openEditSettings();
     topicCreateEditForm
         .waitUntilScreenReady()
-        .selectCleanupPolicy(TOPIC_FOR_UPDATE.getCompactPolicyValue())
+        .selectCleanupPolicy(TOPIC_FOR_UPDATE.getCleanupPolicyValue())
         .setMinInsyncReplicas(10)
         .setTimeToRetainDataInMs(TOPIC_FOR_UPDATE.getTimeToRetainData())
         .setMaxSizeOnDiskInGB(TOPIC_FOR_UPDATE.getMaxSizeOnDisk())
@@ -98,7 +105,7 @@ public class TopicTests extends BaseTest {
         .waitUntilScreenReady()
         .openEditSettings();
     SoftAssertions softly = new SoftAssertions();
-    softly.assertThat(topicCreateEditForm.getCleanupPolicy()).as("Cleanup Policy").isEqualTo(TOPIC_FOR_UPDATE.getCompactPolicyValue());
+    softly.assertThat(topicCreateEditForm.getCleanupPolicy()).as("Cleanup Policy").isEqualTo(TOPIC_FOR_UPDATE.getCleanupPolicyValue());
     softly.assertThat(topicCreateEditForm.getTimeToRetain()).as("Time to retain").isEqualTo(TOPIC_FOR_UPDATE.getTimeToRetainData());
     softly.assertThat(topicCreateEditForm.getMaxSizeOnDisk()).as("Max size on disk").isEqualTo(TOPIC_FOR_UPDATE.getMaxSizeOnDisk());
     softly.assertThat(topicCreateEditForm.getMaxMessageBytes()).as("Max message bytes").isEqualTo(TOPIC_FOR_UPDATE.getMaxMessageBytes());
@@ -137,24 +144,55 @@ public class TopicTests extends BaseTest {
         .openSideMenu(TOPICS);
     topicsList
         .waitUntilScreenReady()
-        .openTopic(TOPIC_FOR_UPDATE.getName());
+        .openTopic(TOPIC_FOR_MESSAGES.getName());
     topicDetails
         .waitUntilScreenReady()
         .openTopicMenu(TopicDetails.TopicMenu.MESSAGES)
         .clickProduceMessageBtn();
     produceMessagePanel
         .waitUntilScreenReady()
-        .setContentFiled(TOPIC_FOR_UPDATE.getMessageContent())
-        .setKeyField(TOPIC_FOR_UPDATE.getMessageKey())
+        .setContentFiled(TOPIC_FOR_MESSAGES.getMessageContent())
+        .setKeyField(TOPIC_FOR_MESSAGES.getMessageKey())
         .submitProduceMessage();
     topicDetails
         .waitUntilScreenReady();
     SoftAssertions softly = new SoftAssertions();
-    softly.assertThat(topicDetails.isKeyMessageVisible((TOPIC_FOR_UPDATE.getMessageKey()))).withFailMessage("isKeyMessageVisible()").isTrue();
-    softly.assertThat(topicDetails.isContentMessageVisible((TOPIC_FOR_UPDATE.getMessageContent()).trim())).withFailMessage("isContentMessageVisible()").isTrue();
+    softly.assertThat(topicDetails.isKeyMessageVisible((TOPIC_FOR_MESSAGES.getMessageKey()))).withFailMessage("isKeyMessageVisible()").isTrue();
+    softly.assertThat(topicDetails.isContentMessageVisible((TOPIC_FOR_MESSAGES.getMessageContent()).trim())).withFailMessage("isContentMessageVisible()").isTrue();
     softly.assertAll();
   }

+  @Issue("Uncomment last assertion after bug https://github.com/provectus/kafka-ui/issues/2778 fix")
+  @DisplayName("clear message")
+  @Suite(suiteId = SUITE_ID, title = SUITE_TITLE)
+  @AutomationStatus(status = Status.AUTOMATED)
+  @CaseId(19)
+  @Test
+  void clearMessage() {
+    naviSideBar
+        .openSideMenu(TOPICS);
+    topicsList
+        .waitUntilScreenReady()
+        .openTopic(TOPIC_FOR_MESSAGES.getName());
+    topicDetails
+        .waitUntilScreenReady()
+        .openTopicMenu(TopicDetails.TopicMenu.OVERVIEW)
+        .clickProduceMessageBtn();
+    produceMessagePanel
+        .waitUntilScreenReady()
+        .setContentFiled(TOPIC_FOR_MESSAGES.getMessageContent())
+        .setKeyField(TOPIC_FOR_MESSAGES.getMessageKey())
+        .submitProduceMessage();
+    topicDetails
+        .waitUntilScreenReady();
+    String messageAmount = topicDetails.MessageCountAmount();
+    Assertions.assertEquals(messageAmount,topicDetails.MessageCountAmount());
+    topicDetails
+        .openDotPartitionIdMenu()
+        .clickClearMessagesBtn();
+    // Assertions.assertEquals(Integer.toString(Integer.valueOf(messageAmount)-1),topicDetails.MessageCountAmount());
+  }
+
   @AfterAll
   public void afterAll() {
     TOPIC_LIST.forEach(topic -> apiHelper.deleteTopic(CLUSTER_NAME, topic.getName()));
@@ -66,4 +66,4 @@ pnpm start
 ```
 ## Links

-* [Create React App](https://github.com/facebook/create-react-app)
+* [Vite](https://github.com/vitejs/vite)
@@ -16,7 +16,6 @@
     "@tanstack/react-table": "^8.5.10",
     "@testing-library/react": "^13.2.0",
     "@types/testing-library__jest-dom": "^5.14.5",
-    "@types/yup": "^0.29.13",
    "@vitejs/plugin-react": "^2.0.0",
    "ace-builds": "^1.7.1",
    "ajv": "^8.6.3",
@@ -47,7 +46,7 @@
     "vite": "^3.0.2",
     "vite-tsconfig-paths": "^3.5.0",
     "whatwg-fetch": "^3.6.2",
-    "yup": "^0.32.9",
+    "yup": "^0.32.11",
     "zustand": "^4.1.1"
   },
   "lint-staged": {
@@ -83,7 +82,7 @@
     "@openapitools/openapi-generator-cli": "^2.5.1",
     "@testing-library/dom": "^8.11.1",
     "@testing-library/jest-dom": "^5.16.4",
-    "@testing-library/user-event": "^13.5.0",
+    "@testing-library/user-event": "^14.4.3",
     "@types/eventsource": "^1.1.8",
     "@types/jest": "^29.0.1",
     "@types/lodash": "^4.14.172",
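The user-event bump from v13 to v14 explains the `await` churn across the test diffs below: in v14 every interaction returns a Promise. A minimal sketch of the before/after pattern, with a hypothetical component used only to illustrate the API change:

```tsx
import React from 'react';
import { render, screen } from '@testing-library/react';
import userEvent from '@testing-library/user-event';

test('interactions return promises in user-event v14', async () => {
  render(<button type="button">Toggle</button>);
  // v13: userEvent.click(...) was synchronous and often needed act() wrapping.
  // v14: every interaction returns a Promise, so the test simply awaits it.
  await userEvent.click(screen.getByRole('button', { name: 'Toggle' }));
  expect(screen.getByRole('button')).toBeEnabled();
});
```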
19
kafka-ui-react-app/pnpm-lock.yaml
generated
@@ -19,7 +19,7 @@ specifiers:
   '@testing-library/dom': ^8.11.1
   '@testing-library/jest-dom': ^5.16.4
   '@testing-library/react': ^13.2.0
-  '@testing-library/user-event': ^13.5.0
+  '@testing-library/user-event': ^14.4.3
   '@types/eventsource': ^1.1.8
   '@types/jest': ^29.0.1
   '@types/lodash': ^4.14.172
@@ -30,7 +30,6 @@ specifiers:
   '@types/react-router-dom': ^5.3.3
   '@types/styled-components': ^5.1.13
   '@types/testing-library__jest-dom': ^5.14.5
-  '@types/yup': ^0.29.13
   '@typescript-eslint/eslint-plugin': ^5.29.0
   '@typescript-eslint/parser': ^5.29.0
   '@vitejs/plugin-react': ^2.0.0
@@ -88,7 +87,7 @@ specifiers:
   vite: ^3.0.2
   vite-tsconfig-paths: ^3.5.0
   whatwg-fetch: ^3.6.2
-  yup: ^0.32.9
+  yup: ^0.32.11
   zustand: ^4.1.1

 dependencies:
@@ -104,7 +103,6 @@ dependencies:
   '@tanstack/react-table': 8.5.10_ef5jwxihqo6n7gxfmzogljlgcm
   '@testing-library/react': 13.2.0_ef5jwxihqo6n7gxfmzogljlgcm
   '@types/testing-library__jest-dom': 5.14.5
-  '@types/yup': 0.29.13
   '@vitejs/plugin-react': 2.0.0_vite@3.0.2
   ace-builds: 1.7.1
   ajv: 8.8.2
@@ -146,7 +144,7 @@ devDependencies:
   '@openapitools/openapi-generator-cli': 2.5.1
   '@testing-library/dom': 8.13.0
   '@testing-library/jest-dom': 5.16.4
-  '@testing-library/user-event': 13.5.0_tlwynutqiyp5mns3woioasuxnq
+  '@testing-library/user-event': 14.4.3_tlwynutqiyp5mns3woioasuxnq
   '@types/eventsource': 1.1.8
   '@types/jest': 29.0.1
   '@types/lodash': 4.14.177
@@ -3339,13 +3337,12 @@ packages:
       react-dom: 18.1.0_react@18.1.0
     dev: false

-  /@testing-library/user-event/13.5.0_tlwynutqiyp5mns3woioasuxnq:
-    resolution: {integrity: sha512-5Kwtbo3Y/NowpkbRuSepbyMFkZmHgD+vPzYB/RJ4oxt5Gj/avFFBYjhw27cqSVPVw/3a67NK1PbiIr9k4Gwmdg==}
-    engines: {node: '>=10', npm: '>=6'}
+  /@testing-library/user-event/14.4.3_tlwynutqiyp5mns3woioasuxnq:
+    resolution: {integrity: sha512-kCUc5MEwaEMakkO5x7aoD+DLi02ehmEM2QCGWvNqAS1dV/fAvORWEjnjsEIvml59M7Y5kCkWN6fCCyPOe8OL6Q==}
+    engines: {node: '>=12', npm: '>=6'}
     peerDependencies:
       '@testing-library/dom': '>=7.21.4'
     dependencies:
-      '@babel/runtime': 7.17.9
       '@testing-library/dom': 8.13.0
     dev: true
@@ -3546,10 +3543,6 @@ packages:
     dependencies:
       '@types/yargs-parser': 20.2.0

-  /@types/yup/0.29.13:
-    resolution: {integrity: sha512-qRyuv+P/1t1JK1rA+elmK1MmCL1BapEzKKfbEhDBV/LMMse4lmhZ/XbgETI39JveDJRpLjmToOI6uFtMW/WR2g==}
-    dev: false
-
   /@typescript-eslint/eslint-plugin/5.29.0_uaxwak76nssfibsnotx5epygnu:
     resolution: {integrity: sha512-kgTsISt9pM53yRFQmLZ4npj99yGl3x3Pl7z4eA66OuTzAGC4bQB5H5fuLwPnqTKU3yyrrg4MIhjF17UYnL4c0w==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
@@ -2,7 +2,6 @@ import React from 'react';
 import { render, WithRoute } from 'lib/testHelpers';
 import { screen } from '@testing-library/dom';
 import { clusterBrokerPath } from 'lib/paths';
-import { act } from '@testing-library/react';
 import { brokerLogDirsPayload } from 'lib/fixtures/brokers';
 import { useBrokerLogDirs } from 'lib/hooks/api/brokers';
 import { BrokerLogdirs } from 'generated-sources';
@@ -20,16 +19,14 @@ describe('BrokerLogdir Component', () => {
     (useBrokerLogDirs as jest.Mock).mockImplementation(() => ({
       data: payload,
     }));
-    await act(() => {
-      render(
-        <WithRoute path={clusterBrokerPath()}>
-          <BrokerLogdir />
-        </WithRoute>,
-        {
-          initialEntries: [clusterBrokerPath(clusterName, brokerId)],
-        }
-      );
-    });
+    await render(
+      <WithRoute path={clusterBrokerPath()}>
+        <BrokerLogdir />
+      </WithRoute>,
+      {
+        initialEntries: [clusterBrokerPath(clusterName, brokerId)],
+      }
+    );
   };

   it('shows warning when server returns undefined logDirs response', async () => {
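Several specs here drop `act()` wrappers in favour of awaiting the custom `render` from lib/testHelpers. Only the `await render(ui, { initialEntries })` call shape appears in the diff; a sketch of how such an awaitable helper could be built, with the body below being an assumption for illustration:

```tsx
import React from 'react';
import { act, render as rtlRender, RenderResult } from '@testing-library/react';
import { MemoryRouter } from 'react-router-dom';

interface CustomRenderOptions {
  initialEntries?: string[];
}

async function render(
  ui: React.ReactElement,
  { initialEntries = ['/'] }: CustomRenderOptions = {}
): Promise<RenderResult> {
  let result!: RenderResult;
  // Wrapping the initial render in async act() lets effects and pending
  // state updates flush before the test starts asserting.
  await act(async () => {
    result = rtlRender(
      <MemoryRouter initialEntries={initialEntries}>{ui}</MemoryRouter>
    );
  });
  return result;
}
```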
@@ -6,7 +6,6 @@ import { useBrokerConfig } from 'lib/hooks/api/brokers';
 import { brokerConfigPayload } from 'lib/fixtures/brokers';
 import Configs from 'components/Brokers/Broker/Configs/Configs';
 import userEvent from '@testing-library/user-event';
-import { act } from '@testing-library/react';

 const clusterName = 'Cluster_Name';
 const brokerId = 'Broker_Id';
@@ -42,9 +41,7 @@ describe('Configs', () => {
   });

   it('updates textbox value', async () => {
-    await act(() => {
-      userEvent.click(screen.getAllByLabelText('editAction')[0]);
-    });
+    await userEvent.click(screen.getAllByLabelText('editAction')[0]);

     const textbox = screen.getByLabelText('inputValue');
     expect(textbox).toBeInTheDocument();
@@ -59,9 +56,9 @@ describe('Configs', () => {
       screen.getByRole('button', { name: 'cancelAction' })
     ).toBeInTheDocument();

-    await act(() => {
-      userEvent.click(screen.getByRole('button', { name: 'confirmAction' }));
-    });
+    await userEvent.click(
+      screen.getByRole('button', { name: 'confirmAction' })
+    );

     expect(
       screen.getByText('Are you sure you want to change the value?')
@@ -2,7 +2,6 @@ import React from 'react';
 import { render, WithRoute } from 'lib/testHelpers';
 import { screen, waitFor } from '@testing-library/dom';
 import { clusterBrokerPath, clusterBrokersPath } from 'lib/paths';
-import { act } from '@testing-library/react';
 import BrokersList from 'components/Brokers/BrokersList/BrokersList';
 import userEvent from '@testing-library/user-event';
 import { useBrokers } from 'lib/hooks/api/brokers';
@@ -57,9 +56,8 @@ describe('BrokersList Component', () => {
   });
   it('opens broker when row clicked', async () => {
     renderComponent();
-    await act(() => {
-      userEvent.click(screen.getByRole('cell', { name: '0' }));
-    });
+    await userEvent.click(screen.getByRole('cell', { name: '0' }));
+
     await waitFor(() =>
       expect(mockedUsedNavigate).toBeCalledWith(
         clusterBrokerPath(clusterName, '0')
@@ -6,169 +6,20 @@ export const brokerMetricsPayload: BrokerMetrics = {
   metrics: [
     {
       name: 'TotalFetchRequestsPerSec',
-      canonicalName:
-        'kafka.server:name=TotalFetchRequestsPerSec,topic=_connect_status,type=BrokerTopicMetrics',
-      params: {
-        topic: '_connect_status',
-        name: 'TotalFetchRequestsPerSec',
-        type: 'BrokerTopicMetrics',
-      },
-      value: {
-        OneMinuteRate: 19.408369293127542,
-        FifteenMinuteRate: 19.44631556589501,
-        Count: 191615,
-        FiveMinuteRate: 19.464393718807774,
-        MeanRate: 19.4233855043407,
+      labels: {
+        canonicalName:
+          'kafka.server:name=TotalFetchRequestsPerSec,topic=_connect_status,type=BrokerTopicMetrics',
       },
+      value: 10,
     },
     {
       name: 'ZooKeeperRequestLatencyMs',
-      canonicalName:
-        'kafka.server:name=ZooKeeperRequestLatencyMs,type=ZooKeeperClientMetrics',
-      params: {
-        name: 'ZooKeeperRequestLatencyMs',
-        type: 'ZooKeeperClientMetrics',
-      },
-      value: {
-        Mean: 4.907351022183558,
-        StdDev: 10.589608223906348,
-        '75thPercentile': 2,
-        '98thPercentile': 10,
-        Min: 0,
-        '95thPercentile': 5,
-        '99thPercentile': 15,
-        Max: 151,
-        '999thPercentile': 92.79700000000003,
-        Count: 2301,
-        '50thPercentile': 1,
-      },
+      value: 11,
     },
     {
       name: 'RequestHandlerAvgIdlePercent',
-      canonicalName:
-        'kafka.server:name=RequestHandlerAvgIdlePercent,type=KafkaRequestHandlerPool',
-      params: {
-        name: 'RequestHandlerAvgIdlePercent',
-        type: 'KafkaRequestHandlerPool',
-      },
-      value: {
-        OneMinuteRate: 0.9999008788765713,
-        FifteenMinuteRate: 0.9983845959639047,
-        Count: 9937344680371,
-        FiveMinuteRate: 0.9986337207880311,
-        MeanRate: 0.9971616923696525,
-      },
-    },
-    {
-      name: 'BytesInPerSec',
-      canonicalName:
-        'kafka.server:name=BytesInPerSec,topic=_connect_status,type=BrokerTopicMetrics',
-      params: {
-        topic: '_connect_status',
-        name: 'BytesInPerSec',
-        type: 'BrokerTopicMetrics',
-      },
-      value: {
-        OneMinuteRate: 0,
-        FifteenMinuteRate: 0,
-        Count: 0,
-        FiveMinuteRate: 0,
-        MeanRate: 0,
-      },
-    },
-    {
-      name: 'FetchMessageConversionsPerSec',
-      canonicalName:
-        'kafka.server:name=FetchMessageConversionsPerSec,topic=__consumer_offsets,type=BrokerTopicMetrics',
-      params: {
-        topic: '__consumer_offsets',
-        name: 'FetchMessageConversionsPerSec',
-        type: 'BrokerTopicMetrics',
-      },
-      value: {
-        OneMinuteRate: 0,
-        FifteenMinuteRate: 0,
-        Count: 0,
-        FiveMinuteRate: 0,
-        MeanRate: 0,
-      },
-    },
-    {
-      name: 'TotalProduceRequestsPerSec',
-      canonicalName:
-        'kafka.server:name=TotalProduceRequestsPerSec,topic=_connect_status,type=BrokerTopicMetrics',
-      params: {
-        topic: '_connect_status',
-        name: 'TotalProduceRequestsPerSec',
-        type: 'BrokerTopicMetrics',
-      },
-      value: {
-        OneMinuteRate: 0,
-        FifteenMinuteRate: 0,
-        Count: 0,
-        FiveMinuteRate: 0,
-        MeanRate: 0,
-      },
-    },
-    {
-      name: 'MaxLag',
-      canonicalName:
-        'kafka.server:clientId=Replica,name=MaxLag,type=ReplicaFetcherManager',
-      params: {
-        clientId: 'Replica',
-        name: 'MaxLag',
-        type: 'ReplicaFetcherManager',
-      },
-      value: {
-        Value: 0,
-      },
-    },
-    {
-      name: 'UnderMinIsrPartitionCount',
-      canonicalName:
-        'kafka.server:name=UnderMinIsrPartitionCount,type=ReplicaManager',
-      params: {
-        name: 'UnderMinIsrPartitionCount',
-        type: 'ReplicaManager',
-      },
-      value: {
-        Value: 0,
-      },
-    },
-    {
-      name: 'ZooKeeperDisconnectsPerSec',
-      canonicalName:
-        'kafka.server:name=ZooKeeperDisconnectsPerSec,type=SessionExpireListener',
-      params: {
-        name: 'ZooKeeperDisconnectsPerSec',
-        type: 'SessionExpireListener',
-      },
-      value: {
-        OneMinuteRate: 0,
-        FifteenMinuteRate: 0,
-        Count: 0,
-        FiveMinuteRate: 0,
-        MeanRate: 0,
-      },
-    },
-    {
-      name: 'BytesInPerSec',
-      canonicalName:
-        'kafka.server:name=BytesInPerSec,topic=__confluent.support.metrics,type=BrokerTopicMetrics',
-      params: {
-        topic: '__confluent.support.metrics',
-        name: 'BytesInPerSec',
-        type: 'BrokerTopicMetrics',
-      },
-      value: {
-        OneMinuteRate: 3.093893673470914e-70,
-        FifteenMinuteRate: 0.004057932469784932,
-        Count: 1263,
-        FiveMinuteRate: 1.047243693828501e-12,
-        MeanRate: 0.12704831069266603,
-      },
     },
   ],
 };
 export const transformedBrokerMetricsPayload =
-  '{"segmentSize":23,"segmentCount":23,"metrics":[{"name":"TotalFetchRequestsPerSec","canonicalName":"kafka.server:name=TotalFetchRequestsPerSec,topic=_connect_status,type=BrokerTopicMetrics","params":{"topic":"_connect_status","name":"TotalFetchRequestsPerSec","type":"BrokerTopicMetrics"},"value":{"OneMinuteRate":19.408369293127542,"FifteenMinuteRate":19.44631556589501,"Count":191615,"FiveMinuteRate":19.464393718807774,"MeanRate":19.4233855043407}},{"name":"ZooKeeperRequestLatencyMs","canonicalName":"kafka.server:name=ZooKeeperRequestLatencyMs,type=ZooKeeperClientMetrics","params":{"name":"ZooKeeperRequestLatencyMs","type":"ZooKeeperClientMetrics"},"value":{"Mean":4.907351022183558,"StdDev":10.589608223906348,"75thPercentile":2,"98thPercentile":10,"Min":0,"95thPercentile":5,"99thPercentile":15,"Max":151,"999thPercentile":92.79700000000003,"Count":2301,"50thPercentile":1}},{"name":"RequestHandlerAvgIdlePercent","canonicalName":"kafka.server:name=RequestHandlerAvgIdlePercent,type=KafkaRequestHandlerPool","params":{"name":"RequestHandlerAvgIdlePercent","type":"KafkaRequestHandlerPool"},"value":{"OneMinuteRate":0.9999008788765713,"FifteenMinuteRate":0.9983845959639047,"Count":9937344680371,"FiveMinuteRate":0.9986337207880311,"MeanRate":0.9971616923696525}},{"name":"BytesInPerSec","canonicalName":"kafka.server:name=BytesInPerSec,topic=_connect_status,type=BrokerTopicMetrics","params":{"topic":"_connect_status","name":"BytesInPerSec","type":"BrokerTopicMetrics"},"value":{"OneMinuteRate":0,"FifteenMinuteRate":0,"Count":0,"FiveMinuteRate":0,"MeanRate":0}},{"name":"FetchMessageConversionsPerSec","canonicalName":"kafka.server:name=FetchMessageConversionsPerSec,topic=__consumer_offsets,type=BrokerTopicMetrics","params":{"topic":"__consumer_offsets","name":"FetchMessageConversionsPerSec","type":"BrokerTopicMetrics"},"value":{"OneMinuteRate":0,"FifteenMinuteRate":0,"Count":0,"FiveMinuteRate":0,"MeanRate":0}},{"name":"TotalProduceRequestsPerSec","canonicalName":"kafka.server:name=TotalProduceRequestsPerSec,topic=_connect_status,type=BrokerTopicMetrics","params":{"topic":"_connect_status","name":"TotalProduceRequestsPerSec","type":"BrokerTopicMetrics"},"value":{"OneMinuteRate":0,"FifteenMinuteRate":0,"Count":0,"FiveMinuteRate":0,"MeanRate":0}},{"name":"MaxLag","canonicalName":"kafka.server:clientId=Replica,name=MaxLag,type=ReplicaFetcherManager","params":{"clientId":"Replica","name":"MaxLag","type":"ReplicaFetcherManager"},"value":{"Value":0}},{"name":"UnderMinIsrPartitionCount","canonicalName":"kafka.server:name=UnderMinIsrPartitionCount,type=ReplicaManager","params":{"name":"UnderMinIsrPartitionCount","type":"ReplicaManager"},"value":{"Value":0}},{"name":"ZooKeeperDisconnectsPerSec","canonicalName":"kafka.server:name=ZooKeeperDisconnectsPerSec,type=SessionExpireListener","params":{"name":"ZooKeeperDisconnectsPerSec","type":"SessionExpireListener"},"value":{"OneMinuteRate":0,"FifteenMinuteRate":0,"Count":0,"FiveMinuteRate":0,"MeanRate":0}},{"name":"BytesInPerSec","canonicalName":"kafka.server:name=BytesInPerSec,topic=__confluent.support.metrics,type=BrokerTopicMetrics","params":{"topic":"__confluent.support.metrics","name":"BytesInPerSec","type":"BrokerTopicMetrics"},"value":{"OneMinuteRate":3.093893673470914e-70,"FifteenMinuteRate":0.004057932469784932,"Count":1263,"FiveMinuteRate":1.047243693828501e-12,"MeanRate":0.12704831069266603}}]}';
+  '{"segmentSize":23,"segmentCount":23,"metrics":[{"name":"TotalFetchRequestsPerSec","labels":{"canonicalName":"kafka.server:name=TotalFetchRequestsPerSec,topic=_connect_status,type=BrokerTopicMetrics"},"value":10},{"name":"ZooKeeperRequestLatencyMs","value":11},{"name":"RequestHandlerAvgIdlePercent"}]}';
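The fixture now mirrors a flattened metric model: a metric is just a name, optional labels, and an optional scalar value, instead of the old per-rate JMX object. A sketch of the shape these tests rely on — the types are written out here only for illustration, since the app imports `BrokerMetrics` from `generated-sources`:

```ts
// Illustrative types only; field names are inferred from the fixture above.
interface Metric {
  name?: string;
  labels?: Record<string, string>;
  value?: number; // a scalar now, rather than the old object of rates
}

interface BrokerMetrics {
  segmentSize?: number;
  segmentCount?: number;
  metrics?: Metric[];
}

const example: BrokerMetrics = {
  segmentSize: 23,
  segmentCount: 23,
  metrics: [{ name: 'TotalFetchRequestsPerSec', value: 10 }],
};
```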
@@ -13,7 +13,6 @@ import {
   clusterSchemasPath,
   clusterTopicsPath,
 } from 'lib/paths';
-import { act } from 'react-dom/test-utils';
 import { useClusters } from 'lib/hooks/api/clusters';
 import { onlineClusterPayload } from 'lib/fixtures/clusters';
@@ -54,14 +53,12 @@ describe('Cluster', () => {
     (useClusters as jest.Mock).mockImplementation(() => ({
       data: payload,
     }));
-    await act(() => {
-      render(
-        <WithRoute path={`${clusterPath()}/*`}>
-          <ClusterComponent />
-        </WithRoute>,
-        { initialEntries: [pathname] }
-      );
-    });
+    await render(
+      <WithRoute path={`${clusterPath()}/*`}>
+        <ClusterComponent />
+      </WithRoute>,
+      { initialEntries: [pathname] }
+    );
   };

   it('renders Brokers', async () => {
@@ -33,10 +33,10 @@ const expectActionButtonsExists = () => {
   expect(screen.getByText('Restart Failed Tasks')).toBeInTheDocument();
   expect(screen.getByText('Delete')).toBeInTheDocument();
 };
-const afterClickDropDownButton = () => {
+const afterClickDropDownButton = async () => {
   const dropDownButton = screen.getAllByRole('button');
   expect(dropDownButton.length).toEqual(1);
-  userEvent.click(dropDownButton[0]);
+  await userEvent.click(dropDownButton[0]);
 };
 describe('Actions', () => {
   afterEach(() => {
@@ -61,48 +61,48 @@ describe('Actions', () => {
       { initialEntries: [path] }
     );

-  it('renders buttons when paused', () => {
+  it('renders buttons when paused', async () => {
     (useConnector as jest.Mock).mockImplementation(() => ({
       data: set({ ...connector }, 'status.state', ConnectorState.PAUSED),
     }));
     renderComponent();
-    afterClickDropDownButton();
+    await afterClickDropDownButton();
     expect(screen.getAllByRole('menuitem').length).toEqual(5);
     expect(screen.getByText('Resume')).toBeInTheDocument();
     expect(screen.queryByText('Pause')).not.toBeInTheDocument();
     expectActionButtonsExists();
   });

-  it('renders buttons when failed', () => {
+  it('renders buttons when failed', async () => {
     (useConnector as jest.Mock).mockImplementation(() => ({
       data: set({ ...connector }, 'status.state', ConnectorState.FAILED),
     }));
     renderComponent();
-    afterClickDropDownButton();
+    await afterClickDropDownButton();
     expect(screen.getAllByRole('menuitem').length).toEqual(4);
     expect(screen.queryByText('Resume')).not.toBeInTheDocument();
     expect(screen.queryByText('Pause')).not.toBeInTheDocument();
     expectActionButtonsExists();
   });

-  it('renders buttons when unassigned', () => {
+  it('renders buttons when unassigned', async () => {
     (useConnector as jest.Mock).mockImplementation(() => ({
       data: set({ ...connector }, 'status.state', ConnectorState.UNASSIGNED),
     }));
     renderComponent();
-    afterClickDropDownButton();
+    await afterClickDropDownButton();
     expect(screen.getAllByRole('menuitem').length).toEqual(4);
     expect(screen.queryByText('Resume')).not.toBeInTheDocument();
     expect(screen.queryByText('Pause')).not.toBeInTheDocument();
     expectActionButtonsExists();
   });

-  it('renders buttons when running connector action', () => {
+  it('renders buttons when running connector action', async () => {
     (useConnector as jest.Mock).mockImplementation(() => ({
       data: set({ ...connector }, 'status.state', ConnectorState.RUNNING),
     }));
     renderComponent();
-    afterClickDropDownButton();
+    await afterClickDropDownButton();
     expect(screen.getAllByRole('menuitem').length).toEqual(5);
     expect(screen.queryByText('Resume')).not.toBeInTheDocument();
     expect(screen.getByText('Pause')).toBeInTheDocument();
@@ -118,34 +118,34 @@ describe('Actions', () => {

   it('opens confirmation modal when delete button clicked', async () => {
     renderComponent();
-    afterClickDropDownButton();
+    await afterClickDropDownButton();
-    await waitFor(() =>
+    await waitFor(async () =>
       userEvent.click(screen.getByRole('menuitem', { name: 'Delete' }))
     );
     expect(screen.getByRole('dialog')).toBeInTheDocument();
   });

-  it('calls restartConnector when restart button clicked', () => {
+  it('calls restartConnector when restart button clicked', async () => {
     const restartConnector = jest.fn();
     (useUpdateConnectorState as jest.Mock).mockImplementation(() => ({
       mutateAsync: restartConnector,
     }));
     renderComponent();
-    afterClickDropDownButton();
+    await afterClickDropDownButton();
-    userEvent.click(
+    await userEvent.click(
       screen.getByRole('menuitem', { name: 'Restart Connector' })
     );
     expect(restartConnector).toHaveBeenCalledWith(ConnectorAction.RESTART);
   });

-  it('calls restartAllTasks', () => {
+  it('calls restartAllTasks', async () => {
     const restartAllTasks = jest.fn();
     (useUpdateConnectorState as jest.Mock).mockImplementation(() => ({
       mutateAsync: restartAllTasks,
     }));
     renderComponent();
-    afterClickDropDownButton();
+    await afterClickDropDownButton();
-    userEvent.click(
+    await userEvent.click(
       screen.getByRole('menuitem', { name: 'Restart All Tasks' })
     );
     expect(restartAllTasks).toHaveBeenCalledWith(
@@ -153,14 +153,14 @@ describe('Actions', () => {
     );
   });

-  it('calls restartFailedTasks', () => {
+  it('calls restartFailedTasks', async () => {
     const restartFailedTasks = jest.fn();
     (useUpdateConnectorState as jest.Mock).mockImplementation(() => ({
       mutateAsync: restartFailedTasks,
     }));
     renderComponent();
-    afterClickDropDownButton();
+    await afterClickDropDownButton();
-    userEvent.click(
+    await userEvent.click(
       screen.getByRole('menuitem', { name: 'Restart Failed Tasks' })
     );
     expect(restartFailedTasks).toHaveBeenCalledWith(
@@ -168,18 +168,18 @@ describe('Actions', () => {
     );
   });

-  it('calls pauseConnector when pause button clicked', () => {
+  it('calls pauseConnector when pause button clicked', async () => {
     const pauseConnector = jest.fn();
     (useUpdateConnectorState as jest.Mock).mockImplementation(() => ({
       mutateAsync: pauseConnector,
     }));
     renderComponent();
-    afterClickDropDownButton();
+    await afterClickDropDownButton();
-    userEvent.click(screen.getByRole('menuitem', { name: 'Pause' }));
+    await userEvent.click(screen.getByRole('menuitem', { name: 'Pause' }));
     expect(pauseConnector).toHaveBeenCalledWith(ConnectorAction.PAUSE);
   });

-  it('calls resumeConnector when resume button clicked', () => {
+  it('calls resumeConnector when resume button clicked', async () => {
     const resumeConnector = jest.fn();
     (useConnector as jest.Mock).mockImplementation(() => ({
       data: set({ ...connector }, 'status.state', ConnectorState.PAUSED),
@@ -188,8 +188,8 @@ describe('Actions', () => {
       mutateAsync: resumeConnector,
     }));
     renderComponent();
-    afterClickDropDownButton();
+    await afterClickDropDownButton();
-    userEvent.click(screen.getByRole('menuitem', { name: 'Resume' }));
+    await userEvent.click(screen.getByRole('menuitem', { name: 'Resume' }));
     expect(resumeConnector).toHaveBeenCalledWith(ConnectorAction.RESUME);
   });
 });
@@ -57,7 +57,7 @@ describe('Tasks', () => {
     ).toBeInTheDocument();
   });

-  it('renders truncates long trace and expands', () => {
+  it('renders truncates long trace and expands', async () => {
     renderComponent(tasks);

     const trace = tasks[2]?.status?.trace || '';
@@ -72,7 +72,7 @@ describe('Tasks', () => {
     // Full trace is not visible
     expect(expandedDetails).not.toBeInTheDocument();

-    userEvent.click(thirdRow);
+    await userEvent.click(thirdRow);

     expect(
       screen.getByRole('row', {
@@ -82,7 +82,7 @@ describe('Tasks', () => {
   });

   describe('Action button', () => {
-    const expectDropdownExists = () => {
+    const expectDropdownExists = async () => {
       const firstTaskRow = screen.getByRole('row', {
         name: '1 kafka-connect0:8083 RUNNING',
       });
@@ -91,13 +91,13 @@ describe('Tasks', () => {
         name: 'Dropdown Toggle',
       });
       expect(extBtn).toBeEnabled();
-      userEvent.click(extBtn);
+      await userEvent.click(extBtn);
       expect(screen.getByRole('menu')).toBeInTheDocument();
     };

-    it('renders action button', () => {
+    it('renders action button', async () => {
       renderComponent(tasks);
-      expectDropdownExists();
+      await expectDropdownExists();
       expect(
         screen.getAllByRole('button', { name: 'Dropdown Toggle' }).length
       ).toEqual(tasks.length);
@@ -108,11 +108,11 @@ describe('Tasks', () => {

     it('works as expected', async () => {
       renderComponent(tasks);
-      expectDropdownExists();
+      await expectDropdownExists();
       const actionBtn = screen.getAllByRole('menuitem');
       expect(actionBtn[0]).toHaveTextContent('Restart task');

-      userEvent.click(actionBtn[0]);
+      await userEvent.click(actionBtn[0]);
       expect(
         screen.getByText('Are you sure you want to restart the task?')
       ).toBeInTheDocument();
@@ -5,7 +5,7 @@ import ClusterContext, {
   initialValue,
 } from 'components/contexts/ClusterContext';
 import List from 'components/Connect/List/List';
-import { act, screen, waitFor } from '@testing-library/react';
+import { screen, waitFor } from '@testing-library/react';
 import userEvent from '@testing-library/user-event';
 import { render, WithRoute } from 'lib/testHelpers';
 import { clusterConnectConnectorPath, clusterConnectorsPath } from 'lib/paths';
@@ -52,13 +52,11 @@ describe('Connectors List', () => {

   it('opens broker when row clicked', async () => {
     renderComponent();
-    await act(() => {
-      userEvent.click(
-        screen.getByRole('row', {
-          name: 'hdfs-source-connector first SOURCE FileStreamSource a b c RUNNING 2 of 2',
-        })
-      );
-    });
+    await userEvent.click(
+      screen.getByRole('row', {
+        name: 'hdfs-source-connector first SOURCE FileStreamSource a b c RUNNING 2 of 2',
+      })
+    );

     await waitFor(() =>
       expect(mockedUsedNavigate).toBeCalledWith(
         clusterConnectConnectorPath(
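Dropping the act() wrapper is safe because Testing Library already wraps event dispatch in act(), and the awaited v14 call flushes the resulting React updates before it resolves. A sketch of the simplification, with a hypothetical row name:

    import userEvent from '@testing-library/user-event';
    import { screen } from '@testing-library/react';

    // Before: await act(() => { userEvent.click(row); });
    // After: one awaited call; user-event performs the act() bookkeeping itself.
    export const openRow = async (name: string): Promise<void> => {
      await userEvent.click(screen.getByRole('row', { name }));
    };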
@@ -105,7 +103,7 @@ describe('Connectors List', () => {
     const submitButton = screen.getAllByRole('button', {
       name: 'Confirm',
     })[0];
-    await act(() => userEvent.click(submitButton));
+    await userEvent.click(submitButton);
     expect(mockDelete).toHaveBeenCalledWith();
   });

@@ -31,16 +31,14 @@ jest.mock('lib/hooks/api/kafkaConnect', () => ({
 describe('New', () => {
   const clusterName = 'my-cluster';
   const simulateFormSubmit = async () => {
-    await act(() => {
-      userEvent.type(
-        screen.getByPlaceholderText('Connector Name'),
-        'my-connector'
-      );
-      userEvent.type(
-        screen.getByPlaceholderText('json'),
-        '{"class":"MyClass"}'.replace(/[{[]/g, '$&$&')
-      );
-    });
+    await userEvent.type(
+      screen.getByPlaceholderText('Connector Name'),
+      'my-connector'
+    );
+    await userEvent.type(
+      screen.getByPlaceholderText('json'),
+      '{"class":"MyClass"}'.replace(/[{[]/g, '$&$&')
+    );

     expect(screen.getByPlaceholderText('json')).toHaveValue(
       '{"class":"MyClass"}'
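userEvent.type follows the same rule: calls are awaited one after another instead of being batched inside one act(). Incidentally, the doubled braces produced by the replace() above are type()'s escape syntax, since { and [ open special key descriptors. A sketch with hypothetical placeholders:

    import React from 'react';
    import userEvent from '@testing-library/user-event';
    import { render, screen } from '@testing-library/react';

    it('types into two fields sequentially', async () => {
      render(
        <>
          <input placeholder="Connector Name" />
          <input placeholder="json" />
        </>
      );
      await userEvent.type(screen.getByPlaceholderText('Connector Name'), 'my-connector');
      // '{{' types a literal '{' because a single '{' opens a key descriptor.
      await userEvent.type(screen.getByPlaceholderText('json'), '{{"a":1}');
    });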
@@ -33,25 +33,24 @@ const resetConsumerGroupOffsetsMockCalled = () =>
   ).toBeTruthy();

 const selectresetTypeAndPartitions = async (resetType: string) => {
-  userEvent.click(screen.getByLabelText('Reset Type'));
-  userEvent.click(screen.getByText(resetType));
-  userEvent.click(screen.getByText('Select...'));
-  await waitFor(() => {
-    userEvent.click(screen.getByText('Partition #0'));
-  });
+  await userEvent.click(screen.getByLabelText('Reset Type'));
+  await userEvent.click(screen.getByText(resetType));
+  await userEvent.click(screen.getByText('Select...'));
+
+  await userEvent.click(screen.getByText('Partition #0'));
 };

 const resetConsumerGroupOffsetsWith = async (
   resetType: string,
   offset: null | number = null
 ) => {
-  userEvent.click(screen.getByLabelText('Reset Type'));
+  await userEvent.click(screen.getByLabelText('Reset Type'));
   const options = screen.getAllByText(resetType);
-  userEvent.click(options.length > 1 ? options[1] : options[0]);
-  userEvent.click(screen.getByText('Select...'));
-  await waitFor(() => {
-    userEvent.click(screen.getByText('Partition #0'));
-  });
+  await userEvent.click(options.length > 1 ? options[1] : options[0]);
+  await userEvent.click(screen.getByText('Select...'));
+
+  await userEvent.click(screen.getByText('Partition #0'));
   fetchMock.postOnce(
     `/api/clusters/${clusterName}/consumer-groups/${groupId}/offsets`,
     200,
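Wrapping a click in waitFor, as the removed lines did, is discouraged by Testing Library: waitFor retries its callback until it stops throwing, so a side effect inside it can fire more than once. Awaiting the interaction directly is the replacement. A sketch with placeholder markup:

    import React from 'react';
    import userEvent from '@testing-library/user-event';
    import { render, screen, waitFor } from '@testing-library/react';

    it('keeps side effects out of waitFor', async () => {
      render(<button>Partition #0</button>); // placeholder markup
      // Discouraged (the removed pattern): the click could run on every retry.
      // await waitFor(() => { userEvent.click(screen.getByText('Partition #0')); });

      // Preferred: await the interaction; reserve waitFor for assertions.
      await userEvent.click(screen.getByText('Partition #0'));
      await waitFor(() => expect(screen.getByText('Partition #0')).toBeEnabled());
    });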
@@ -64,7 +63,7 @@ const resetConsumerGroupOffsetsWith = (
       },
     }
   );
-  userEvent.click(screen.getByText('Submit'));
+  await userEvent.click(screen.getByText('Submit'));
   await waitFor(() => resetConsumerGroupOffsetsMockCalled());
 };

@@ -116,14 +115,14 @@ describe('ResetOffsets', () => {
         },
       }
     );
-    await waitFor(() => {
-      userEvent.click(screen.getAllByLabelText('Partition #0')[1]);
-    });
-    await waitFor(() => {
-      userEvent.keyboard('10');
-    });
-    userEvent.click(screen.getByText('Submit'));
-    await waitFor(() => resetConsumerGroupOffsetsMockCalled());
+    await userEvent.click(screen.getAllByLabelText('Partition #0')[1]);
+    await userEvent.keyboard('10');
+    await userEvent.click(screen.getByText('Submit'));
+    await resetConsumerGroupOffsetsMockCalled();
   });
   it('calls resetConsumerGroupOffsets with TIMESTAMP', async () => {
     await selectresetTypeAndPartitions('TIMESTAMP');
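This hunk also swaps `await waitFor(() => resetConsumerGroupOffsetsMockCalled())` for awaiting the assertion helper directly: once every interaction before it is awaited, the POST has already been issued by the time the Submit click resolves, so no polling is needed. A sketch of asserting a fetch-mock call under that assumption, with a hypothetical URL:

    import fetchMock from 'fetch-mock';

    // Hypothetical endpoint mirroring the offsets POST mocked above.
    fetchMock.postOnce('/api/example/offsets', 200);

    // With all interactions awaited, the call log can be asserted directly:
    export const offsetsPosted = (): boolean =>
      fetchMock.called('/api/example/offsets', { method: 'POST' });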
@@ -139,7 +138,7 @@ describe('ResetOffsets', () => {
         },
       }
     );
-    userEvent.click(screen.getByText('Submit'));
+    await userEvent.click(screen.getByText('Submit'));
     await waitFor(() =>
       expect(
         screen.getByText("This field shouldn't be empty!")
@@ -13,7 +13,6 @@ import {
   waitForElementToBeRemoved,
 } from '@testing-library/dom';
 import userEvent from '@testing-library/user-event';
-import { act } from '@testing-library/react';

 const clusterName = 'cluster1';
 const { groupId } = consumerGroupPayload;
@@ -71,7 +70,7 @@ describe('Details component', () => {
   });

   it('handles [Reset offset] click', async () => {
-    userEvent.click(screen.getByText('Reset offset'));
+    await userEvent.click(screen.getByText('Reset offset'));
     expect(mockNavigate).toHaveBeenLastCalledWith(
       clusterConsumerGroupResetRelativePath
     );
@@ -86,19 +85,19 @@ describe('Details component', () => {

   it('shows confirmation modal on consumer group delete', async () => {
     expect(screen.queryByRole('dialog')).not.toBeInTheDocument();
-    userEvent.click(screen.getByText('Delete consumer group'));
+    await userEvent.click(screen.getByText('Delete consumer group'));
     await waitFor(() =>
       expect(screen.queryByRole('dialog')).toBeInTheDocument()
     );
-    userEvent.click(screen.getByText('Cancel'));
+    await userEvent.click(screen.getByText('Cancel'));
     expect(screen.queryByRole('dialog')).not.toBeInTheDocument();
   });

   it('handles [Delete consumer group] click', async () => {
     expect(screen.queryByRole('dialog')).not.toBeInTheDocument();
-    await act(() => {
-      userEvent.click(screen.getByText('Delete consumer group'));
-    });
+    await userEvent.click(screen.getByText('Delete consumer group'));
     expect(screen.queryByRole('dialog')).toBeInTheDocument();
     const deleteConsumerGroupMock = fetchMock.deleteOnce(
       `/api/clusters/${clusterName}/consumer-groups/${groupId}`,
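The confirmation-modal tests follow a common open/cancel/confirm shape; once the clicks are awaited, the dialog queries can run synchronously right after them. A compact sketch with a hypothetical dialog component:

    import React from 'react';
    import userEvent from '@testing-library/user-event';
    import { render, screen } from '@testing-library/react';

    // Placeholder confirmation dialog, for illustration only.
    const Confirm: React.FC = () => {
      const [open, setOpen] = React.useState(false);
      return (
        <div>
          <button onClick={() => setOpen(true)}>Delete</button>
          {open && <div role="dialog">Are you sure?</div>}
        </div>
      );
    };

    it('opens the dialog after an awaited click', async () => {
      render(<Confirm />);
      expect(screen.queryByRole('dialog')).not.toBeInTheDocument();
      await userEvent.click(screen.getByText('Delete'));
      expect(screen.getByRole('dialog')).toBeInTheDocument();
    });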
@@ -39,8 +39,8 @@ describe('ListItem', () => {
     expect(screen.getByRole('row')).toBeInTheDocument();
   });

-  it('should renders list item with topic content open', () => {
-    userEvent.click(screen.getAllByRole('cell')[0].children[0]);
+  it('should renders list item with topic content open', async () => {
+    await userEvent.click(screen.getAllByRole('cell')[0].children[0]);
     expect(screen.getByText('Consumer ID')).toBeInTheDocument();
   });
 });
@@ -48,10 +48,10 @@ describe('List', () => {
     expect(screen.getByText('groupId2')).toBeInTheDocument();
   });

-  it('handles onRowClick', () => {
+  it('handles onRowClick', async () => {
     const row = screen.getByRole('row', { name: 'groupId1 0 1 1' });
     expect(row).toBeInTheDocument();
-    userEvent.click(row);
+    await userEvent.click(row);
     expect(mockedUsedNavigate).toHaveBeenCalledWith(
       clusterConsumerGroupDetailsPath(':clusterName', 'groupId1')
     );
@@ -1,5 +1,5 @@
 import React from 'react';
-import { act, screen } from '@testing-library/react';
+import { screen } from '@testing-library/react';
 import ClustersWidget from 'components/Dashboard/ClustersWidget/ClustersWidget';
 import userEvent from '@testing-library/user-event';
 import { render } from 'lib/testHelpers';
@@ -16,18 +16,16 @@ describe('ClustersWidget', () => {
       data: clustersPayload,
       isSuccess: true,
     }));
-    await act(() => {
-      render(<ClustersWidget />);
-    });
+    await render(<ClustersWidget />);
   });

   it('renders clusterWidget list', () => {
     expect(screen.getAllByRole('row').length).toBe(3);
   });

-  it('hides online cluster widgets', () => {
+  it('hides online cluster widgets', async () => {
     expect(screen.getAllByRole('row').length).toBe(3);
-    userEvent.click(screen.getByRole('checkbox'));
+    await userEvent.click(screen.getByRole('checkbox'));
     expect(screen.getAllByRole('row').length).toBe(2);
   });

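Here `await act(() => render(...))` becomes `await render(...)`. React Testing Library's own render already runs inside act(); the await simply lets a Promise-returning render helper, like the one imported from lib/testHelpers above, settle mount-time effects before the first assertion. A sketch assuming such a helper:

    import React from 'react';
    import { screen } from '@testing-library/react';
    // Assumed async-friendly helper, standing in for lib/testHelpers.
    import { render } from 'lib/testHelpers';

    beforeEach(async () => {
      await render(<table />); // placeholder markup
    });

    it('renders the table', () => {
      expect(screen.getByRole('table')).toBeInTheDocument();
    });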
@@ -7,7 +7,6 @@ import KsqlDbItem, {
 } from 'components/KsqlDb/List/KsqlDbItem/KsqlDbItem';
 import { screen } from '@testing-library/dom';
 import { fetchKsqlDbTablesPayload } from 'redux/reducers/ksqlDb/__test__/fixtures';
-import { act } from '@testing-library/react';

 describe('KsqlDbItem', () => {
   const tablesPathname = clusterKsqlDbTablesPath();
@@ -27,37 +26,34 @@ describe('KsqlDbItem', () => {
     );
   };

-  it('renders progressbar when fetching tables and streams', async () => {
-    await act(() => renderComponent({ fetching: true }));
+  it('renders progressbar when fetching tables and streams', () => {
+    renderComponent({ fetching: true });
     expect(screen.getByRole('progressbar')).toBeInTheDocument();
   });

-  it('show no text if no data found', async () => {
-    await act(() => renderComponent({}));
+  it('show no text if no data found', () => {
+    renderComponent({});
     expect(screen.getByText('No tables or streams found')).toBeInTheDocument();
   });

-  it('renders with tables', async () => {
-    await act(() =>
-      renderComponent({
-        rows: {
-          tables: fetchKsqlDbTablesPayload.tables,
-          streams: [],
-        },
-      })
-    );
+  it('renders with tables', () => {
+    renderComponent({
+      rows: {
+        tables: fetchKsqlDbTablesPayload.tables,
+        streams: [],
+      },
+    });
     expect(screen.getByRole('table').querySelectorAll('td')).toHaveLength(10);
   });
-  it('renders with streams', async () => {
-    await act(() =>
-      renderComponent({
-        type: KsqlDbItemType.Streams,
-        rows: {
-          tables: [],
-          streams: fetchKsqlDbTablesPayload.streams,
-        },
-      })
-    );
+  it('renders with streams', () => {
+    renderComponent({
+      type: KsqlDbItemType.Streams,
+      rows: {
+        tables: [],
+        streams: fetchKsqlDbTablesPayload.streams,
+      },
+    });
     expect(screen.getByRole('table').querySelectorAll('td')).toHaveLength(10);
   });
 });
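Pure render-and-assert tests need no act() or async at all: RTL's render performs the act() bookkeeping itself, so dropping the wrapper also lets these tests go back to being synchronous. A sketch with a placeholder component:

    import React from 'react';
    import { render, screen } from '@testing-library/react';

    // Placeholder component, for illustration only.
    const Spinner: React.FC = () => <div role="progressbar" />;

    it('renders synchronously', () => {
      render(<Spinner />); // no act(), no async needed for a plain render
      expect(screen.getByRole('progressbar')).toBeInTheDocument();
    });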
@@ -34,17 +34,22 @@ export const StreamPropertiesContainer = styled.label`
 `;

 export const InputsContainer = styled.div`
+  overflow: hidden;
+  width: 100%;
   display: flex;
   justify-content: center;
   gap: 10px;
 `;

 export const StreamPropertiesInputWrapper = styled.div`
+  & {
+    width: 100%;
+  }
   & > input {
+    width: 100%;
     height: 40px;
     border: 1px solid grey;
     border-radius: 4px;
-    min-width: 300px;
     font-size: 16px;
     padding-left: 15px;
   }
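In the styled-components change above, & refers back to the component's own generated class, so the added `& { width: 100%; }` block styles the wrapper itself while `& > input` targets its direct input child; the fixed min-width is dropped in favour of fluid 100% widths. A minimal sketch of the same nesting, with hypothetical styles:

    import styled from 'styled-components';

    // Hypothetical wrapper mirroring StreamPropertiesInputWrapper's nesting.
    export const FieldWrapper = styled.div`
      & {
        width: 100%; /* applies to the wrapper's own class */
      }
      & > input {
        width: 100%; /* applies to direct input children */
        height: 40px;
      }
    `;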
@@ -3,7 +3,6 @@ import React from 'react';
 import QueryForm, { Props } from 'components/KsqlDb/Query/QueryForm/QueryForm';
 import { screen, waitFor, within } from '@testing-library/dom';
 import userEvent from '@testing-library/user-event';
-import { act } from '@testing-library/react';

 const renderComponent = (props: Props) => render(<QueryForm {...props} />);

@@ -57,10 +56,9 @@ describe('QueryForm', () => {
       submitHandler: submitFn,
     });

-    await act(() =>
-      userEvent.click(screen.getByRole('button', { name: 'Execute' }))
-    );
-    waitFor(() => {
+    await userEvent.click(screen.getByRole('button', { name: 'Execute' }));
+    await waitFor(() => {
       expect(screen.getByText('ksql is a required field')).toBeInTheDocument();
       expect(submitFn).not.toBeCalled();
     });
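The hunk above fixes a second bug in passing: the original waitFor was never awaited, so a failing assertion inside it could not fail the test. waitFor returns a Promise and must always be awaited. A sketch, with a hypothetical message:

    import { screen, waitFor } from '@testing-library/react';

    // A floating waitFor(...) is a no-op for test outcomes; await it so that
    // rejections propagate and fail the test.
    export const expectValidationError = async (message: string): Promise<void> => {
      await waitFor(() => expect(screen.getByText(message)).toBeInTheDocument());
    };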
@@ -76,12 +74,16 @@ describe('QueryForm', () => {
       submitHandler: submitFn,
     });

-    await act(() => {
-      userEvent.paste(screen.getAllByRole('textbox')[0], 'show tables;');
-      userEvent.paste(screen.getByRole('textbox', { name: 'key' }), 'test');
-      userEvent.paste(screen.getByRole('textbox', { name: 'value' }), 'test');
-      userEvent.click(screen.getByRole('button', { name: 'Execute' }));
-    });
+    const textbox = screen.getAllByRole('textbox');
+    textbox[0].focus();
+    await userEvent.paste('show tables;');
+    const key = screen.getByRole('textbox', { name: 'key' });
+    key.focus();
+    await userEvent.paste('test');
+    const value = screen.getByRole('textbox', { name: 'value' });
+    value.focus();
+    await userEvent.paste('test');
+    await userEvent.click(screen.getByRole('button', { name: 'Execute' }));

     expect(
       screen.queryByText('ksql is a required field')
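user-event v14 also changed paste: the direct API no longer takes a target element, it pastes into the currently focused element, hence the focus() calls introduced above. A sketch with a hypothetical input:

    import React from 'react';
    import userEvent from '@testing-library/user-event';
    import { render, screen } from '@testing-library/react';

    it('pastes into the focused element (v14 API)', async () => {
      render(<input aria-label="query" />);
      const input = screen.getByLabelText('query');
      // v13: userEvent.paste(input, 'show tables;');
      input.focus(); // v14 pastes into document.activeElement
      await userEvent.paste('show tables;');
      expect(input).toHaveValue('show tables;');
    });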
@@ -106,8 +108,8 @@ describe('QueryForm', () => {

     expect(screen.getByRole('button', { name: 'Clear results' })).toBeEnabled();

-    await act(() =>
-      userEvent.click(screen.getByRole('button', { name: 'Clear results' }))
-    );
+    await userEvent.click(
+      screen.getByRole('button', { name: 'Clear results' })
+    );

     expect(clearFn).toBeCalled();
@@ -125,39 +127,12 @@ describe('QueryForm', () => {

     expect(screen.getByRole('button', { name: 'Stop query' })).toBeEnabled();

-    await act(() =>
-      userEvent.click(screen.getByRole('button', { name: 'Stop query' }))
-    );
+    await userEvent.click(screen.getByRole('button', { name: 'Stop query' }));

     expect(cancelFn).toBeCalled();
   });

-  it('submits form with ctrl+enter on KSQL editor', async () => {
-    const submitFn = jest.fn();
-    renderComponent({
-      fetching: false,
-      hasResults: false,
-      handleClearResults: jest.fn(),
-      handleSSECancel: jest.fn(),
-      submitHandler: submitFn,
-    });
-
-    await act(() => {
-      userEvent.paste(
-        within(screen.getByLabelText('KSQL')).getByRole('textbox'),
-        'show tables;'
-      );
-
-      userEvent.type(
-        within(screen.getByLabelText('KSQL')).getByRole('textbox'),
-        '{ctrl}{enter}'
-      );
-    });
-
-    expect(submitFn.mock.calls.length).toBe(1);
-  });
-
-  it('adds new property', async () => {
+  it('add new property', async () => {
     renderComponent({
       fetching: false,
       hasResults: false,
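The removed ctrl+enter test relied on v13's `{ctrl}{enter}` descriptor syntax, which v14 no longer accepts, and it was dropped rather than migrated. If that behaviour were reinstated it would need v14's keyboard syntax, where `{Control>}` presses and holds a key and `{/Control}` releases it. A hedged sketch:

    import userEvent from '@testing-library/user-event';

    // v13: userEvent.type(editor, '{ctrl}{enter}');
    // v14 equivalent: hold Control, press Enter, release Control.
    export const submitWithCtrlEnter = async (): Promise<void> => {
      await userEvent.keyboard('{Control>}{Enter}{/Control}');
    };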
@@ -168,11 +143,9 @@ describe('QueryForm', () => {

     const textbox = screen.getByLabelText('key');
     await userEvent.type(textbox, 'prop_name');
-    await act(() => {
-      userEvent.click(
-        screen.getByRole('button', { name: 'Add Stream Property' })
-      );
-    });
+    await userEvent.click(
+      screen.getByRole('button', { name: 'Add Stream Property' })
+    );
     expect(screen.getAllByRole('textbox', { name: 'key' }).length).toEqual(2);
   });

@@ -185,11 +158,9 @@ describe('QueryForm', () => {
       submitHandler: jest.fn(),
     });

-    await act(() => {
-      userEvent.click(
-        screen.getByRole('button', { name: 'Add Stream Property' })
-      );
-    });
+    await userEvent.click(
+      screen.getByRole('button', { name: 'Add Stream Property' })
+    );
     expect(screen.getAllByRole('textbox', { name: 'key' }).length).toEqual(1);
   });

@@ -201,16 +172,18 @@ describe('QueryForm', () => {
       handleSSECancel: jest.fn(),
       submitHandler: jest.fn(),
     });
+    const textBoxes = screen.getAllByRole('textbox', { name: 'key' });
+    textBoxes[0].focus();
+    await userEvent.paste('test');
+    await userEvent.click(
+      screen.getByRole('button', { name: 'Add Stream Property' })
+    );
+    await userEvent.click(screen.getAllByLabelText('deleteProperty')[0]);

-    await act(() => {
-      userEvent.paste(screen.getByRole('textbox', { name: 'key' }), 'test');
-      userEvent.click(
-        screen.getByRole('button', { name: 'Add Stream Property' })
-      );
-    });
-    await act(() => {
-      userEvent.click(screen.getAllByLabelText('deleteProperty')[0]);
-    });
-    expect(screen.getAllByRole('textbox', { name: 'key' }).length).toEqual(1);
+    await screen.getByRole('button', { name: 'Add Stream Property' });
+
+    await userEvent.click(screen.getAllByLabelText('deleteProperty')[0]);
+
+    expect(textBoxes.length).toEqual(1);
   });
 });
@@ -6,7 +6,6 @@ import Query, {
 import { screen } from '@testing-library/dom';
 import fetchMock from 'fetch-mock';
 import { clusterKsqlDbQueryPath } from 'lib/paths';
-import { act } from '@testing-library/react';
 import userEvent from '@testing-library/user-event';

 const clusterName = 'testLocal';
@@ -41,10 +40,10 @@ describe('Query', () => {
     });
     const inputs = screen.getAllByRole('textbox');
     const textAreaElement = inputs[0] as HTMLTextAreaElement;
-    await act(() => {
-      userEvent.paste(textAreaElement, 'show tables;');
-      userEvent.click(screen.getByRole('button', { name: 'Execute' }));
-    });
+    textAreaElement.focus();
+    await userEvent.paste('show tables;');
+    await userEvent.click(screen.getByRole('button', { name: 'Execute' }));

     expect(mock.calls().length).toBe(1);
   });
@@ -59,18 +58,20 @@ describe('Query', () => {
     Object.defineProperty(window, 'EventSource', {
       value: EventSourceMock,
     });
-    await act(() => {
-      const inputs = screen.getAllByRole('textbox');
-      const textAreaElement = inputs[0] as HTMLTextAreaElement;
-      userEvent.paste(textAreaElement, 'show tables;');
-    });
-    await act(() => {
-      userEvent.paste(screen.getByLabelText('key'), 'key');
-      userEvent.paste(screen.getByLabelText('value'), 'value');
-    });
-    await act(() => {
-      userEvent.click(screen.getByRole('button', { name: 'Execute' }));
-    });
+    const inputs = screen.getAllByRole('textbox');
+    const textAreaElement = inputs[0] as HTMLTextAreaElement;
+    textAreaElement.focus();
+    await userEvent.paste('show tables;');
+
+    const key = screen.getByLabelText('key');
+    key.focus();
+    await userEvent.paste('key');
+    const value = screen.getByLabelText('value');
+    value.focus();
+    await userEvent.paste('value');
+
+    await userEvent.click(screen.getByRole('button', { name: 'Execute' }));

     expect(mock.calls().length).toBe(1);
   });
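Taken together, these hunks are one mechanical migration: drop act() wrappers, await every user-event call, and focus-then-paste. user-event v14 also offers a setup() API that many codebases adopt at the same time; the direct calls used throughout this diff remain supported. A sketch of the setup() style, with a hypothetical input:

    import React from 'react';
    import userEvent from '@testing-library/user-event';
    import { render, screen } from '@testing-library/react';

    it('uses the v14 setup() API', async () => {
      const user = userEvent.setup(); // shares keyboard/pointer state across calls
      render(<input aria-label="ksql" />);
      await user.click(screen.getByLabelText('ksql')); // click focuses the input
      await user.paste('show tables;');
      expect(screen.getByLabelText('ksql')).toHaveValue('show tables;');
    });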