
* Return id, version, schema, and subject after creating a new schema
* Return a 422 error code if the incoming schema is an unprocessable entity due to invalid fields
* Return a 409 (Conflict) error code if the schema is a duplicate; change the endpoint of the createNewSchema method
* Fix submitting a new subject or a new version when the subject already exists
* Include the schema type in schema objects; by default it is AVRO
* [ISSUE-200] Update the FE to support the new version of the API
* Add one more schema registry, version 5.5.0, to the docker-compose files and app properties
* Upgrade Confluent service versions in tests up to 5.5.0
* Make schemaType required and ignore it when submitting to the Schema Registry if it is NULL

Co-authored-by: Oleg Shuralev <workshur@gmail.com>
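As a rough illustration of the reworked schema-creation flow described above, the sketch below posts a new subject to a running kafka-ui instance (port 8080 in the compose file that follows) and prints the result. The endpoint path, payload field names, and subject name are assumptions inferred from this changelog, not the project's documented API; adjust them to the actual OpenAPI definition.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class CreateSchemaExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical payload for the reworked createNewSchema endpoint: the subject moves into
        // the body, and schemaType is required (per the changelog it defaults to AVRO and is
        // ignored when NULL before the request is forwarded to the Schema Registry).
        String body = """
                {
                  "subject": "first.messages-value",
                  "schemaType": "AVRO",
                  "schema": "{\\"type\\":\\"record\\",\\"name\\":\\"Message\\",\\"fields\\":[{\\"name\\":\\"text\\",\\"type\\":\\"string\\"}]}"
                }
                """;

        // "local" matches KAFKA_CLUSTERS_0_NAME in the compose file below; the path itself is an assumption.
        HttpRequest request = HttpRequest.newBuilder()
                .uri(URI.create("http://localhost:8080/api/clusters/local/schemas"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();

        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());

        // Per the changelog: success returns the new schema's id, subject, version, and schema;
        // a duplicate schema yields 409/Conflict; invalid fields yield 422.
        System.out.println(response.statusCode() + " " + response.body());
    }
}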
---
version: '2'
services:
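
  # The UI itself, configured against both clusters defined below ("local" and "secondLocal")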
  kafka-ui:
    container_name: kafka-ui
    image: provectuslabs/kafka-ui:latest
    ports:
      - 8080:8080
    depends_on:
      - zookeeper0
      - zookeeper1
      - kafka0
      - kafka1
      - schemaregistry0
      - kafka-connect0
    environment:
      KAFKA_CLUSTERS_0_NAME: local
      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka0:29092
      KAFKA_CLUSTERS_0_ZOOKEEPER: zookeeper0:2181
      KAFKA_CLUSTERS_0_JMXPORT: 9997
      KAFKA_CLUSTERS_0_SCHEMAREGISTRY: http://schemaregistry0:8085
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_NAME: first
      KAFKA_CLUSTERS_0_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083
      KAFKA_CLUSTERS_1_NAME: secondLocal
      KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: kafka1:29092
      KAFKA_CLUSTERS_1_ZOOKEEPER: zookeeper1:2181
      KAFKA_CLUSTERS_1_JMXPORT: 9998
      KAFKA_CLUSTERS_1_SCHEMAREGISTRY: http://schemaregistry0:8085
      KAFKA_CLUSTERS_1_KAFKACONNECT_0_NAME: first
      KAFKA_CLUSTERS_1_KAFKACONNECT_0_ADDRESS: http://kafka-connect0:8083

  zookeeper0:
    image: confluentinc/cp-zookeeper:5.2.4
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
    ports:
      - 2181:2181

  kafka0:
    image: confluentinc/cp-kafka:5.2.4
    depends_on:
      - zookeeper0
    ports:
      - 9092:9092
      - 9997:9997
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      JMX_PORT: 9997
      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997

  zookeeper1:
    image: confluentinc/cp-zookeeper:5.2.4
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000

  kafka1:
    image: confluentinc/cp-kafka:5.2.4
    depends_on:
      - zookeeper1
    ports:
      - 9093:9093
      - 9998:9998
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      JMX_PORT: 9998
      KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka1 -Dcom.sun.management.jmxremote.rmi.port=9998

  schemaregistry0:
    image: confluentinc/cp-schema-registry:5.2.4
    ports:
      - 8085:8085
    depends_on:
      - zookeeper0
      - kafka0
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
      SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
      SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
      SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085

      SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
      SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
zookeeper0:
|
|
image: confluentinc/cp-zookeeper:5.2.4
|
|
environment:
|
|
ZOOKEEPER_CLIENT_PORT: 2181
|
|
ZOOKEEPER_TICK_TIME: 2000
|
|
ports:
|
|
- 2181:2181
|
|
|
|
kafka0:
|
|
image: confluentinc/cp-kafka:5.2.4
|
|
depends_on:
|
|
- zookeeper0
|
|
ports:
|
|
- 9092:9092
|
|
- 9997:9997
|
|
environment:
|
|
KAFKA_BROKER_ID: 1
|
|
KAFKA_ZOOKEEPER_CONNECT: zookeeper0:2181
|
|
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka0:29092,PLAINTEXT_HOST://localhost:9092
|
|
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
|
|
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
|
|
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
|
|
JMX_PORT: 9997
|
|
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka0 -Dcom.sun.management.jmxremote.rmi.port=9997
|
|
|
|
zookeeper1:
|
|
image: confluentinc/cp-zookeeper:5.2.4
|
|
environment:
|
|
ZOOKEEPER_CLIENT_PORT: 2181
|
|
ZOOKEEPER_TICK_TIME: 2000
|
|
|
|
kafka1:
|
|
image: confluentinc/cp-kafka:5.2.4
|
|
depends_on:
|
|
- zookeeper1
|
|
ports:
|
|
- 9093:9093
|
|
- 9998:9998
|
|
environment:
|
|
KAFKA_BROKER_ID: 1
|
|
KAFKA_ZOOKEEPER_CONNECT: zookeeper1:2181
|
|
KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:29092,PLAINTEXT_HOST://localhost:9093
|
|
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
|
|
KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
|
|
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
|
|
JMX_PORT: 9998
|
|
KAFKA_JMX_OPTS: -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=kafka1 -Dcom.sun.management.jmxremote.rmi.port=9998
|
|
|
|
schemaregistry0:
|
|
image: confluentinc/cp-schema-registry:5.2.4
|
|
ports:
|
|
- 8085:8085
|
|
depends_on:
|
|
- zookeeper0
|
|
- kafka0
|
|
environment:
|
|
SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka0:29092
|
|
SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper0:2181
|
|
SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
|
|
SCHEMA_REGISTRY_HOST_NAME: schemaregistry0
|
|
SCHEMA_REGISTRY_LISTENERS: http://schemaregistry0:8085
|
|
|
|
SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
|
|
SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
|
|
SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas
|
|
|
|
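
  # Second Schema Registry instance on Confluent Platform 5.5.0, added alongside the 5.2.4 one above; reachable from the host on port 18085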
  schemaregistry1:
    image: confluentinc/cp-schema-registry:5.5.0
    ports:
      - 18085:8085
    depends_on:
      - zookeeper1
      - kafka1
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:29092
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper1:2181
      SCHEMA_REGISTRY_KAFKASTORE_SECURITY_PROTOCOL: PLAINTEXT
      SCHEMA_REGISTRY_HOST_NAME: schemaregistry1
      SCHEMA_REGISTRY_LISTENERS: http://schemaregistry1:8085

      SCHEMA_REGISTRY_SCHEMA_REGISTRY_INTER_INSTANCE_PROTOCOL: "http"
      SCHEMA_REGISTRY_LOG4J_ROOT_LOGLEVEL: INFO
      SCHEMA_REGISTRY_KAFKASTORE_TOPIC: _schemas

  kafka-connect0:
    image: confluentinc/cp-kafka-connect:5.2.4
    ports:
      - 8083:8083
    depends_on:
      - kafka0
      - schemaregistry0
    environment:
      CONNECT_BOOTSTRAP_SERVERS: kafka0:29092
      CONNECT_GROUP_ID: compose-connect-group
      CONNECT_CONFIG_STORAGE_TOPIC: _connect_configs
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_OFFSET_STORAGE_TOPIC: _connect_offset
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_STATUS_STORAGE_TOPIC: _connect_status
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
      CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schemaregistry0:8085
      CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_REST_ADVERTISED_HOST_NAME: kafka-connect0
      CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components"
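
  # Helper container: waits for the second cluster, pre-creates the demo topics, and seeds second.users from message.json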
  kafka-init-topics:
    image: confluentinc/cp-kafka:5.2.4
    volumes:
      - ./message.json:/data/message.json
    depends_on:
      - kafka1
    command: "bash -c 'echo Waiting for Kafka to be ready... && \
              cub kafka-ready -b kafka1:29092 1 30 && \
              kafka-topics --create --topic second.users --partitions 3 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
              kafka-topics --create --topic second.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper1:2181 && \
              kafka-topics --create --topic first.messages --partitions 2 --replication-factor 1 --if-not-exists --zookeeper zookeeper0:2181 && \
              kafka-console-producer --broker-list kafka1:29092 -topic second.users < /data/message.json'"