Apache Kafka、kafkacat 和 Docker:消息太大(Message size too large)

我们正在使用以下 docker-compose 配置运行 Kafka 集群:
---
version: '2'
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:latest
    ports:
      # Quote port mappings so YAML never mis-types them as numbers/sexagesimal.
      - "32181:32181"
    environment:
      # Env values quoted as strings: compose passes env vars as text anyway,
      # and quoting avoids YAML implicit-typing surprises.
      ZOOKEEPER_CLIENT_PORT: "32181"
      ZOOKEEPER_TICK_TIME: "2000"
    extra_hosts:
      # extra_hosts entries are "host:ip" — no space after the colon,
      # otherwise Docker receives a malformed host mapping.
      - "moby:127.0.0.1"
      - "localhost:127.0.0.1"
  kafka:
    image: confluentinc/cp-kafka:latest
    hostname: kafka-host
    ports:
      - "19092:19092"
    depends_on:
      - zookeeper
    environment:
      KAFKA_BROKER_ID: "1"
      # Broker-side max message size (~22 MB). NOTE(review): the producer
      # (kafkacat/librdkafka) has its own client-side message.max.bytes
      # (default ~1 MB) that must be raised too — see kafkacat -X.
      KAFKA_MESSAGE_MAX_BYTES: "22797510"
      KAFKA_REPLICA_FETCH_MAX_BYTES: "22797510"
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://docker.for.mac.host.internal:19092
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "3"
    extra_hosts:
      - "moby:127.0.0.1"
      - "localhost:127.0.0.1"
  kafka2:
    image: confluentinc/cp-kafka:latest
    hostname: kafka-host2
    ports:
      - "29092:29092"
    depends_on:
      - zookeeper
    environment:
      KAFKA_BROKER_ID: "2"
      KAFKA_MESSAGE_MAX_BYTES: "22797510"
      KAFKA_REPLICA_FETCH_MAX_BYTES: "22797510"
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://docker.for.mac.host.internal:29092
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "3"
    extra_hosts:
      - "moby:127.0.0.1"
      - "localhost:127.0.0.1"
  kafka3:
    image: confluentinc/cp-kafka:latest
    # Fixed copy-paste error: was "kafka-host2", duplicating kafka2's hostname.
    hostname: kafka-host3
    ports:
      - "39092:39092"
    depends_on:
      - zookeeper
    environment:
      KAFKA_BROKER_ID: "3"
      KAFKA_MESSAGE_MAX_BYTES: "22797510"
      KAFKA_REPLICA_FETCH_MAX_BYTES: "22797510"
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://docker.for.mac.host.internal:39092
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "3"
    extra_hosts:
      - "moby:127.0.0.1"
      - "localhost:127.0.0.1"
  schema-registry:
    image: confluentinc/cp-schema-registry:latest
    hostname: sr-host
    ports:
      - "29081:29081"
    depends_on:
      - zookeeper
      - kafka
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper:32181
      SCHEMA_REGISTRY_HOST_NAME: sr-host
      SCHEMA_REGISTRY_LISTENERS: http://sr-host:29081
    extra_hosts:
      - "moby:127.0.0.1"
      - "localhost:127.0.0.1"
  kafka-rest:
    image: confluentinc/cp-kafka-rest:latest
    hostname: kr-host
    ports:
      - "29080:29080"
    depends_on:
      - zookeeper
      - kafka
      - schema-registry
    environment:
      KAFKA_REST_ZOOKEEPER_CONNECT: zookeeper:32181
      KAFKA_REST_LISTENERS: http://kr-host:29080
      KAFKA_REST_SCHEMA_REGISTRY_URL: http://sr-host:29081
      KAFKA_REST_HOST_NAME: kr-host
      KAFKA_REST_BOOTSTRAP_SERVERS: docker.for.mac.host.internal:29092
    extra_hosts:
      - "moby:127.0.0.1"
      - "localhost:127.0.0.1"
当我们试图发布消息时
kafkacat -b docker.for.mac.host.internal:19092,docker.for.mac.host.internal:29092,docker.for.mac.host.internal:39092 -P -t cr -K, -l file.txt
但这给我们带来了一个错误
%ERROR: Failed to produce message (2279751 bytes): Broker: Message size too large
(错误:无法生产消息(2279751 字节):代理:消息大小太大)
为什么代理似乎忽略了 KAFKA_MESSAGE_MAX_BYTES 的值?

评论:
— 您是否重新启动了 Kafka 集群?
— 重启过了(执行了 docker compose down 和 docker compose up)。
— 您是否尝试在容器内复现?
file.txt