For the ZooKeeper setup, see https://www.cnblogs.com/wintersoft/p/11128484.html
mkdir /opt/kafka -p
vim /opt/kafka/Dockerfile
FROM wurstmeister/kafka:2.12-2.3.0
EXPOSE 9092
sudo mkdir -p /var/log/kafka;sudo chmod -R 777 /var/log/kafka
vim /opt/kafka/docker-compose.yml
version: '2'
services:
  kafka:
    image: v-kafka
    container_name: kafka
    build:
      context: .
      dockerfile: Dockerfile
    restart: always
    ports:
      - "9092:9092"
    environment:
      KAFKA_ADVERTISED_HOST_NAME: kafkaserver
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_ZOOKEEPER_CONNECT: "zookeeperserver:2181"
    volumes:
      - /var/log/kafka/:/kafka
      - /var/run/docker.sock:/var/run/docker.sock
    extra_hosts:
      - "kafkaserver:192.168.0.101"
      - "zookeeperserver:192.168.0.101"
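Before building, it is worth validating the file; docker-compose config parses the YAML and prints the resolved configuration (or an error if the indentation is off):
cd /opt/kafka/
docker-compose config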
Build and start
cd /opt/kafka/
docker-compose build
docker-compose up -d --force-recreate
docker-compose down
docker-compose restart
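After any of these, docker-compose ps shows whether the service actually came up:
docker-compose ps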
Check that the port is listening
netstat -anltp|grep 9092
View the logs
docker logs --tail="500" kafka
docker logs -f kafka
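To confirm the broker finished starting, grep the log for its startup line (wording below is what Kafka 2.x prints, quoted from memory, so treat it as approximate):
docker logs kafka 2>&1 | grep "started (kafka.server.KafkaServer)"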
Enter the container
docker exec -it kafka /bin/bash
Pseudo-cluster (three brokers on one host)
sudo mkdir -p /var/log/kafka/node1;sudo chmod -R 777 /var/log/kafka/node1
sudo mkdir -p /var/log/kafka/node2;sudo chmod -R 777 /var/log/kafka/node2
sudo mkdir -p /var/log/kafka/node3;sudo chmod -R 777 /var/log/kafka/node3
vim /opt/kafka/docker-compose.yml
version: '2'
services:
  kafka1:
    image: v-kafka1
    container_name: kafka1
    build:
      context: .
      dockerfile: Dockerfile
    restart: always
    ports:
      - 9011:9092
    environment:
      KAFKA_PORT: 9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafkaserver1:9011
      KAFKA_ADVERTISED_HOST_NAME: kafkaserver1
      KAFKA_ADVERTISED_PORT: 9011
      KAFKA_BROKER_ID: 1
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
      KAFKA_ZOOKEEPER_CONNECT: zookeeperserver1:2181,zookeeperserver2:2182,zookeeperserver3:2183
      KAFKA_DELETE_TOPIC_ENABLE: "true"
    volumes:
      - /var/log/kafka/node1:/kafka
      - /var/run/docker.sock:/var/run/docker.sock
    extra_hosts:
      - "kafkaserver1:192.168.0.101"
      - "kafkaserver2:192.168.0.101"
      - "kafkaserver3:192.168.0.101"
      - "zookeeperserver1:192.168.0.101"
      - "zookeeperserver2:192.168.0.101"
      - "zookeeperserver3:192.168.0.101"
  kafka2:
    image: v-kafka2
    container_name: kafka2
    build:
      context: .
      dockerfile: Dockerfile
    restart: always
    ports:
      - 9012:9092
    environment:
      KAFKA_PORT: 9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafkaserver2:9012
      KAFKA_ADVERTISED_HOST_NAME: kafkaserver2
      KAFKA_ADVERTISED_PORT: 9012
      KAFKA_BROKER_ID: 2
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
      KAFKA_ZOOKEEPER_CONNECT: zookeeperserver1:2181,zookeeperserver2:2182,zookeeperserver3:2183
      KAFKA_DELETE_TOPIC_ENABLE: "true"
    volumes:
      - /var/log/kafka/node2:/kafka
      - /var/run/docker.sock:/var/run/docker.sock
    extra_hosts:
      - "kafkaserver1:192.168.0.101"
      - "kafkaserver2:192.168.0.101"
      - "kafkaserver3:192.168.0.101"
      - "zookeeperserver1:192.168.0.101"
      - "zookeeperserver2:192.168.0.101"
      - "zookeeperserver3:192.168.0.101"
  kafka3:
    image: v-kafka3
    container_name: kafka3
    build:
      context: .
      dockerfile: Dockerfile
    restart: always
    ports:
      - 9013:9092
    environment:
      KAFKA_PORT: 9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafkaserver3:9013
      KAFKA_ADVERTISED_HOST_NAME: kafkaserver3
      KAFKA_ADVERTISED_PORT: 9013
      KAFKA_BROKER_ID: 3
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 2
      KAFKA_ZOOKEEPER_CONNECT: zookeeperserver1:2181,zookeeperserver2:2182,zookeeperserver3:2183
      KAFKA_DELETE_TOPIC_ENABLE: "true"
    volumes:
      - /var/log/kafka/node3:/kafka
      - /var/run/docker.sock:/var/run/docker.sock
    extra_hosts:
      - "kafkaserver1:192.168.0.101"
      - "kafkaserver2:192.168.0.101"
      - "kafkaserver3:192.168.0.101"
      - "zookeeperserver1:192.168.0.101"
      - "zookeeperserver2:192.168.0.101"
      - "zookeeperserver3:192.168.0.101"
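The same lifecycle commands apply; a quick sketch of bringing up the three brokers and confirming all of them are running:
cd /opt/kafka/
docker-compose build
docker-compose up -d --force-recreate
docker ps --filter "name=kafka" --format "{{.Names}}: {{.Status}}"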
Config key rule: take the server.properties key, add the KAFKA_ prefix, uppercase it, and replace each "." with "_".
For example:
To increase the Kafka heap size: KAFKA_HEAP_OPTS=-Xmx4G -Xms4G
When setting KAFKA_LOG_DIRS=/kafka/logs, also mount the directory: volumes: - "./kafka3/logs:/kafka/logs"
kafka-manager's environment can set APPLICATION_SECRET: "xxx"
The value of KAFKA_LISTENERS should be the internal (LAN) address
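As a sketch of the rule, two common server.properties entries (log.retention.hours and num.partitions, example values not used elsewhere in this setup) would go into the compose environment block like this:
environment:
  KAFKA_HEAP_OPTS: "-Xmx4G -Xms4G"    # heap size, as above
  KAFKA_LOG_RETENTION_HOURS: 168      # log.retention.hours=168
  KAFKA_NUM_PARTITIONS: 3             # num.partitions=3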
Without delete.topic.enable=true, deleting a topic is only a soft delete (the topic is merely marked for deletion).
After a soft delete, the Java client reports:
WARN Error while fetching metadata with correlation id 0 : {test=LEADER_NOT_AVAILABLE} (org.apache.kafka.clients.NetworkClient)
The error:
org.apache.kafka.clients.NetworkClient : [Producer clientId=producer-3] 1 partitions have leader brokers without a matching listener, including [log-0]
is usually caused by a wrong ip:port in the ZooKeeper configuration; after fixing it, the ZooKeeper data must be cleared before Kafka behaves normally.
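A minimal sketch of that cleanup, assuming a ZooKeeper container named zookeeper1 (hypothetical) and ZooKeeper 3.5+ (whose zkCli has deleteall; older versions use rmr, and the zkCli.sh path varies by image):
docker exec -it zookeeper1 bin/zkCli.sh
# inside the zkCli prompt: remove the stale broker registrations, then quit
deleteall /brokers
quit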
Copy the configuration out of the container
docker cp kafka1:/opt/kafka/config/ /opt/kafka/kafka1_config_bak/
kafka-manager only displays a cluster after you add its configuration manually in the web UI.
Test Kafka
Enter the container
docker exec -it kafka1 /bin/bash
Create a topic
/opt/kafka/bin/kafka-topics.sh --create --bootstrap-server 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013 --topic myTestTopic --partitions 3 --replication-factor 3
Note: replication-factor cannot exceed the number of brokers.
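For instance, requesting four replicas on this three-broker cluster is rejected (error text paraphrased from Kafka's InvalidReplicationFactorException):
/opt/kafka/bin/kafka-topics.sh --create --bootstrap-server 192.168.0.101:9011 --topic badTopic --partitions 3 --replication-factor 4
# fails with: Replication factor: 4 larger than available brokers: 3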
List current topics
/opt/kafka/bin/kafka-topics.sh --list --bootstrap-server 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013
Run a console producer against the topic myTestTopic created above
/opt/kafka/bin/kafka-console-producer.sh --broker-list 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013 --topic myTestTopic
Type any characters, then press Ctrl+C to exit
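The producer also reads stdin, so for a quick non-interactive smoke test you can pipe a message in (same broker list):
echo "hello kafka" | /opt/kafka/bin/kafka-console-producer.sh --broker-list 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013 --topic myTestTopic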
Describe the topic
/opt/kafka/bin/kafka-topics.sh --describe --bootstrap-server 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013 --topic myTestTopic
Consume messages
/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013 --topic myTestTopic --from-beginning
Press Ctrl+C to exit
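To consume as part of a named group, so offsets are committed and a re-run resumes where it left off, add --group; a sketch with a hypothetical group id:
/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013 --topic myTestTopic --group my-test-group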
Delete the topic
/opt/kafka/bin/kafka-topics.sh --delete --bootstrap-server 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013 --topic myTestTopic
If the topic cannot be deleted, configure KAFKA_DELETE_TOPIC_ENABLE: "true" when starting the Docker container.
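To confirm the delete took effect, list the topics again; myTestTopic should no longer appear (if it lingers, you are in the soft-delete case described above):
/opt/kafka/bin/kafka-topics.sh --list --bootstrap-server 192.168.0.101:9011,192.168.0.101:9012,192.168.0.101:9013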