Deploying a Kafka Cluster with Docker (multiple servers; in production, check the version numbers first)
1. Reference
Deploying a Kafka cluster with Docker (multiple servers)
2. Install docker-compose
See the earlier blog post for the installation steps (it only takes a minute).
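If docker-compose is not installed yet, a minimal install sketch is shown below; the release version (1.29.2) is an assumption, so substitute whatever matches your environment:

# Download a docker-compose binary; the version number here is an assumption
curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
# Verify the installation
docker-compose --version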
3. Install the kafka-cluster
Directory structure
docker-kafka-cluster
├── docker-compose.yml
├── .env
└── start.sh
docker-compose.yml:
version: '3'
services:
  kafka-manager:
    image: sheepkiller/kafka-manager
    restart: always
    hostname: kafka-manager
    container_name: kafka-manager
    ports:
      - 9000:9000
    environment:
      ZK_HOSTS: ${HOST1}:2181,${HOST2}:2181,${HOST3}:2181
      KAFKA_BROKERS: ${HOST1}:9092,${HOST2}:9092,${HOST3}:9092
      APPLICATION_SECRET: letmein
      KM_ARGS: -Djava.net.preferIPv4Stack=true
    network_mode: host
  zoo1:
    image: wurstmeister/zookeeper
    restart: always
    hostname: zoo1
    container_name: zoo1
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    volumes:
      - /data/kafka-cluster/zoo1/data:/data:Z
      - /data/kafka-cluster/zoo1/datalog:/datalog:Z
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=${HOST1}:2888:3888;2181 server.2=${HOST2}:2888:3888;2181 server.3=${HOST3}:2888:3888;2181
    network_mode: host
  kafka1:
    image: wurstmeister/kafka
    restart: always
    hostname: kafka1
    container_name: kafka1
    ports:
      - 9092:9092
    volumes:
      - /data/kafka-cluster/kafka1/logs:/kafka:Z
    environment:
      KAFKA_ADVERTISED_HOST_NAME: ${HOST1}
      KAFKA_HOST_NAME: ${HOST1}
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: ${HOST1}:2181,${HOST2}:2181,${HOST3}:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://${HOST1}:9092
      KAFKA_LISTENERS: PLAINTEXT://${HOST1}:9092
    network_mode: host
  zoo2:
    image: wurstmeister/zookeeper
    restart: always
    hostname: zoo2
    container_name: zoo2
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    volumes:
      - /data/kafka-cluster/zoo2/data:/data:Z
      - /data/kafka-cluster/zoo2/datalog:/datalog:Z
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=${HOST1}:2888:3888;2181 server.2=${HOST2}:2888:3888;2181 server.3=${HOST3}:2888:3888;2181
    network_mode: host
  kafka2:
    image: wurstmeister/kafka
    restart: always
    hostname: kafka2
    container_name: kafka2
    ports:
      - 9092:9092
    environment:
      KAFKA_ADVERTISED_HOST_NAME: ${HOST2}
      KAFKA_HOST_NAME: ${HOST2}
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_BROKER_ID: 2
      KAFKA_ZOOKEEPER_CONNECT: ${HOST1}:2181,${HOST2}:2181,${HOST3}:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://${HOST2}:9092
      KAFKA_LISTENERS: PLAINTEXT://${HOST2}:9092
    volumes:
      - /data/kafka-cluster/kafka2/logs:/kafka:Z
    network_mode: host
  zoo3:
    image: wurstmeister/zookeeper
    restart: always
    hostname: zoo3
    container_name: zoo3
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    volumes:
      - /data/kafka-cluster/zoo3/data:/data:Z
      - /data/kafka-cluster/zoo3/datalog:/datalog:Z
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=${HOST1}:2888:3888;2181 server.2=${HOST2}:2888:3888;2181 server.3=${HOST3}:2888:3888;2181
    network_mode: host
  kafka3:
    image: wurstmeister/kafka
    restart: always
    hostname: kafka3
    container_name: kafka3
    ports:
      - 9092:9092
    environment:
      KAFKA_ADVERTISED_HOST_NAME: ${HOST3}
      KAFKA_HOST_NAME: ${HOST3}
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_BROKER_ID: 3
      KAFKA_ZOOKEEPER_CONNECT: ${HOST1}:2181,${HOST2}:2181,${HOST3}:2181
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://${HOST3}:9092
      KAFKA_LISTENERS: PLAINTEXT://${HOST3}:9092
    volumes:
      - /data/kafka-cluster/kafka3/logs:/kafka:Z
    network_mode: host
[root@k8s-01 docker-kafka-cluster]# cat .env
HOST1=192.168.3.201
HOST2=192.168.3.202
HOST3=192.168.3.203
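Optionally, with docker-compose.yml and .env both in place, the variable substitution can be sanity-checked before anything is started:

# Render the compose file with the values from .env substituted and validate the syntax
docker-compose config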
[root@k8s-01 docker-kafka-cluster]# cat start.sh
# Deploy zoo1, kafka1 and kafka-manager
mkdir -p /data/kafka-cluster/zoo1/data
mkdir -p /data/kafka-cluster/zoo1/datalog
mkdir -p /data/kafka-cluster/kafka1/logs
docker-compose up -d zoo1
docker-compose up -d kafka1
docker-compose up -d kafka-manager

# Deploy zoo2 and kafka2
# mkdir -p /data/kafka-cluster/zoo2/data
# mkdir -p /data/kafka-cluster/zoo2/datalog
# mkdir -p /data/kafka-cluster/kafka2/logs
# docker-compose up -d zoo2
# docker-compose up -d kafka2

# Deploy zoo3 and kafka3
# mkdir -p /data/kafka-cluster/zoo3/data
# mkdir -p /data/kafka-cluster/zoo3/datalog
# mkdir -p /data/kafka-cluster/kafka3/logs
# docker-compose up -d zoo3
# docker-compose up -d kafka3
Run start.sh on each of the three hosts. On the second and third hosts, comment out the zoo1/kafka1/kafka-manager block and uncomment the zoo2/kafka2 or zoo3/kafka3 block, respectively. A quick health check is sketched below.
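Once all three hosts are up, a minimal way to confirm that the containers are running and that the ZooKeeper ensemble has formed a quorum (this assumes nc is available on the host; the srvr four-letter command reports each node's role):

# List the running containers on this host
docker ps --format 'table {{.Names}}\t{{.Status}}'
# Ask the local ZooKeeper node for its mode; across the three hosts expect one leader and two followers
echo srvr | nc 127.0.0.1 2181 | grep Mode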
4. Testing via the shell
1. Create a test topic on the Broker1 server node
On Broker1, create a topic with 3 replicas and 5 partitions for testing.
Because Kafka spreads a topic's partitions across the brokers, the 5 partitions of this topic will be distributed over the 3 brokers: two brokers get 2 partitions each and the remaining broker gets 1. This is verified below.
[root@k8s-01 docker-kafka-cluster]# docker exec -it kafka1 /bin/bash
bash-5.1# cd /opt/kafka_2.13-2.7.1/bin/
bash-5.1# kafka-topics.sh --create --zookeeper 192.168.3.201:2181 --replication-factor 3 --partitions 5 --topic TestTopic
Created topic TestTopic.
View the information for the newly created topic.
[Screenshot: kafka-topics.sh describe output for TestTopic]
The key lines of the describe output are explained below:

| Topic: TestTopic PartitionCount: 5 ReplicationFactor: 3 | TestTopic has 5 partitions and 3 replicas. |
| Topic: TestTopic Partition: 0 Leader: 3 Replicas: 3,1,2 Isr: 3,1,2 | Leader: 3 means the leader replica of partition 0 of TestTopic is on the broker with broker.id = 3. Replicas lists the brokers that hold a replica of this partition (broker.id = 3, 1, 2), including both the leader and the follower replicas, whether or not they are alive. Isr lists the replicas that are alive and in sync with the leader (broker.id = 3, 1, 2). |
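The output above comes from the standard topic describe call; a sketch, run from the same bin directory inside the kafka1 container (any of the three ZooKeeper addresses works):

bash-5.1# kafka-topics.sh --describe --zookeeper 192.168.3.201:2181 --topic TestTopic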
2. Kafka cluster verification
The previous step created the topic TestTopic on Broker1. Now enter the Kafka2 and Kafka3 containers and check whether the topic has been synchronized to them.
Check kafka2
[Screenshot: topic list on kafka2 showing TestTopic]
Check kafka3
[Screenshot: topic list on kafka3 showing TestTopic]
As can be seen, the newly created topic has been synchronized to the Kafka2 and Kafka3 containers.
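The check itself is just listing the topics from inside each container; a sketch using the bin path shown earlier (run each command on the host where that container lives):

# On the host running kafka2
docker exec -it kafka2 /opt/kafka_2.13-2.7.1/bin/kafka-topics.sh --list --zookeeper 192.168.3.202:2181
# On the host running kafka3
docker exec -it kafka3 /opt/kafka_2.13-2.7.1/bin/kafka-topics.sh --list --zookeeper 192.168.3.203:2181
# Both listings should include TestTopic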
Run a producer on Broker1, and a consumer on each of Broker2 and Broker3.
Run the producer on broker1:
bash-5.1# kafka-console-producer.sh --broker-list 192.168.3.201:9092 --topic TestTopic
>hello world    # the message typed in
Run a consumer on broker2; it receives the message sent by the producer:
bash-5.1# kafka-console-consumer.sh --bootstrap-server 192.168.3.202:9092 --topic TestTopic --from-beginning
hello world    # the message received
Run a consumer on broker3; it also receives the message sent by the producer:
bash-5.1# kafka-console-consumer.sh --bootstrap-server 192.168.3.203:9092 --topic TestTopic --from-beginning
hello world    # the message received
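Optionally, the consumer side can also be inspected from a broker with the consumer-groups tool; the console consumers register under auto-generated group names, so list the groups first (a sketch):

bash-5.1# kafka-consumer-groups.sh --bootstrap-server 192.168.3.201:9092 --list
# Then describe one of the console-consumer groups returned above (the group name below is a placeholder)
bash-5.1# kafka-consumer-groups.sh --bootstrap-server 192.168.3.201:9092 --describe --group <console-consumer-group-name>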
