docker运维部署之kafka集群部署

主机 部署组件
192.168.0.33 zookeeper、kafka
192.168.0.34 zookeeper、kafka
192.168.0.35 zookeeper、kafka

1. 获取镜像

# Pull the official ZooKeeper image (the bare name defaults to :latest).
docker pull zookeeper:latest
# Pull the community Kafka image.
docker pull wurstmeister/kafka:latest

2. 创建zk和kafka挂载目录

# Create the host-side mount points: ZooKeeper data + config, Kafka logs.
mkdir -p \
  /data/zookeeper-cluster/zookeeper/data \
  /data/zookeeper-cluster/zookeeper/conf \
  /data/kafka-cluster/kafka/logs

3. 创建zk的zoo.cfg

# Go to the ZooKeeper config mount, create zoo.cfg, then edit it
# (paste in the configuration shown below).
cd /data/zookeeper-cluster/zookeeper/conf && touch zoo.cfg
vi zoo.cfg

三台主机设置相同的配置文件
# ZooKeeper ensemble configuration — identical on all three hosts.
clientPort=2181
# Paths are inside the container (backed by the host -v mounts).
dataDir=/data
dataLogDir=/data/log
# Base time unit in ms; heartbeats and the limits below derive from it.
tickTime=2000
# A follower has initLimit*tickTime to connect and sync with the leader.
initLimit=5
# A follower may lag at most syncLimit*tickTime behind the leader.
syncLimit=2
# Keep 3 snapshots; purgeInterval=0 disables the automatic purge task.
autopurge.snapRetainCount=3
autopurge.purgeInterval=0
maxClientCnxns=60
# server.<myid>=<host>:<quorum-port>:<leader-election-port>
server.0=192.168.0.33:2888:3888
server.1=192.168.0.34:2888:3888
server.2=192.168.0.35:2888:3888

4. 分配server id

# ZooKeeper node 1 (192.168.0.33): myid must match "server.0" in zoo.cfg.
cd /data/zookeeper-cluster/zookeeper/data
touch myid
printf '%s\n' 0 > myid

# ZooKeeper node 2 (192.168.0.34): myid must match "server.1" in zoo.cfg.
cd /data/zookeeper-cluster/zookeeper/data
touch myid
printf '%s\n' 1 > myid

# ZooKeeper node 3 (192.168.0.35): myid must match "server.2" in zoo.cfg.
cd /data/zookeeper-cluster/zookeeper/data
touch myid
printf '%s\n' 2 > myid

5. 启动zk各节点容器

# Run the same command on all three hosts. Host networking exposes the
# client port (2181) and quorum/election ports (2888/3888) directly.
docker run -d \
  --name zookeeper_node \
  --network host \
  --restart always \
  -v /data/zookeeper-cluster/zookeeper/data:/data \
  -v /data/zookeeper-cluster/zookeeper/conf:/conf \
  zookeeper:latest

# Check each node's role (one leader, two followers):
docker exec -it zookeeper_node /bin/bash
cd bin
./zkServer.sh status

6. 启动kafka各节点容器

# Kafka broker 1, runs on 192.168.0.33.
docker run -d -t \
  --name kafka_node \
  --restart always \
  -p 9092:9092 \
  -v /data/kafka-cluster/kafka/logs:/kafka \
  -e KAFKA_BROKER_ID=1 \
  -e KAFKA_ZOOKEEPER_CONNECT=192.168.0.33:2181,192.168.0.34:2181,192.168.0.35:2181 \
  -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://192.168.0.33:9092 \
  -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
  wurstmeister/kafka:latest

# Kafka broker 2, runs on 192.168.0.34.
docker run -d -t \
  --name kafka_node \
  --restart always \
  -p 9092:9092 \
  -v /data/kafka-cluster/kafka/logs:/kafka \
  -e KAFKA_BROKER_ID=2 \
  -e KAFKA_ZOOKEEPER_CONNECT=192.168.0.33:2181,192.168.0.34:2181,192.168.0.35:2181 \
  -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://192.168.0.34:9092 \
  -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
  wurstmeister/kafka:latest

# Kafka broker 3, runs on 192.168.0.35.
docker run -d -t \
  --name kafka_node \
  --restart always \
  -p 9092:9092 \
  -v /data/kafka-cluster/kafka/logs:/kafka \
  -e KAFKA_BROKER_ID=3 \
  -e KAFKA_ZOOKEEPER_CONNECT=192.168.0.33:2181,192.168.0.34:2181,192.168.0.35:2181 \
  -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://192.168.0.35:9092 \
  -e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
  wurstmeister/kafka:latest
7. 验证kafka是否正确安装
# Enter the Kafka container on one host, create a topic, and start a
# console producer.
docker exec -it kafka_node /bin/bash
# Use KAFKA_HOME rather than a hard-coded versioned path such as
# /opt/kafka_2.12-2.3.0/bin — the wurstmeister image sets KAFKA_HOME
# (symlinked to the installed version), so this survives image upgrades.
cd "$KAFKA_HOME/bin"
# Create the topic. replication-factor 1 keeps the demo simple; set it to
# 3 if you want the verification to also exercise cluster replication.
./kafka-topics.sh --create --zookeeper 192.168.0.33:2181 --replication-factor 1 --partitions 8 --topic test
# Start a console producer against broker 1.
./kafka-console-producer.sh --broker-list 192.168.0.33:9092 --topic test

# On another host (in a new terminal), enter the Kafka container and
# start a console consumer for the same topic.
docker exec -it kafka_node /bin/bash
# KAFKA_HOME is set by the image; avoids hard-coding the Kafka version.
cd "$KAFKA_HOME/bin"
# Consume from the beginning of the topic via broker 2.
./kafka-console-consumer.sh --bootstrap-server 192.168.0.34:9092 --topic test --from-beginning
执行完上述命令后,在生产者窗口中输入任意内容并回车,即可在消费者窗口中看到该消息

你可能感兴趣的:(docker运维部署之kafka集群部署)