使用docker-compose部署zookeeper和kafka集群

一、总体架构描述

虚拟机 centos7
kafka(三节点:broker1,broker2,broker3) IP:172.23.0.14,172.23.0.15,172.23.0.16
zookeeper(三节点:zoo1,zoo2,zoo3) IP:172.23.0.11,172.23.0.12,172.23.0.13

ip均为docker内构建的虚拟静态地址。

二、搭建步骤

(一)在虚拟机内安装好docker;
(二)拉取镜像

docker pull wurstmeister/kafka
docker pull zookeeper:3.4
docker pull sheepkiller/kafka-manager:latest

【遇到的问题及解决】
pull镜像很慢,解决方法如下:

##使用阿里云镜像加速器
[root@localhost ~]# mkdir -p /etc/docker
[root@localhost ~]# tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://9cpn8tt6.mirror.aliyuncs.com"]
}
EOF
[root@localhost ~]# systemctl daemon-reload
[root@localhost ~]# systemctl restart docker

(三)创建集群虚拟网络

docker network create --driver bridge --subnet 172.23.0.0/16 --gateway 172.23.0.1 zoo_kafka

查看网络

[root@localhost kafka]# docker network ls
NETWORK ID          NAME                DRIVER              SCOPE
2930abd42b08        bridge              bridge              local
6bf68d1f8d9f        host                host                local
b941fd14364a        kafka               bridge              local
4910af7c6c6f        none                null                local
0a0931acdf04        viemall-zookeeper   bridge              local
e8a38e297f2d        zoo_kafka           bridge              local

(四)部署zookeeper

mkdir zookeeper
cd zookeeper
vim docker-compose.yml

docker-compose.yml内容如下:

version: '2'
services:
 zoo1:
  image: zookeeper:3.4 # official ZooKeeper image, 3.4 branch
  restart: always # restart automatically on failure
  hostname: zoo1
  container_name: zoo1
  privileged: true
  ports: # host:container port mapping (each node exposed on a distinct host port)
   - 2181:2181
  volumes: # persist data and transaction log on the host
   - ./zoo1/data:/data
   - ./zoo1/datalog:/datalog 
  environment:
   TZ: Asia/Shanghai
   ZOO_MY_ID: 1 # node ID, must match the server.N entry in ZOO_SERVERS
   ZOO_PORT: 2181 # ZooKeeper client port
   ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888 # ensemble member list (peer:leader-election ports)
  networks:
   default:
    ipv4_address: 172.23.0.11

 zoo2:
  image: zookeeper:3.4
  restart: always
  hostname: zoo2
  container_name: zoo2
  privileged: true
  ports:
   - 2182:2181
  volumes:
   - ./zoo2/data:/data
   - ./zoo2/datalog:/datalog
  environment:
   TZ: Asia/Shanghai
   ZOO_MY_ID: 2
   ZOO_PORT: 2181
   ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
  networks:
   default:
    ipv4_address: 172.23.0.12

 zoo3:
  image: zookeeper:3.4
  restart: always
  hostname: zoo3
  container_name: zoo3
  privileged: true
  ports:
   - 2183:2181
  volumes:
   - ./zoo3/data:/data
   - ./zoo3/datalog:/datalog
  environment:
   TZ: Asia/Shanghai
   ZOO_MY_ID: 3
   ZOO_PORT: 2181
   ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
  networks:
   default:
    ipv4_address: 172.23.0.13

# attach all services to the pre-created external network (step 3)
networks:
 default:
  external:
   name: zoo_kafka

在/root/zookeeper路径下执行:

docker-compose up -d

正常情况下都没啥问题,部署成功。

[root@localhost ~]# docker-compose ps
Name              Command               State                     Ports                   
------------------------------------------------------------------------------------------
zoo1   /docker-entrypoint.sh zkSe ...   Up      0.0.0.0:2181->2181/tcp, 2888/tcp, 3888/tcp
zoo2   /docker-entrypoint.sh zkSe ...   Up      0.0.0.0:2182->2181/tcp, 2888/tcp, 3888/tcp
zoo3   /docker-entrypoint.sh zkSe ...   Up      0.0.0.0:2183->2181/tcp, 2888/tcp, 3888/tcp

(五)部署kafka

mkdir /root/kafka
cd /root/kafka
vim docker-compose.yml

docker-compose.yml内容:

version: '2'

services:
 broker1:
  image: wurstmeister/kafka
  restart: always # restart automatically on failure
  hostname: broker1
  container_name: broker1
  privileged: true
  ports:
   - "9091:9092"
  environment:
   KAFKA_BROKER_ID: 1 # must be unique per broker
   KAFKA_LISTENERS: PLAINTEXT://broker1:9092
   KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker1:9092 # address published to clients via ZooKeeper
   KAFKA_ADVERTISED_HOST_NAME: broker1
   KAFKA_ADVERTISED_PORT: 9092
   KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
   #JMX_PORT: 9987 # left disabled: enabling JMX here caused startup errors (see notes below)
  volumes:
   - /var/run/docker.sock:/var/run/docker.sock
   # NOTE: fixed — the original "/kafka/kafka\-logs\-broker1" contained literal
   # backslashes (YAML does not unescape "\-" in plain scalars), so the intended
   # log directory was never mounted.
   - ./broker1:/kafka/kafka-logs-broker1
  external_links: # containers created outside this compose file
  - zoo1
  - zoo2
  - zoo3
  networks:
   default:
    ipv4_address: 172.23.0.14

 broker2:
  image: wurstmeister/kafka
  restart: always
  hostname: broker2
  container_name: broker2
  privileged: true
  ports:
   - "9092:9092"
  environment:
   KAFKA_BROKER_ID: 2
   KAFKA_LISTENERS: PLAINTEXT://broker2:9092
   KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker2:9092
   KAFKA_ADVERTISED_HOST_NAME: broker2
   KAFKA_ADVERTISED_PORT: 9092
   KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
   #JMX_PORT: 9988
  volumes:
   - /var/run/docker.sock:/var/run/docker.sock
   - ./broker2:/kafka/kafka-logs-broker2
  external_links: # containers created outside this compose file
  - zoo1
  - zoo2
  - zoo3
  networks:
   default:
    ipv4_address: 172.23.0.15

 broker3:
  image: wurstmeister/kafka
  restart: always
  hostname: broker3
  container_name: broker3
  privileged: true
  ports:
   - "9093:9092"
  environment:
   KAFKA_BROKER_ID: 3
   KAFKA_LISTENERS: PLAINTEXT://broker3:9092
   KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker3:9092
   KAFKA_ADVERTISED_HOST_NAME: broker3
   KAFKA_ADVERTISED_PORT: 9092
   KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
   #JMX_PORT: 9989
  volumes:
   - /var/run/docker.sock:/var/run/docker.sock
   - ./broker3:/kafka/kafka-logs-broker3
  external_links: # containers created outside this compose file
  - zoo1
  - zoo2
  - zoo3
  networks:
   default:
    ipv4_address: 172.23.0.16

 kafka-manager:
  image: sheepkiller/kafka-manager:latest
  restart: always
  container_name: kafka-manager
  hostname: kafka-manager
  ports:
   - "9000:9000"
  links:      # containers created by this compose file
   - broker1
   - broker2
   - broker3
  external_links:  # containers created outside this compose file
   - zoo1
   - zoo2
   - zoo3
  environment:
   ZK_HOSTS: zoo1:2181,zoo2:2181,zoo3:2181
   KAFKA_BROKERS: broker1:9092,broker2:9092,broker3:9092
   APPLICATION_SECRET: letmein
   KM_ARGS: -Djava.net.preferIPv4Stack=true
  networks:
   default:
    ipv4_address: 172.23.0.10

networks:
 default:
  external:  # reuse the pre-created zoo_kafka network
   name: zoo_kafka

在/root/kafka下执行

docker-compose up -d

这一步理论上也没啥问题

[root@localhost kafka]# docker-compose ps
    Name                Command            State           Ports         
-------------------------------------------------------------------------
broker1         start-kafka.sh             Up      0.0.0.0:9091->9092/tcp
broker2         start-kafka.sh             Up      0.0.0.0:9092->9092/tcp
broker3         start-kafka.sh             Up      0.0.0.0:9093->9092/tcp
kafka-manager   ./start-kafka-manager.sh   Up      0.0.0.0:9000->9000/tcp

(六)测试是否搭建成功

docker exec -it broker1 /bin/bash

测试

bash-4.4# kafka-topics.sh --create --zookeeper 172.23.0.11:2181 --replication-factor 1 --partitions 1 --topic test
Created topic test.
bash-4.4# 
bash-4.4# 
bash-4.4# kafka-console-producer.sh --broker-list 172.23.0.16:9092 --topic test
>你好!
>^Cbash-4.4# kafka-console-consumer.sh --bootstrap-server 172.23.0.16:9092 --topic test --from-beginning
你好!

【遇到的问题及解决】
(1)当初搭建时参考的博文是:https://www.jb51.net/article/168749.htm
但是完全参照走步骤下来,这边会报找不到jmx端口的错,这个问题注释掉docker-compose.yml 中JMX_PORT后,再启动解决。
(2)当时注释了JMX_PORT后重新启动,发现进去某个容器,一段时间就会自动退出,kafka容器全变restart状态了,查了很多资料,大概是这个问题 https://lixiaogang5.blog.csdn.net/article/details/105679680?utm_medium=distribute.pc_relevant_t0.none-task-blog-BlogCommendFromMachineLearnPai2-1.channel_param&depth_1-utm_source=distribute.pc_relevant_t0.none-task-blog-BlogCommendFromMachineLearnPai2-1.channel_param 。解决办法本应是进入容器删掉对应的文件,但问题是容器每次进去都没来得及删就自动退出,没办法,只好删掉容器,重新搭建kafka。这次选择先stop掉zookeeper的容器,再删掉各broker的数据目录,并检查/root/kafka/docker-compose.yml中KAFKA_ZOOKEEPER_CONNECT的配置无多余路径后缀,重新启动后问题解决。

你可能感兴趣的:(kafka,zookeeper)