sudo docker run -dit \
--net docker-hadoop-net \
--ip 172.170.0.12 \
--restart=always \
--hostname=hadoop_zookeeper \
--name=hadoop-zookeeper \
-p 12181:2181 \
-v /usr/docker/software/zookeeper/data/:/data/ \
-v /usr/docker/software/zookeeper/datalog/:/datalog/ \
-v /usr/docker/software/zookeeper/logs/:/logs/ \
-v /etc/localtime:/etc/localtime \
-e TZ='Asia/Shanghai' \
-e LANG="en_US.UTF-8" \
zookeeper:3.5.6
Explanation:
docker-hadoop-net: the custom bridge network, created beforehand with
docker network create --driver bridge --subnet=172.170.0.0/24 --gateway 172.170.0.254 docker-hadoop-net
--ip 172.170.0.12: the fixed IP address assigned to the container
--label: an optional Docker label to make containers easier to manage; omitted in the command above
-v: bind-mounts host directories into the container
--name=hadoop-zookeeper: the name of the ZooKeeper container
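Before (or right after) starting the container it is worth making sure the host mount directories exist and that ZooKeeper is actually serving. A minimal sanity check, assuming the host paths from the -v flags above and that the official zookeeper image keeps zkServer.sh on its PATH:
# create the host directories used by the -v mounts (run before docker run)
mkdir -p /usr/docker/software/zookeeper/{data,datalog,logs}
# confirm the network and fixed IP were applied
docker network inspect docker-hadoop-net
# confirm ZooKeeper is up inside the container
docker exec -it hadoop-zookeeper zkServer.sh status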
1. First, start a bare-bones Kafka container
sudo docker run -dit \
--restart=always \
--privileged=true \
--hostname=hadoop_kafka \
--name=hadoop-kafka \
-p 19092:9092 \
-v /etc/localtime:/etc/localtime \
-e TZ='Asia/Shanghai' \
-e LANG="en_US.UTF-8" \
wurstmeister/kafka:2.12-2.4.0
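A small precaution before step 2 (paths assumed from the commands that follow): confirm the throwaway container exists and create the host directory the files will be copied into.
docker ps -a --filter name=hadoop-kafka   # the container only needs to exist; docker cp works even if the broker is not fully up
mkdir -p /usr/docker/software/kafka/      # host target directory for the docker cp commands in step 2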
2. Copy the files out of the container to the host
docker cp hadoop-kafka:/opt/kafka/config/ /usr/docker/software/kafka/
docker cp hadoop-kafka:/opt/kafka/libs/ /usr/docker/software/kafka/
docker cp hadoop-kafka:/kafka/ /usr/docker/software/kafka/
3. Stop and remove the temporary container
docker stop hadoop-kafka
docker rm hadoop-kafka
/// The steps above exist only to pull Kafka's configuration files out of the image so they can be bind-mounted when the container is started for real.
4. Start the real Kafka container
sudo docker run -dit \
--net docker-hadoop-net \
--ip 172.170.0.13 \
--restart=always \
--privileged=true \
--hostname=hadoop_kafka \
--name=hadoop-kafka \
-p 19092:9092 \
-v /usr/docker/software/kafka/config/:/opt/kafka/config/ \
-v /usr/docker/software/kafka/libs/:/opt/kafka/libs/ \
-v /usr/docker/software/kafka/logs/:/kafka/ \
-v /etc/localtime:/etc/localtime \
-e KAFKA_BROKER_ID=0 \
-e KAFKA_ZOOKEEPER_CONNECT=hadoop-zookeeper:2181/kafka \
-e KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092 \
-e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://hadoop-kafka:9092 \
-e KAFKA_ADVERTISED_HOST_NAME=hadoop-kafka \
-e KAFKA_ADVERTISED_PORT=9092 \
-e TZ='Asia/Shanghai' \
-e LANG="en_US.UTF-8" \
wurstmeister/kafka:2.12-2.4.0
Explanation:
hadoop-zookeeper: the ZooKeeper container created above, referenced by its container name (resolvable because both containers share docker-hadoop-net)
hadoop-kafka: the name of this Kafka container
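To check that the broker came up and registered itself under the /kafka chroot set in KAFKA_ZOOKEEPER_CONNECT, a couple of quick checks (assuming zkCli.sh is on PATH in the official zookeeper image):
docker logs --tail 50 hadoop-kafka
# broker id 0 should show up under the /kafka chroot in ZooKeeper
docker exec -it hadoop-zookeeper zkCli.sh -server localhost:2181 ls /kafka/brokers/ids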
a. Look up the Kafka container's CONTAINER ID:
docker ps --filter name=kafka
Suppose the ID returned is ba2540992d9e
b. Enter the container using that ID:
docker exec -it ba2540992d9e /bin/bash
c. Test sending messages
Inside the container shell (run ls under /opt first; the directory name depends on which image version was pulled):
cd /opt/kafka_2.12-2.4.0/bin
d. Create a topic named mytopic (note the same hadoop-zookeeper:2181/kafka address and chroot as KAFKA_ZOOKEEPER_CONNECT):
./kafka-topics.sh --create --zookeeper hadoop-zookeeper:2181/kafka --replication-factor 1 --partitions 1 --topic mytopic
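To confirm the topic was actually created, it can be listed or described against the same ZooKeeper chroot (still inside the container):
./kafka-topics.sh --list --zookeeper hadoop-zookeeper:2181/kafka
./kafka-topics.sh --describe --zookeeper hadoop-zookeeper:2181/kafka --topic mytopic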
e. Run a producer (we are already inside the bin directory):
./kafka-console-producer.sh --broker-list localhost:9092 --topic mytopic
f. Send a message from the producer
/opt/kafka_2.12-2.4.0/bin # ./kafka-console-producer.sh --broker-list localhost:9092 --topic mytopic
>{"datas":[{"channel":"","metric":"temperature","producer":"ijinus","sn":"IJA0101-00002245","time":"1543207156000","value":"80"}],"ver":"1.0"}
g. Open another shell into the container in a second terminal window and run a consumer
/opt/kafka_2.12-2.4.0/bin # ./kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic mytopic --from-beginning
{"datas":[{"channel":"","metric":"temperature","producer":"ijinus","sn":"IJA0101-00002245","time":"1543207156000","value":"80"}],"ver":"1.0"}