【6.9.6】Server setup: building a Kafka cluster on Docker Swarm

Continuing from the ZooKeeper section, add the Kafka services to the same stack file.

#docker-compose.yaml
version: '3'

services:
  zoo1:
    image: zookeeper:3.6.1
    restart: always
    hostname: zoo1
    ports:
      - 2181:2181
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /home/zk/zookeeper1/data:/data
      - /home/zk/zookeeper1/datalog:/datalog
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
    networks:
      zookeeper_default:
    deploy:
      placement:
        constraints:
          - node.hostname == worker1      

  zoo2:
    image: zookeeper:3.6.1
    restart: always
    hostname: zoo2
    ports:
      - 2182:2181
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /home/zk/zookeeper2/data:/data
      - /home/zk/zookeeper2/datalog:/datalog 
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
    networks:
      zookeeper_default:
    deploy:
      placement:
        constraints:
          - node.hostname == worker1
  zoo3:
    image: zookeeper:3.6.1
    restart: always
    hostname: zoo3
    ports:
      - 2183:2181
    volumes:
      - /etc/localtime:/etc/localtime:ro
      - /home/zk/zookeeper3/data:/data
      - /home/zk/zookeeper3/datalog:/datalog
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
    networks:
      zookeeper_default:
    deploy:
      placement:
        constraints:
          - node.hostname == worker1     
  
  kafka0:
    hostname: kafka0
    container_name: kafka0
    image: wurstmeister/kafka:2.13-2.7.0
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      - "19092:9092"
    volumes:
      - "/home/kafka/k0:/kafka"      
    environment:
      TZ: "Asia/Shanghai" 
      KAFKA_BROKER_ID: 0
      KAFKA_ZOOKEEPER_CONNECT: 192.168.0.105:2181 
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.0.105:19092 
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092 
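      # KAFKA_LISTENERS is the socket the broker binds to inside the container, while
      # KAFKA_ADVERTISED_LISTENERS is the address the broker registers in ZooKeeper and
      # hands back to clients, so it must be reachable from outside the swarm
      # (here a host IP plus the published host port). KAFKA_ZOOKEEPER_CONNECT points
      # at ZooKeeper through the host-mapped port 2181.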
    deploy:
      placement:
        constraints:
          - node.hostname == manager       
  kafka1:
    hostname: kafka1
    container_name: kafka1
    image: wurstmeister/kafka:2.13-2.7.0
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      - "19093:9093"
    volumes:
      - "/home/kafka/k1:/kafka"      
    environment:
      TZ: "Asia/Shanghai" 
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 192.168.0.105:2181 
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.0.105:19093 
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9093  
    deploy:
      placement:
        constraints:
          - node.hostname == manager
  kafka2:
    hostname: kafka2
    container_name: kafka2
    image: wurstmeister/kafka:2.13-2.7.0
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      - "19094:9094"
    volumes:
      - "/home/kafka/k2:/kafka"      
    environment:
      TZ: "Asia/Shanghai" 
      KAFKA_BROKER_ID: 2
      KAFKA_ZOOKEEPER_CONNECT: 192.168.0.105:2181 
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.0.105:19094 
      KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9094       
    deploy:
      placement:
        constraints:
          - node.hostname == manager 
  kafka-manager:
    image: sheepkiller/kafka-manager            # Optional web management UI; once deployed it is reached through the host IP and the published port, e.g. http://192.168.0.103:9005 below
    environment:
      ZK_HOSTS: 192.168.0.105:2181,192.168.0.105:2182,192.168.0.105:2183
      APPLICATION_SECRET: "letmein"
    ports:
      - "9005:9000"
    expose:
      - "9000"
    depends_on:
      - kafka2
      - kafka1
      - kafka0
    deploy:
      placement:
        constraints:
          - node.hostname == manager

# top-level definition of the overlay network the zoo* services join; without it,
# "docker stack deploy" rejects the per-service zookeeper_default reference
networks:
  zookeeper_default:
    driver: overlay

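With the services defined, deploy the stack from the Swarm manager node. A minimal sketch, assuming the file above is saved as docker-compose.yaml and the stack is named kafka (the stack name is an assumption; the node hostnames worker1 and manager have to match the placement constraints in the file):

docker stack deploy -c docker-compose.yaml kafka    # create/update every service in one stack
docker service ls                                   # each service should report REPLICAS 1/1
docker service ps kafka_kafka0 --no-trunc           # inspect a single service if a replica stays at 0/1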
Open http://192.168.0.103:9005. On first use, kafka-manager asks you to add the ZooKeeper cluster information.


Fill in the host-mapped IP and ports of the ZooKeeper ensemble (the published ports above); after confirming, the cluster appears in kafka-manager.

Because this ZooKeeper ensemble had been used before, browsing into zk/topic in kafka-manager shows the topics that were previously created in Kafka.
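The brokers can also be checked without kafka-manager. Below is a quick smoke test run from any machine that can reach the published ports, assuming the host IP 192.168.0.105 from the compose file and that the Kafka command-line scripts are on the image's PATH (the topic name test-topic is made up for illustration):

# create a topic replicated across all three brokers
docker run --rm wurstmeister/kafka:2.13-2.7.0 \
  kafka-topics.sh --bootstrap-server 192.168.0.105:19092 \
  --create --topic test-topic --partitions 3 --replication-factor 3

# list topics; anything created earlier should show up here as well
docker run --rm wurstmeister/kafka:2.13-2.7.0 \
  kafka-topics.sh --bootstrap-server 192.168.0.105:19092 --list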

