Creating a Kafka Cluster on Kubernetes and Exposing It for External Access

The YAML resource files are listed directly below.

Create the ZooKeeper cluster resource file zk-sts.yaml with the contents below, then apply it to your namespace:

kubectl apply -f zk-sts.yaml -n <namespace>

apiVersion: v1
kind: Service
metadata:
  name: zookeeper-hs
  labels:
    app: zookeeper
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zookeeper
---
apiVersion: v1
kind: Service
metadata:
  name: zookeeper
  labels:
    app: zookeeper
spec:
  ports:
  - port: 2181
    name: zookeeper-client
  selector:
    app: zookeeper

---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zookeeper
spec:
  selector:
    matchLabels:
      app: zookeeper
  serviceName: zookeeper-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: zookeeper
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - zookeeper
              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: zookeeper
        imagePullPolicy: Always
        image: "fastop/zookeeper:3.4.10"
        command:
          - sh
          - -c
          - "start-zookeeper \
                  --servers=3 \
                  --data_dir=/var/lib/zookeeper/data \
                  --data_log_dir=/var/lib/zookeeper/data/log \
                  --conf_dir=/opt/zookeeper/conf \
                  --client_port=2181 \
                  --election_port=3888 \
                  --server_port=2888 \
                  --tick_time=2000 \
                  --init_limit=10 \
                  --sync_limit=5 \
                  --heap=512M \
                  --max_client_cnxns=60 \
                  --snap_retain_count=3 \
                  --purge_interval=12 \
                  --max_session_timeout=40000 \
                  --min_session_timeout=4000 \
                  --log_level=INFO"
        ports:
          - containerPort: 2181
            name: client
          - containerPort: 2888
            name: server
          - containerPort: 3888
            name: leader-election
        volumeMounts:
        - name: zookeeper-data
          mountPath: /var/lib/zookeeper
        - mountPath: /etc/localtime
          readOnly: true 
          name: time-data
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
      volumes: 
      - name: time-data 
        hostPath: 
          path: /usr/share/zoneinfo/Asia/Shanghai
  volumeClaimTemplates:
  - metadata:
      name: zookeeper-data
      labels:
        app: zookeeper
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: glusterfs
      resources:
        requests:
          storage: 10Gi
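
Before moving on to Kafka, it is worth checking that the ensemble formed correctly. A minimal sketch, assuming the pods come up as zookeeper-0/1/2 and that the image puts the standard ZooKeeper client scripts on the PATH (as the kubernetes-zookeeper tutorial image this one is based on does):

kubectl get pods -l app=zookeeper -n <namespace>

# each server should have a unique myid (1, 2, 3)
for i in 0 1 2; do kubectl exec zookeeper-$i -n <namespace> -- cat /var/lib/zookeeper/data/myid; done

# write a znode through one server and read it back through another
kubectl exec zookeeper-0 -n <namespace> -- zkCli.sh create /hello world
kubectl exec zookeeper-1 -n <namespace> -- zkCli.sh get /hello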

Create the Kafka cluster resource file ka-sts.yaml. Besides the headless service kafka-svc used by the StatefulSet, each broker gets its own NodePort service that selects a single pod via the statefulset.kubernetes.io/pod-name label; this is what exposes the individual brokers outside the cluster.

apiVersion: v1
kind: Service
metadata:
  name: kafka-svc
  labels:
    app: kafka
spec:
  ports:
  - port: 9092
    name: server
  clusterIP: None
  selector:
    app: kafka

---
apiVersion: v1
kind: Service
metadata:
  name: kafka-0
  labels:
    app: kafka
spec:
  ports:
  - port: 9092
    targetPort: 9092
    nodePort: 30092
    name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: kafka-0
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-1
  labels:
    app: kafka
spec:
  ports:
  - port: 9092
    targetPort: 9092
    nodePort: 30093
    name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: kafka-1
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-2
  labels:
    app: kafka
spec:
  ports:
  - port: 9092
    targetPort: 9092
    nodePort: 30094
    name: server
  type: NodePort
  selector:
    statefulset.kubernetes.io/pod-name: kafka-2
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
spec:
  selector:
    matchLabels:
      app: kafka
  serviceName: kafka-svc  # must match the headless Service defined above
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: kafka
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - kafka
              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kafka
        command:
          - bash
          - -ec
          - |
            # Derive the broker ordinal from the pod name and advertise the matching
            # NodePort: kafka-0 -> 30092, kafka-1 -> 30093, kafka-2 -> 30094.
            HOSTNAME=$(hostname -s)
            if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
              ORD=${BASH_REMATCH[2]}
              PORT=$((ORD + 30092))
              # 192.168.1.106 must be an address external clients can reach (a node IP here); replace it with your own.
              export KAFKA_CFG_ADVERTISED_LISTENERS="PLAINTEXT://192.168.1.106:$PORT"
            else
              echo "Failed to get index from hostname $HOSTNAME"
              exit 1
            fi
            exec /entrypoint.sh /run.sh
        image: "bitnami/kafka:2"
        env:
          - name: ALLOW_PLAINTEXT_LISTENER
            value: "yes"
          - name: KAFKA_CFG_ZOOKEEPER_CONNECT
            value: "zookeeper-0.zookeeper-hs:2181,zookeeper-1.zookeeper-hs:2181,zookeeper-2.zookeeper-hs:2181"
          - name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR
            value: "3"
          - name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR
            value: "3"
          - name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR
            value: "3"
          - name: KAFKA_HEAP_OPTS
            value: "-Xmx6g -Xms6g"
          - name: KAFKA_CFG_MESSAGE_MAX_BYTES
            value: "100000000"
          - name: KAFKA_CFG_LOG_RETENTION_HOURS
            value: "24"
          - name: KAFKA_CFG_MAX_PARTITION_FETCH_BYTES
            value: "200000000"
        ports:
          - containerPort: 9092
        volumeMounts:
          - name: kafka-data
            mountPath: /bitnami
          - mountPath: /etc/localtime
            readOnly: true 
            name: time-data
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
      volumes: 
      - name: time-data 
        hostPath: 
          path: /usr/share/zoneinfo/Asia/Shanghai
  volumeClaimTemplates:
  - metadata:
      name: kafka-data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: glusterfs
      resources:
        requests:
          storage: 40Gi

Apply it with:

kubectl apply -f ka-sts.yaml -n <namespace>
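
Once the pods are Running, a quick check that the per-broker NodePort services map to the right pods (<namespace> is the namespace used above):

kubectl get pods -l app=kafka -n <namespace> -o wide
# kafka-svc should be headless; kafka-0/1/2 should expose NodePorts 30092-30094
kubectl get svc -l app=kafka -n <namespace>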

External clients can then reach the brokers at nodeIp:30092, nodeIp:30093, and nodeIp:30094 (kafka-0, kafka-1, and kafka-2 respectively), where nodeIp is the IP of any cluster node.
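
A minimal end-to-end test from a machine outside the cluster, assuming a local Kafka client installation (2.2 or newer for the --bootstrap-server flag on kafka-topics.sh) and using the node IP from the advertised listeners above; the topic name is just an example:

# create a topic through the externally advertised listener
kafka-topics.sh --bootstrap-server 192.168.1.106:30092 --create --topic external-test --partitions 3 --replication-factor 3

# produce a few messages, then consume them back
kafka-console-producer.sh --broker-list 192.168.1.106:30092 --topic external-test
kafka-console-consumer.sh --bootstrap-server 192.168.1.106:30092 --topic external-test --from-beginning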
