记录Kubernetes使用StatefulSet方式部署Zookeeper集群。
1、Zookeeper镜像下载
docker pull bitnami/zookeeper:3.8.0-debian-10-r0
2、准备PVC
使用动态PVC,PVC创建过程略过
3、部署Zookeeper集群
Yaml文件包含configmap、service、StatefulSet。
vim zookeeper-test.yaml
---
# Bootstrap scripts for the ZooKeeper pods. setup.sh derives the per-server
# id before handing off to the Bitnami entrypoint.
apiVersion: v1
kind: ConfigMap
metadata:
  name: zk-scripts
  namespace: default
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
data:
  # Placeholder — TLS is not enabled in this deployment, so the script is empty.
  init-certs.sh: |-
    #!/bin/bash
  setup.sh: |-
    #!/bin/bash
    # Assign a unique, stable ZOO_SERVER_ID (1-based):
    #   - reuse a previously persisted id from the data volume if present,
    #   - otherwise derive it from the StatefulSet pod ordinal (pod "zk-test-N"
    #     becomes server id N+1).
    if [[ -f "/bitnami/zookeeper/data/myid" ]]; then
      export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)"
    else
      HOSTNAME="$(hostname -s)"
      if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
        ORD=${BASH_REMATCH[2]}
        export ZOO_SERVER_ID="$((ORD + 1))"
      else
        # FIX: original echoed "$HOST", which is undefined here; the value
        # captured above lives in $HOSTNAME.
        echo "Failed to get index from hostname $HOSTNAME"
        exit 1
      fi
    fi
    exec /entrypoint.sh /run.sh
---
---
# Headless service (clusterIP: None) that gives each StatefulSet pod a stable
# DNS name (zk-test-N.zk-headless.default.svc.cluster.local) for quorum
# traffic. publishNotReadyAddresses lets peers resolve each other before the
# readiness probe passes — required for initial leader election.
apiVersion: v1
kind: Service
metadata:
  name: zk-headless
  namespace: default
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
spec:
  type: ClusterIP
  clusterIP: None
  publishNotReadyAddresses: true
  selector:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
  ports:
    # Client connections
    - name: tcp-client
      port: 2181
      targetPort: client
    # Follower -> leader replication
    - name: tcp-follower
      port: 2888
      targetPort: follower
    # Leader election
    - name: tcp-election
      port: 3888
      targetPort: election
---
---
# Regular ClusterIP service for client access to any ready ZooKeeper node.
apiVersion: v1
kind: Service
metadata:
  name: zk-test
  namespace: default
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
spec:
  type: ClusterIP
  sessionAffinity: None
  selector:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
  ports:
    # FIX: dropped "nodePort: null" (Helm-template leftover) — nodePort is
    # meaningless on a ClusterIP service.
    - name: tcp-client
      port: 2181
      targetPort: client
    - name: tcp-follower
      port: 2888
      targetPort: follower
    - name: tcp-election
      port: 3888
      targetPort: election
---
---
# Three-node ZooKeeper ensemble. Parallel pod management is safe because each
# pod computes its own server id from its ordinal (see zk-scripts/setup.sh).
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk-test
  namespace: default
  labels:
    app.kubernetes.io/name: zookeeper
    app.kubernetes.io/component: zookeeper
    role: zookeeper
spec:
  replicas: 3
  podManagementPolicy: Parallel
  serviceName: zk-headless
  selector:
    matchLabels:
      app.kubernetes.io/name: zookeeper
      app.kubernetes.io/component: zookeeper
  updateStrategy:
    type: RollingUpdate
    rollingUpdate: {}
  template:
    metadata:
      # FIX: removed the bare empty "annotations:" key (parses as null, not
      # an empty map).
      labels:
        app.kubernetes.io/name: zookeeper
        app.kubernetes.io/component: zookeeper
    spec:
      serviceAccountName: default
      # Prefer (but do not require) spreading the three replicas across nodes.
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 1
              podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: zookeeper
                    app.kubernetes.io/component: zookeeper
                namespaces:
                  - "default"
                topologyKey: kubernetes.io/hostname
      securityContext:
        fsGroup: 1001
      # FIX: removed the bare empty "initContainers:" key (null value is
      # invalid; there are no init containers in this deployment).
      containers:
        - name: zookeeper
          image: bitnami/zookeeper:3.8.0-debian-10-r0
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsNonRoot: true
            runAsUser: 1001
          command:
            - /scripts/setup.sh
          # FIX: requests and limits were swapped (requests cpu 1 / 2Gi vs
          # limits cpu 0.5 / 1Gi). Requests exceeding limits are rejected by
          # the API server; the intended shape is requests <= limits.
          # ZOO_HEAP_SIZE (1024 MB) now also fits inside the 2Gi limit.
          resources:
            requests:
              cpu: 500m
              memory: 1Gi
            limits:
              cpu: 1
              memory: 2Gi
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: ZOO_DATA_LOG_DIR
              value: ""
            - name: ZOO_PORT_NUMBER
              value: "2181"
            - name: ZOO_TICK_TIME
              value: "2000"
            - name: ZOO_INIT_LIMIT
              value: "10"
            - name: ZOO_SYNC_LIMIT
              value: "5"
            - name: ZOO_PRE_ALLOC_SIZE
              value: "65536"
            - name: ZOO_SNAPCOUNT
              value: "100000"
            - name: ZOO_MAX_CLIENT_CNXNS
              value: "60"
            # Four-letter-word commands the probes below depend on ("ruok").
            - name: ZOO_4LW_COMMANDS_WHITELIST
              value: "srvr, mntr, ruok"
            - name: ZOO_LISTEN_ALLIPS_ENABLED
              value: "no"
            # 0 disables autopurge; RETAIN_COUNT is then inert.
            - name: ZOO_AUTOPURGE_INTERVAL
              value: "0"
            - name: ZOO_AUTOPURGE_RETAIN_COUNT
              value: "3"
            - name: ZOO_MAX_SESSION_TIMEOUT
              value: "40000"
            # Ensemble roster; host names come from the headless service.
            # setup.sh overrides ZOO_SERVER_ID per pod to match the ::N id.
            - name: ZOO_SERVERS
              value: >-
                zk-test-0.zk-headless.default.svc.cluster.local:2888:3888::1
                zk-test-1.zk-headless.default.svc.cluster.local:2888:3888::2
                zk-test-2.zk-headless.default.svc.cluster.local:2888:3888::3
            - name: ZOO_ENABLE_AUTH
              value: "no"
            - name: ZOO_HEAP_SIZE
              value: "1024"
            - name: ZOO_LOG_LEVEL
              value: "ERROR"
            - name: ALLOW_ANONYMOUS_LOGIN
              value: "yes"
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.name
          ports:
            - name: client
              containerPort: 2181
            - name: follower
              containerPort: 2888
            - name: election
              containerPort: 3888
          livenessProbe:
            failureThreshold: 6
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            exec:
              command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok']
          readinessProbe:
            failureThreshold: 6
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            exec:
              command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok']
          volumeMounts:
            # Mount only setup.sh (via subPath) so the rest of /scripts stays
            # whatever the image provides.
            - name: scripts
              mountPath: /scripts/setup.sh
              subPath: setup.sh
            - name: zookeeper-data
              mountPath: /bitnami/zookeeper
      volumes:
        - name: scripts
          configMap:
            name: zk-scripts
            # Executable: setup.sh is the container command.
            defaultMode: 0755
  volumeClaimTemplates:
    - metadata:
        name: zookeeper-data
      spec:
        storageClassName: nfs-client
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 2Gi
更新资源清单
kubectl apply -f zookeeper-test.yaml
查看创建的资源清单
kubectl get pod,svc,configmap
查看zookeeper配置
kubectl exec -it zk-test-0 -- cat /opt/bitnami/zookeeper/conf/zoo.cfg
查看集群状态
kubectl exec -it zk-test-0 -- /opt/bitnami/zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/bitnami/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
kubectl exec -it zk-test-1 -- /opt/bitnami/zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/bitnami/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
kubectl exec -it zk-test-2 -- /opt/bitnami/zookeeper/bin/zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/bitnami/zookeeper/bin/../conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: leader
Zookeeper 集群部署完成。