1. Introduction
1.1 StatefulSet controller overview
The StatefulSet controller manages replica sets of stateful applications. In a Kubernetes cluster, a StatefulSet provides:
- stable, unique network identifiers
- stable, persistent storage
- ordered, graceful deployment and scaling
- ordered, graceful deletion and termination
- ordered rolling updates
A StatefulSet deployment consists of three components: a headless Service, the StatefulSet itself, and a volumeClaimTemplate:
- headless Service: gives each pod a stable DNS name that resolves directly to that backend pod (see the name-resolution example after this list)
- volumeClaimTemplate: a PVC template; each time a pod is created, a PVC is created for it automatically and bound to a matching PV
- StatefulSet: the controller itself
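With the manifest used later in this article (headless Service zk-hs and StatefulSet zk in the bigdata namespace), every replica gets a stable DNS name of the form <pod>.<service>.<namespace>.svc.cluster.local, for example zk-0.zk-hs.bigdata.svc.cluster.local. Once the cluster from section 2 is running, resolution can be verified from a throwaway pod (the busybox test pod here is only for illustration):
kubectl run dns-test -n bigdata --rm -it --restart=Never --image=busybox:1.28 -- nslookup zk-0.zk-hs.bigdata.svc.cluster.local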
2. Deployment
2.1 Creating the PVs
Common volume types on Kubernetes:
- emptyDir: an empty directory created on demand; its data is removed together with the pod, so it is typically used for scratch space or caches (a minimal example follows this list)
- hostPath: a path on the node's filesystem that is mounted into the pod's containers
- gitRepo: built on emptyDir; a git repository is cloned into the emptyDir volume
- NFS: mounts an existing NFS (network file system) share into the pod
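As a minimal illustration of the simplest type above, an emptyDir volume mounted into a container might look like this (the pod and volume names are made up for this sketch):
apiVersion: v1
kind: Pod
metadata:
  name: emptydir-demo
spec:
  containers:
  - name: app
    image: busybox:1.28
    command: ["sh", "-c", "sleep 3600"]
    volumeMounts:
    - name: cache
      mountPath: /cache      # contents disappear when the pod is deleted
  volumes:
  - name: cache
    emptyDir: {}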
2.1.1 Preparation
On the master node:
kubectl create ns bigdata
On each worker node:
mkdir -pv /data/pv/zk{1..3}
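An optional sanity check before continuing:
kubectl get ns bigdata           # on the master: the namespace exists
ls -ld /data/pv/zk{1..3}         # on each worker node: the backing directories exist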
2.1.2 Defining the PV resource file
The lab environment runs on virtual machines, so the hostPath volume type is used. The following manifest (saved as zookeeper-pv.yaml) defines one PV per ZooKeeper replica:
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk1
  namespace: bigdata
  labels:
    type: local
spec:
  capacity:
    storage: 2Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: /data/pv/zk1
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk2
  namespace: bigdata
  labels:
    type: local
spec:
  capacity:
    storage: 2Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: /data/pv/zk2
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk3
  namespace: bigdata
  labels:
    type: local
spec:
  capacity:
    storage: 2Gi
  accessModes:
  - ReadWriteOnce
  hostPath:
    path: /data/pv/zk3
  persistentVolumeReclaimPolicy: Recycle
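Two notes on this manifest: PersistentVolumes are cluster-scoped objects, so the namespace field is effectively ignored by the API server, and the Recycle reclaim policy is deprecated upstream (it is only implemented for hostPath and NFS volumes), so Retain or dynamic provisioning is generally preferred on newer clusters. The policy actually set on a PV can be checked with:
kubectl get pv k8s-pv-zk1 -o jsonpath='{.spec.persistentVolumeReclaimPolicy}{"\n"}'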
2.1.3 Create the PVs
kubectl apply -f zookeeper-pv.yaml
2.1.4 Check the PVs
[root@master ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
k8s-pv-zk1 2Gi RWO Recycle Available 6m42s
k8s-pv-zk2 2Gi RWO Recycle Available 6m42s
k8s-pv-zk3 2Gi RWO Recycle Available 6m42s
2.2 Deploying the ZooKeeper cluster
2.2.1 Modifying the official manifest
Official manifest: https://kubernetes.io/zh/docs/tutorials/stateful-application/zookeeper/
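Compared with the manifest in the official tutorial, the version below (saved as zookeeper.yaml) adds the bigdata namespace to every object, requests 2Gi per volume claim to match the PVs created above, and pulls the ZooKeeper image from a public mirror (mirrorgcrio/kubernetes-zookeeper:1.0-3.4.10) instead of the default upstream registry, which can be unreachable from some networks.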
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  namespace: bigdata
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  namespace: bigdata
  labels:
    app: zk
spec:
  ports:
  - port: 2181
    name: client
  selector:
    app: zk
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
  namespace: bigdata
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
  namespace: bigdata
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: zk
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - zk
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: IfNotPresent
        image: "mirrorgcrio/kubernetes-zookeeper:1.0-3.4.10"
        resources:
          requests:
            memory: "500Mi"
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 2Gi
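One caveat for newer clusters: the policy/v1beta1 PodDisruptionBudget API used above matches the Kubernetes version this article targets (v1.18, per the API reference at the end) but was removed in v1.25. On v1.21 and later the same budget can be expressed with policy/v1 (a sketch, otherwise identical to the PDB above):
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
  namespace: bigdata
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1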
2.2.2 Create the ZooKeeper cluster
kubectl apply -f zookeeper.yaml
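Because podManagementPolicy is OrderedReady, the pods are created one at a time starting with zk-0, and each must be Running and Ready before the next is started. The ordering can be observed while the cluster comes up:
kubectl get pods -n bigdata -l app=zk -w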
2.2.3 Check the pods
[root@master ~]# kubectl get pods -n bigdata -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
zk-0 1/1 Running 0 109s 10.122.104.4 node2
zk-1 1/1 Running 0 95s 10.122.166.134 node1
zk-2 1/1 Running 0 78s 10.122.135.1 node3
2.2.4 Check the PVCs
[root@master ~]# kubectl get pvc -n bigdata -o wide
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
datadir-zk-0 Bound k8s-pv-zk1 2Gi RWO 19m Filesystem
datadir-zk-1 Bound k8s-pv-zk3 2Gi RWO 115s Filesystem
datadir-zk-2 Bound k8s-pv-zk2 2Gi RWO 98s Filesystem
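The PVC names follow the pattern <volumeClaimTemplate name>-<pod name> (datadir-zk-0, datadir-zk-1, ...). Each claim binds to whichever available PV satisfies its requested size and access mode, not necessarily the PV with the matching ordinal, which is why datadir-zk-1 ended up on k8s-pv-zk3 here; since all three PVs are identical this makes no practical difference.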
2.2.5 Check the ZooKeeper configuration
[root@master ~]# kubectl exec -n bigdata zk-0 -- cat /opt/zookeeper/conf/zoo.cfg
#This file was autogenerated DO NOT EDIT
clientPort=2181
dataDir=/var/lib/zookeeper/data
dataLogDir=/var/lib/zookeeper/data/log
tickTime=2000
initLimit=10
syncLimit=5
maxClientCnxns=60
minSessionTimeout=4000
maxSessionTimeout=40000
autopurge.snapRetainCount=3
autopurge.purgeInteval=12
server.1=zk-0.zk-hs.bigdata.svc.cluster.local:2888:3888
server.2=zk-1.zk-hs.bigdata.svc.cluster.local:2888:3888
server.3=zk-2.zk-hs.bigdata.svc.cluster.local:2888:3888
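The server.N entries map each ZooKeeper id to a pod's stable DNS name provided by the zk-hs headless Service. Each pod derives its id from its ordinal (ordinal + 1) and writes it to the myid file, which can be checked directly:
for i in 0 1 2; do kubectl exec -n bigdata zk-$i -- cat /var/lib/zookeeper/data/myid; done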
2.2.6 Check the cluster status
[root@master ~]# kubectl exec -n bigdata zk-0 zkServer.sh status
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead.
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: follower
[root@master ~]# kubectl exec -n bigdata zk-1 zkServer.sh status
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead.
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: leader
[root@master ~]# kubectl exec -n bigdata zk-2 zkServer.sh status
kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl kubectl exec [POD] -- [COMMAND] instead.
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Mode: follower
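As a final sanity check, write a znode on one member and read it back from another, as in the official tutorial (assuming the mirrored image ships the same zkCli.sh wrapper as the official one):
kubectl exec -n bigdata zk-0 -- zkCli.sh create /hello world
kubectl exec -n bigdata zk-1 -- zkCli.sh get /hello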
References:
Official ZooKeeper StatefulSet tutorial: https://kubernetes.io/zh/docs/tutorials/stateful-application/zookeeper/
StatefulSet API reference: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.18/#statefulset-v1-apps