PV 和 PVC 之间的关系是一种动态的供需匹配关系。PVC 表示应用程序对持久化存储的需求,而 PV 表示可用的持久化存储资源。Kubernetes 控制平面会根据 PVC 的需求来选择和绑定合适的 PV,将其挂载到应用程序的 Pod 中,从而使应用程序可以访问持久化存储。
PV可以静态或动态的创建;PV和PVC必须一一对应;PVC如果没有对应的绑定PV则会Pending
PVC被删除后,PV内数据的处理方式由回收策略(persistentVolumeReclaimPolicy)决定:Retain保留(静态创建PV的默认值)、Delete删除(需要卷插件/供给器支持);另有Recycle策略,已被废弃
接下来的实验中会对这几种策略进行测试。测试结果看似没有区别(k8s 1.26)——但这并非策略本身无效:NFS这类静态PV的in-tree卷插件不支持Delete,删除PVC后PV只会进入Failed/Released状态,底层数据不会被清除
# Set up the NFS server on master2 (192.168.8.159), exporting /dirfornfs.
yum -y install nfs-utils
# Create the export directories; -p also creates the parent /dirfornfs.
mkdir -p /dirfornfs/{1..5}
# Declare the exports (rw + no_root_squash so pods running as root can write).
echo "/dirfornfs *(rw,no_root_squash)
/dirfornfs/1 *(rw,no_root_squash)
/dirfornfs/2 *(rw,no_root_squash)
/dirfornfs/3 *(rw,no_root_squash)
/dirfornfs/4 *(rw,no_root_squash)
/dirfornfs/5 *(rw,no_root_squash)" > /etc/exports
# Bug fix: writing /etc/exports alone serves nothing -- the NFS service must
# be running and the export table re-read for clients to mount anything.
systemctl enable --now nfs-server
exportfs -arv
# Create three statically-provisioned PVs, each backed by one NFS export.
# (Indentation restored: the flattened manifest was not valid Kubernetes YAML.)
cat > jintai-PV.yaml << EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jintai-pv1
  labels:
    stor: pv1
spec:
  nfs:
    server: 192.168.8.159
    path: /dirfornfs/1
  accessModes: ["ReadWriteOnce"]  # RWO: read-write, mountable by a single node
  capacity:
    storage: 1.5Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jintai-pv2
  labels:
    stor: pv2
spec:
  nfs:
    server: 192.168.8.159
    path: /dirfornfs/2
  accessModes: ["ReadWriteMany"]  # RWX: read-write from many nodes
  capacity:
    storage: 2Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jintai-pv3
  labels:
    stor: pv3
spec:
  nfs:
    server: 192.168.8.159
    path: /dirfornfs/3
  accessModes: ["ReadOnlyMany"]  # ROX: read-only from many nodes
  capacity:
    storage: 3Gi
EOF
kubectl apply -f jintai-PV.yaml
kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS
jintai-pv1 1536Mi RWO Retain Available #单节点读写
jintai-pv2 2Gi RWX Retain Available #多节点读写
jintai-pv3 3Gi ROX Retain Available #多节点只读
# Create one PVC per PV. Binding requires the access mode to be satisfiable
# by the PV, the label selector to match, and the PV capacity to cover the
# request. (Indentation restored: the flattened manifest was invalid YAML.)
cat > pvc.yaml << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc1
spec:
  accessModes: ["ReadWriteOnce"]  # must be compatible with the target PV
  selector:
    matchLabels:
      stor: pv1  # match the PV's label
  resources:
    requests:
      storage: 1.5Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc2
spec:
  accessModes: ["ReadWriteMany"]  # must be compatible with the target PV
  selector:
    matchLabels:
      stor: pv2  # match the PV's label
  resources:
    requests:
      storage: 2Gi
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc3
spec:
  accessModes: ["ReadOnlyMany"]  # must be compatible with the target PV
  selector:
    matchLabels:
      stor: pv3  # match the PV's label
  resources:
    requests:
      storage: 3Gi
EOF
kubectl apply -f pvc.yaml
kubectl get pvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
pvc1 Bound jintai-pv1 1536Mi RWO 54s
pvc2 Bound jintai-pv2 2Gi RWX 54s
pvc3 Bound jintai-pv3 3Gi ROX 54s
# Create three Deployments, each mounting its own PVC at the nginx web root.
# (Indentation restored: the flattened manifest was invalid YAML.)
# NOTE: the original gave all three Deployments the identical selector
# "stor: pvc"; the Kubernetes docs warn against overlapping selectors across
# controllers, so each Deployment now gets a distinct label.
cat > pod-pvc.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pod-pvc1
spec:
  replicas: 3
  selector:
    matchLabels:
      stor: pvc1
  template:
    metadata:
      labels:
        stor: pvc1
    spec:
      containers:
      - name: test
        image: docker.io/library/nginx
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: pvc1
      volumes:
      - name: pvc1
        persistentVolumeClaim:
          claimName: pvc1
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pod-pvc2
spec:
  replicas: 3
  selector:
    matchLabels:
      stor: pvc2
  template:
    metadata:
      labels:
        stor: pvc2
    spec:
      containers:
      - name: test
        image: docker.io/library/nginx
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: pvc2
      volumes:
      - name: pvc2
        persistentVolumeClaim:
          claimName: pvc2
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pod-pvc3
spec:
  replicas: 3
  selector:
    matchLabels:
      stor: pvc3
  template:
    metadata:
      labels:
        stor: pvc3
    spec:
      containers:
      - name: test
        image: docker.io/library/nginx
        imagePullPolicy: IfNotPresent
        volumeMounts:
        - mountPath: /usr/share/nginx/html
          name: pvc3
      volumes:
      - name: pvc3
        persistentVolumeClaim:
          claimName: pvc3
EOF
kubectl apply -f pod-pvc.yaml
kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-pvc1-69b655447-5zmjn 1/1 Running 0 95s 10.10.179.12 ws-k8s-node1
pod-pvc1-69b655447-crnfr 1/1 Running 0 95s 10.10.179.11 ws-k8s-node1
pod-pvc1-69b655447-kzpf5 1/1 Running 0 95s 10.10.234.75 ws-k8s-node2
pod-pvc2-697979cddb-6x658 1/1 Running 0 95s 10.10.179.13 ws-k8s-node1
pod-pvc2-697979cddb-bxcxm 1/1 Running 0 95s 10.10.179.15 ws-k8s-node1
pod-pvc2-697979cddb-zffwh 1/1 Running 0 95s 10.10.234.74 ws-k8s-node2
pod-pvc3-7588fbc489-2v8pt 1/1 Running 0 95s 10.10.179.14 ws-k8s-node1
pod-pvc3-7588fbc489-5scpd 1/1 Running 0 95s 10.10.234.76 ws-k8s-node2
pod-pvc3-7588fbc489-b7cp9 1/1 Running 0 95s 10.10.234.77 ws-k8s-node2
#进入不同node节点的pod查看是否同步
#pvc1
kubectl exec -it pod-pvc1-69b655447-5zmjn -- /bin/bash
cd /usr/share/nginx/html/
touch 11
exit
kubectl exec -it pod-pvc1-69b655447-kzpf5 -- /bin/bash
ls /usr/share/nginx/html/11
/usr/share/nginx/html/11 #不同节点依然可以同时访问到这个pv
#pvc2也可以,略过了
#pvc3 ACCESS MODES为ROX,预期不可写入
#注意:下面的命令写错了——touch 123454 /usr/share/nginx/html/ 只会在当前目录(/)
#创建文件 123454 并更新挂载点目录的时间戳,因此后面 ls 无输出只说明文件建在了别处,
#并不能证明 ROX 生效;正确的验证写法应为 touch /usr/share/nginx/html/123454
#另外,accessModes 只在调度/绑定阶段起约束作用,并不会强制以只读方式挂载;
#要真正只读,需在 volumeMounts 或 pod 的 persistentVolumeClaim 中设置 readOnly: true
root@pod-pvc3-7588fbc489-b7cp9:/# touch 123454 /usr/share/nginx/html/
root@pod-pvc3-7588fbc489-b7cp9:/#
root@pod-pvc3-7588fbc489-b7cp9:/# ls /usr/share/nginx/html/
root@pod-pvc3-7588fbc489-b7cp9:/# 无输出
#
#删除
kubectl delete -f pod-pvc.yaml
kubectl delete -f pvc.yaml
kubectl delete -f jintai-PV.yaml
#启用
kubectl apply -f jintai-PV.yaml
kubectl apply -f pvc.yaml
kubectl apply -f pod-pvc.yaml
kubectl exec -it pod-pvc1-69b655447-46h5h -- /bin/bash
ls /usr/share/nginx/html/
11 #依然保留了数据
#修改回收策略
#
vim jintai-PV.yaml
...
capacity:
storage: 1.5Gi #分配1.5个G
persistentVolumeReclaimPolicy: Delete #回收策略为Delete
---
...
kubectl delete -f pod-pvc.yaml
kubectl delete -f pvc.yaml
kubectl delete -f jintai-PV.yaml
kubectl apply -f jintai-PV.yaml
kubectl apply -f pvc.yaml
kubectl apply -f pod-pvc.yaml
# Create a standalone pod that mounts the same pvc1 as the pod-pvc1
# Deployment, to show two workloads sharing one claim's data.
# (Indentation restored: the flattened manifest was invalid YAML.)
cat > pod-test.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
  name: pod-pvc-test
spec:
  containers:
  - name: test10
    image: docker.io/library/nginx
    imagePullPolicy: IfNotPresent
    volumeMounts:
    - mountPath: /usr/share/nginx/html
      name: pvc1
  volumes:
  - name: pvc1
    persistentVolumeClaim:
      claimName: pvc1
EOF
kubectl apply -f pod-test.yaml
#使用测试pod新建文件
kubectl exec -it pod-pvc-test -- /bin/bash
cd /usr/share/nginx/html/
mkdir 123
exit
#进入另一个pod查看
kubectl exec -it pod-pvc1-69b655447-7lxwl -- /bin/bash
ls /usr/share/nginx/html/
123 12345
#删除新建文件的测试pod
kubectl delete -f pod-test.yaml
#在另一个查看
ls /usr/share/nginx/html/
123 12345 #依然存在
#
#说明:这里看似Delete和Retain没有区别,实际原因是NFS的in-tree卷插件不支持Delete回收——
#删除PVC后该PV会进入Failed/Released状态,数据仍保留在NFS目录中;
#若使用支持Delete的动态供给器(如nfs-subdir-external-provisioner或云盘CSI驱动),
#Delete策略会真正删除底层卷及其数据
#清理
kubectl delete -f pod-pvc.yaml
kubectl delete -f pvc.yaml
kubectl delete -f jintai-PV.yaml
#查看帮助
kubectl explain storageclass
allowVolumeExpansion # 是否允许持久卷的扩展,不能支持缩小
allowedTopologies <[]Object> # 定义允许使用该StorageClass的节点拓扑约束
apiVersion
kind
metadata