Note: when installing the ceph-common package, use the same package repository as the Ceph cluster so that the client and cluster versions match.
cat > /etc/yum.repos.d/ceph.repo << EOF
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
enabled=1
gpgcheck=0
priority=1
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
enabled=1
gpgcheck=0
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/SRPMS
enabled=1
gpgcheck=0
priority=1
EOF
yum install ceph-common -y
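To confirm the version note above, it can help to compare the freshly installed client with the cluster (both are standard ceph CLI commands; adjust to your environment):
ceph --version      # on the client node that was just installed
ceph versions       # on a Ceph node: shows the versions the cluster daemons are running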
[root@ceph1 ~]# ssh-copy-id master
[root@ceph1 ~]# ssh-copy-id node1
[root@ceph1 ~]# ssh-copy-id node2
[root@ceph1 /etc/ceph]# ceph-deploy --overwrite-conf admin ceph1 ceph2 ceph3 master node1 node2
Create a kube pool for Kubernetes
ceph osd pool create kube 128 128
#List the authentication users and their keys in the Ceph cluster
# ceph auth list
Delete an authentication user from the cluster
ceph auth del osd.0 #(shown only as an example of the delete command — do NOT run it!)
#Create a user
ceph auth get-or-create client.kube mon 'allow r' osd 'allow class-read object_prefix rbd_children,allow rwx pool=kube'
ceph auth get-key client.admin | base64
ceph auth get-key client.kube | base64
#base64-encode the keys: the data field of a Kubernetes Secret stores values base64-encoded rather than in plain text (base64 is encoding, not encryption)
mkdir jtpv && cd jtpv
cat > ceph-admin-secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: ceph-admin-secret
  namespace: default
data:
  key: # (base64-encoded key of client.admin)
type: kubernetes.io/rbd
EOF
cat > ceph-kube-secret.yaml << EOF
apiVersion: v1
kind: Secret
metadata:
  name: ceph-kube-secret
  namespace: default
data:
  key: # (base64-encoded key of client.kube)
type: kubernetes.io/rbd
EOF
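As a convenience, here is a small sketch (my own addition, assuming the two file names above) that fills the empty key: fields from the live cluster instead of pasting the base64 output by hand:
ADMIN_KEY=$(ceph auth get-key client.admin | base64)
KUBE_KEY=$(ceph auth get-key client.kube | base64)
sed -i "s|key: .*|key: ${ADMIN_KEY}|" ceph-admin-secret.yaml   # replace the placeholder key line
sed -i "s|key: .*|key: ${KUBE_KEY}|" ceph-kube-secret.yaml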
cat > pv.yaml << EOF
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ceph-pv-test
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  rbd:
    monitors:
      - 10.100.100.115:6789
      - 10.100.100.116:6789
      - 10.100.100.117:6789
    pool: kube
    image: ceph-image
    user: admin
    secretRef:
      name: ceph-admin-secret
    fsType: ext4
    readOnly: false
  persistentVolumeReclaimPolicy: Retain
EOF
#Create an RBD image
rbd create -p kube -s 5G ceph-image
# rbd ls -p kube
ceph-image
# rbd info ceph-image -p kube
cat > pvc.yaml << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: ceph-test-claim
spec:
  accessModes:
    - ReadWriteOnce
  volumeName: ceph-pv-test
  resources:
    requests:
      storage: 5Gi
EOF
cat > pod.yaml << EOF
apiVersion: v1
kind: Pod
metadata:
  name: ceph-pod
spec:
  containers:
    - name: test-pod
      image: busybox:1.24
      command: ["sleep", "60000"]
      volumeMounts:
        - name: pvc
          mountPath: /usr/share/busybox
          readOnly: false
  volumes:
    - name: pvc
      persistentVolumeClaim:
        claimName: ceph-test-claim
EOF
[root@ceph1 ceph]# rbd ls -p kube
ceph-image
[root@ceph1 ceph]# rbd info ceph-image -p kube
#It turns out the kernel versions of the Kubernetes nodes and the Ceph cluster differ: the Kubernetes nodes run an older kernel that does not support some RBD image features, so those features have to be disabled with the following command
[root@ceph1 ceph]# rbd feature disable kube/ceph-image object-map fast-diff deep-flatten
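To double-check which features remain enabled on the image after the change (rbd info is the standard way; the commented line is just an optional pattern for creating new images with only the kernel-friendly layering feature, image name is a placeholder):
rbd info kube/ceph-image | grep features
# rbd create -p kube -s 5G --image-feature layering <image-name>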
#Verify
kubectl exec -it ceph-pod -- df -h |grep /dev/rbd0
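Beyond df, a quick write/read round-trip inside the Pod (using the mountPath defined in pod.yaml above) confirms the RBD volume is actually usable:
kubectl exec -it ceph-pod -- sh -c 'echo rbd-test > /usr/share/busybox/test.txt && cat /usr/share/busybox/test.txt'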
On the ceph1 deploy node, sync the configuration files and create at least one MDS service.
Using CephFS requires at least one node running an MDS daemon.
[root@ceph1 /etc/ceph]# ceph-deploy --overwrite-conf admin ceph1 ceph2 ceph3
#Deploy three MDS daemons here
[root@ceph1 /etc/ceph]# ceph-deploy mds create ceph1 ceph2 ceph3
[root@ceph1 /etc/ceph]# ceph -s
1. Create the CephFS storage pools: cephfs_metadata and cephfs_pool.
2. Create the CephFS file system, named cephfs.
3. A Ceph file system needs at least two RADOS pools: one for data and one for metadata.
[root@ceph1 /etc/ceph]# ceph osd pool create cephfs_pool 64
pool 'cephfs_pool' created
[root@ceph1 /etc/ceph]# ceph osd pool create cephfs_metadata 64
pool 'cephfs_metadata' created
[root@ceph1 /etc/ceph]# ceph fs new cephfs cephfs_metadata cephfs_pool
[root@ceph1 /etc/ceph]# ceph fs ls
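Before moving on, it is worth confirming that an MDS has gone active for the new file system, for example:
ceph mds stat
ceph fs status cephfs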
Get the cluster information and look up the admin user's key
# ceph mon dump
# ceph auth get client.admin
Note: base64 encoding is NOT needed here
0.0 Install the required components on all Kubernetes nodes
cat > /etc/yum.repos.d/ceph.repo << EOF
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/x86_64/
enabled=1
gpgcheck=0
priority=1
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/noarch/
enabled=1
gpgcheck=0
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-nautilus/el7/SRPMS
enabled=1
gpgcheck=0
priority=1
EOF
yum install ceph-common -y
0.1 Sync the Ceph cluster's /etc/ceph/{ceph.conf,ceph.client.admin.keyring} files to every Kubernetes node
[root@ceph1 ~]# ssh-copy-id k8s-master01
[root@ceph1 ~]# ssh-copy-id k8s-master02
[root@ceph1 ~]# ssh-copy-id k8s-master03
[root@ceph1 /etc/ceph]# ceph-deploy --overwrite-conf admin ceph1 ceph2 ceph3 k8s-master01 k8s-master02 k8s-master03
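If you prefer not to run ceph-deploy against the Kubernetes nodes, the same two files can be copied by hand (a plain scp sketch, assuming root SSH access and that /etc/ceph exists on the targets):
for node in k8s-master01 k8s-master02 k8s-master03; do
  scp /etc/ceph/ceph.conf /etc/ceph/ceph.client.admin.keyring ${node}:/etc/ceph/
done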
https://github.com/ceph/ceph-csi
#Kubernetes v1.19.16 with Ceph 14 (Nautilus) and CSI plugin ceph-csi v3.2.0
#git clone https://github.com/ceph/ceph-csi.git
git clone --branch=v3.2.0 https://github.com/ceph/ceph-csi.git
cd ceph-csi/deploy/cephfs/kubernetes
Edit csi-config-map.yaml with the connection details of your Ceph cluster: clusterID is the cluster fsid (check it with ceph mon dump), and monitors lists the monitor addresses.
cat > csi-config-map.yaml << EOF
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "ba8c6ea3-30b5-4483-9fa0-26636c64a142",
        "monitors": [
          "10.100.100.115:6789",
          "10.100.100.116:6789",
          "10.100.100.117:6789"
        ]
      }
    ]
metadata:
  name: ceph-csi-config
EOF
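The two values above can be read straight from the cluster if you are unsure of them (ceph fsid prints the fsid used as clusterID; ceph mon dump lists the monitor addresses):
ceph fsid
ceph mon dump | grep 'mon\.'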
cd ceph-csi/deploy/cephfs/kubernetes
grep image csi-cephfsplugin-provisioner.yaml #list the images that are required
#Kubernetes v1.19.16 with Ceph 14 (Nautilus) and CSI plugin ceph-csi v3.2.0
docker pull dyrnq/csi-node-driver-registrar:v2.0.1
docker tag dyrnq/csi-node-driver-registrar:v2.0.1 k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.0.1
docker rmi dyrnq/csi-node-driver-registrar:v2.0.1
docker pull dyrnq/csi-snapshotter:v3.0.2
docker tag dyrnq/csi-snapshotter:v3.0.2 k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.2
docker rmi dyrnq/csi-snapshotter:v3.0.2
docker pull dyrnq/csi-provisioner:v2.0.4
docker tag dyrnq/csi-provisioner:v2.0.4 k8s.gcr.io/sig-storage/csi-provisioner:v2.0.4
docker rmi dyrnq/csi-provisioner:v2.0.4
docker pull dyrnq/csi-resizer:v1.0.1
docker tag dyrnq/csi-resizer:v1.0.1 k8s.gcr.io/sig-storage/csi-resizer:v1.0.1
docker rmi dyrnq/csi-resizer:v1.0.1
docker pull dyrnq/csi-attacher:v3.0.2
docker tag dyrnq/csi-attacher:v3.0.2 k8s.gcr.io/sig-storage/csi-attacher:v3.0.2
docker rmi dyrnq/csi-attacher:v3.0.2
Because this Kubernetes cluster only has three (master) nodes, k8s-master01 must be allowed to run Pods.
#With the default taint, k8s-master01 does not run ordinary Pods
[root@k8s-master01 ~]# kubectl describe node k8s-master01 |grep Taints
Taints: node-role.kubernetes.io/master:NoSchedule
#After the taint is removed, k8s-master01 can run Pods
[root@k8s-master01 ~]# kubectl describe node k8s-master01 |grep Taints
Taints: <none>
#Allow k8s-master01 to take Pod workloads:
kubectl taint nodes k8s-master01 node-role.kubernetes.io/master-
#Restore the taint so k8s-master01 no longer takes Pod workloads:
kubectl taint nodes k8s-master01 node-role.kubernetes.io/master=:NoSchedule
cd ceph-csi/deploy/cephfs/kubernetes
sed -i 's/namespace: default/namespace: cephfs/g' *.yaml
kubectl create ns cephfs
kubectl -n cephfs apply -f csi-config-map.yaml
kubectl -n cephfs create -f csi-provisioner-rbac.yaml
kubectl -n cephfs create -f csi-nodeplugin-rbac.yaml
kubectl -n cephfs create -f csi-cephfsplugin-provisioner.yaml
kubectl -n cephfs create -f csi-cephfsplugin.yaml
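A quick sanity check before continuing: the provisioner and nodeplugin Pods should all reach Running in the cephfs namespace:
kubectl -n cephfs get pod -o wide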
1. On Kubernetes, create the secret used to connect to the Ceph cluster (create secret.yaml)
cd ~/ceph-csi/examples/cephfs #(location of the upstream secret.yaml template)
cat > ~/my-ceph-csi/examples/cephfs/secret.yaml << EOF
---
apiVersion: v1
kind: Secret
metadata:
  name: csi-cephfs-secret
  namespace: cephfs
stringData:
  # Keys come from "ceph auth get client.admin"; no base64 encoding is needed here
  # Required for statically provisioned volumes
  userID: admin
  userKey: AQD59Q9lckU/LxAAXq25s+RS7gEPOlfr1Ry3kQ==
  # Required for dynamically provisioned volumes
  adminID: admin
  adminKey: AQD59Q9lckU/LxAAXq25s+RS7gEPOlfr1Ry3kQ==
EOF
kubectl apply -f ~/my-ceph-csi/examples/cephfs/secret.yaml
kubectl get secret csi-cephfs-secret -n cephfs
2. Create the StorageClass (create storageclass.yaml)
Field 1 (clusterID): set it to your own Ceph cluster ID (from ceph mon dump).
Field 2 (fsName): the file system created above, named cephfs.
Field 3 (pool): uncomment it and set the data pool, not the metadata pool.
cd ~/ceph-csi/examples/cephfs #(location of the upstream storageclass.yaml template)
cat > ~/my-ceph-csi/examples/cephfs/storageclass.yaml << EOF
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-cephfs-sc
provisioner: cephfs.csi.ceph.com
parameters:
  clusterID: ba8c6ea3-30b5-4483-9fa0-26636c64a142   # change this
  fsName: cephfs                                     # change this
  pool: cephfs_pool                                  # change this
  csi.storage.k8s.io/provisioner-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/provisioner-secret-namespace: cephfs
  csi.storage.k8s.io/controller-expand-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: cephfs
  csi.storage.k8s.io/node-stage-secret-name: csi-cephfs-secret
  csi.storage.k8s.io/node-stage-secret-namespace: cephfs
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - debug
EOF
kubectl apply -f ~/my-ceph-csi/examples/cephfs/storageclass.yaml
kubectl get sc csi-cephfs-sc -n cephfs
3. Create a PVC backed by the StorageClass
cd ~/ceph-csi/examples/cephfs #(location of the upstream pvc.yaml template)
cat > ~/my-ceph-csi/examples/cephfs/pvc.yaml << EOF
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-cephfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-cephfs-sc
EOF
kubectl apply -f ~/my-ceph-csi/examples/cephfs/pvc.yaml
kubectl get pvc
#If the ceph-csi PVC creation fails with: failed to get connection: connecting failed: rados: ret=13, Permission denied
[root@ceph1 ~]# ceph config set mon auth_allow_insecure_global_id_reclaim true
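This relaxes a monitor-side check for clients that have not been patched for the insecure global_id reclaim issue, so it is worth tightening again once all clients are upgraded; the current value can be verified with:
ceph config get mon auth_allow_insecure_global_id_reclaim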
4. Create a Pod that uses the PVC
cat > ~/my-ceph-csi/examples/cephfs/pod.yaml << EOF
---
apiVersion: v1
kind: Pod
metadata:
  name: csi-cephfs-demo-pod
spec:
  containers:
    - name: web-server
      image: nginx:alpine
      volumeMounts:
        - name: mypvc
          mountPath: /var/lib/www
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: csi-cephfs-pvc
        readOnly: false
EOF
kubectl apply -f ~/my-ceph-csi/examples/cephfs/pod.yaml
kubectl get pod csi-cephfs-demo-pod -n default
kubectl exec -it csi-cephfs-demo-pod -- df -Th
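As an extra check, a write/read round-trip on the CephFS mount (mountPath as set in pod.yaml above):
kubectl exec -it csi-cephfs-demo-pod -- sh -c 'echo cephfs-test > /var/lib/www/test.txt && cat /var/lib/www/test.txt'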
#Create the storage pool, specifying the pg and pgp counts; pgp governs how the data in the PGs is placed and is normally set equal to pg
# Create the pool
ceph osd pool create kubernetes 64 64
#Enable the RBD application on the pool
ceph osd pool application enable kubernetes rbd
#Initialize the pool with the rbd command
rbd pool init -p kubernetes
#List the pools
ceph osd pool ls
#Show the admin key
ceph auth get client.admin 2>&1 |grep "key = " |awk '{print $3}'
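A simpler equivalent that prints only the key, with no grep/awk needed:
ceph auth get-key client.admin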
#Create a user on Ceph
ceph auth add client.kubernetes mon 'allow r' osd 'allow rwx pool=kubernetes'
#Get the Ceph cluster information
ceph mon dump
#######################################################################
fsid ba8c6ea3-30b5-4483-9fa0-26636c64a142
min_mon_release 15 (octopus)
0: [v2:192.168.100.201:3300/0,v1:192.168.100.201:6789/0] mon.vm-201
1: [v2:192.168.100.202:3300/0,v1:192.168.100.202:6789/0] mon.vm-202
2: [v2:192.168.100.203:3300/0,v1:192.168.100.203:6789/0] mon.vm-203
#Check the created user
ceph auth get client.kubernetes
#######################################################################
[client.kubernetes]
key = AQBkIhBlIxMbMRAAqNw6Gab2wxNXScSsb7zS/w==
caps mon = "allow r"
caps osd = "allow rwx pool=kubernetes"
exported keyring for client.kubernetes
#######################################################################
cd /root/ceph-csi-3.2.0/deploy/rbd/kubernetes
# Remove the ceph-csi-encryption-kms-config ConfigMap references from
# csi-rbdplugin-provisioner.yaml and csi-rbdplugin.yaml
sed -i 's/namespace: default/namespace: ceph/g' *.yaml
kubectl create ns ceph
#Apply the manifests
kubectl -n ceph apply -f .
kubectl get pod -n ceph
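If you would rather not edit the two manifests by hand, an alternative (my suggestion, not part of the original steps) is to keep the volume references and satisfy them with an empty KMS ConfigMap:
kubectl -n ceph create configmap ceph-csi-encryption-kms-config --from-literal=config.json='{}'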
mkdir -p /root/ceph-csi-v3.2.0 && cd /root/ceph-csi-v3.2.0
#Create the namespace
kubectl create ns ceph-csi
cat > 1-csi-config-map.yaml << 'EOF'
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "ba8c6ea3-30b5-4483-9fa0-26636c64a142",
        "monitors": [
          "10.100.100.115:6789",
          "10.100.100.116:6789",
          "10.100.100.117:6789"
        ]
      }
    ]
metadata:
  name: ceph-csi-config
  namespace: default
EOF
cd /root/ceph-csi-v3.2.0
cat > 2-csi-rbd-secret.yaml << 'EOF'
---
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: default
stringData:
  userID: kubernetes
  userKey: AQBkIhBlIxMbMRAAqNw6Gab2wxNXScSsb7zS/w==
EOF
cd /root/ceph-csi-v3.2.0
cat > 3-csi-rbd-sc.yaml << 'EOF'
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
  annotations:
    storageclass.beta.kubernetes.io/is-default-class: "true"
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: ba8c6ea3-30b5-4483-9fa0-26636c64a142
  pool: kubernetes
  imageFeatures: layering
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
  csi.storage.k8s.io/fstype: xfs
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - discard
EOF
sed -i 's/namespace: default/namespace: ceph/g' *.yaml
#Apply the manifests
kubectl -n ceph apply -f 1-csi-config-map.yaml
kubectl -n ceph apply -f 2-csi-rbd-secret.yaml
kubectl -n ceph apply -f 3-csi-rbd-sc.yaml
kubectl -n ceph get sc -o wide
#For PVs, Kubernetes supports two volume modes (volumeModes): Filesystem and Block.
#volumeMode is an optional API parameter.
#If it is omitted, the default volume mode is Filesystem.
#A volume with volumeMode set to Filesystem is mounted into a directory in the Pod.
#If the volume is backed by a block device and the device is empty, Kubernetes creates a filesystem on it before mounting it for the first time.
kubectl create ns test
cd /root/ceph-csi-v3.2.0
cat > 8-test-rbd-pvc.yaml << 'EOF'
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: raw-block-pvc
  namespace: test
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Block
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd-sc
EOF
kubectl apply -f 8-test-rbd-pvc.yaml
kubectl get pvc -n test -o wide
#NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
#raw-block-pvc Bound pvc-beb3a8c6-b659-44c6-bc5c-5fbd3ead1706 1Gi RWO csi-rbd-sc 10s Block
cat > 9-test-rbd-pod.yaml << 'EOF'
---
apiVersion: v1
kind: Pod
metadata:
  name: csi-rbd-demo-block-pod
  namespace: test
spec:
  containers:
    - name: centos-pod
      image: nginx:1.21
      imagePullPolicy: "IfNotPresent"
      command: ["/bin/sh", "-c"]
      args: ["tail -f /dev/null"]
      volumeDevices:
        - name: data
          devicePath: /dev/xvda
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: raw-block-pvc
EOF
kubectl apply -f 9-test-rbd-pod.yaml
kubectl get po -n test
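The raw block device should show up inside the container at the devicePath declared above, which can be confirmed with:
kubectl -n test exec -it csi-rbd-demo-block-pod -- ls -l /dev/xvda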
#The next example uses the default volumeMode, Filesystem (described above), instead of Block.
cd /root/ceph-csi-v3.2.0
cat > 10-pvc.yaml << 'EOF'
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd-sc
EOF
kubectl apply -f 10-pvc.yaml
kubectl get pvc -o wide
#NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
#raw-block-pvc Bound pvc-beb3a8c6-b659-44c6-bc5c-5fbd3ead1706 1Gi RWO csi-rbd-sc 81m Block
#rbd-pvc Bound pvc-3a57e9e7-31c1-4fe6-958e-df4a985654c5 1Gi RWO csi-rbd-sc 29m Filesystem
cat > 11-pod.yaml << 'EOF'
---
apiVersion: v1
kind: Pod
metadata:
  name: csi-rbd-file-pod
spec:
  containers:
    - name: web
      image: nginx:1.21
      imagePullPolicy: "IfNotPresent"
      volumeMounts:
        - name: mypvc
          mountPath: /var/lib/www/html
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: rbd-pvc
        readOnly: false
EOF
kubectl apply -f 11-pod.yaml
# kubectl exec -it csi-rbd-file-pod -- df -h
#Filesystem Size Used Avail Use% Mounted on
#overlay 20G 4.8G 16G 24% /
#tmpfs 64M 0 64M 0% /dev
#tmpfs 1.9G 0 1.9G 0% /sys/fs/cgroup
#/dev/sda1 20G 4.8G 16G 24% /etc/hosts
#shm 64M 0 64M 0% /dev/shm
#/dev/rbd0 976M 2.6M 958M 1% /var/lib/www/html <-- the RBD volume is mounted here
#tmpfs 1.9G 12K 1.9G 1% /run/secrets/kubernetes.io/serviceaccount
#tmpfs 1.9G 0 1.9G 0% /proc/acpi
#tmpfs 1.9G 0 1.9G 0% /proc/scsi
#tmpfs 1.9G 0 1.9G 0% /sys/firmware
kubectl get po