This article explains how to use ceph-csi (v2.0) to provide storage for a Kubernetes cluster; the official setup guide is the ceph-csi environment setup documentation.
Note (this is also called out in the official documentation): ceph-csi uses the default kernel RBD module, which may not support all Ceph CRUSH tunables or RBD image features.
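A quick way to check what a node's kernel can handle (standard commands; output varies by host):
$ uname -r
$ modprobe rbd && lsmod | grep rbd
With that confirmed, create a pool for Kubernetes volumes, initialize it for RBD, and create a cephx user for ceph-csi: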
[root@node1 ~]# ceph osd pool create kubernetes
[root@node1 ~]# rbd pool init kubernetes
[root@node1 ~]# ceph auth get-or-create client.kubernetes mon 'profile rbd' osd 'profile rbd pool=kubernetes' mgr 'profile rbd pool=kubernetes'
[client.kubernetes]
key = AQD9o0Fd6hQRChAAt7fMaSZXduT3NWEqylNpmg==
Alternatively, create the user with explicit capabilities and write the keyring to a file:
ceph auth get-or-create client.kubernetes mon 'allow r' osd 'allow rwx pool=kubernetes' -o ceph.client.kubernetes.keyring
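Either way, you can inspect the resulting user and its caps at any time (the key shown above will differ on your cluster):
[root@node1 ~]# ceph auth get client.kubernetes
ceph-csi also needs the cluster fsid and the monitor addresses, which ceph mon dump provides: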
[root@node1 ~]# ceph mon dump
dumped monmap epoch 1
epoch 1
fsid 82e65362-72f7-4206-b94d-04e2c5e2d70d
last_changed 2020-05-29 05:26:42.313889
created 2020-05-29 05:26:42.313889
min_mon_release 14 (nautilus)
0: [v2:10.80.0.41:3300/0,v1:10.80.0.41:6789/0] mon.node1
Generate the ConfigMap from the information above (the fsid becomes clusterID, and the v1 monitor address goes into monitors):
[root@node1 ~]# cat <<EOF > csi-config-map.yaml
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "82e65362-72f7-4206-b94d-04e2c5e2d70d",
        "monitors": [
          "10.80.0.41:6789"
        ]
      }
    ]
metadata:
  name: ceph-csi-config
EOF
Apply the ConfigMap to the Kubernetes cluster:
kubectl apply -f csi-config-map.yaml
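A quick sanity check that it landed:
$ kubectl get configmap ceph-csi-config -o yaml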
ceph-csi needs the cephx credentials created earlier to talk to the Ceph cluster, so store them in a Secret:
[root@node1 ~]# cat <<EOF > csi-rbd-secret.yaml
---
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: default
stringData:
  userID: kubernetes
  userKey: AQD9o0Fd6hQRChAAt7fMaSZXduT3NWEqylNpmg==
EOF
Apply the Secret to Kubernetes:
kubectl apply -f csi-rbd-secret.yaml
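And likewise verify it:
$ kubectl get secret csi-rbd-secret -o yaml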
Next, create the RBAC objects required by the ceph-csi provisioner and node plugin:
kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-provisioner-rbac.yaml
kubectl apply -f https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-nodeplugin-rbac.yaml
Then deploy the provisioner and the per-node plugin:
$ wget https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-rbdplugin-provisioner.yaml
$ kubectl apply -f csi-rbdplugin-provisioner.yaml
$ wget https://raw.githubusercontent.com/ceph/ceph-csi/master/deploy/rbd/kubernetes/csi-rbdplugin.yaml
$ kubectl apply -f csi-rbdplugin.yaml
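Before continuing, check that the provisioner and plugin pods come up (pod names are generated; this assumes the default object names from the upstream manifests):
$ kubectl get pods | grep csi-rbdplugin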
With the driver running, define a StorageClass that points at the cluster and pool created above:
$ cat <<EOF > csi-rbd-sc.yaml
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: 82e65362-72f7-4206-b94d-04e2c5e2d70d
  imageFeatures: layering
  pool: kubernetes
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
reclaimPolicy: Delete
mountOptions:
  - discard
EOF
$ kubectl apply -f csi-rbd-sc.yaml
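Verify it:
$ kubectl get storageclass csi-rbd-sc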
For reference, the fully annotated StorageClass sample from the ceph-csi repository documents all supported parameters:
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-sc
provisioner: rbd.csi.ceph.com
# If topology based provisioning is desired, delayed provisioning of
# PV is required and is enabled using the following attribute
# For further information read TODO
# volumeBindingMode: WaitForFirstConsumer
parameters:
  # String representing a Ceph cluster to provision storage from.
  # Should be unique across all Ceph clusters in use for provisioning,
  # cannot be greater than 36 bytes in length, and should remain immutable for
  # the lifetime of the StorageClass in use.
  # Ensure to create an entry in the config map named ceph-csi-config, based on
  # csi-config-map-sample.yaml, to accompany the string chosen to
  # represent the Ceph cluster in clusterID below
  clusterID: <cluster-id>
  # If you want to use erasure coded pool with RBD, you need to create
  # two pools. one erasure coded and one replicated.
  # You need to specify the replicated pool here in the `pool` parameter, it is
  # used for the metadata of the images.
  # The erasure coded pool must be set as the `dataPool` parameter below.
  # dataPool: ec-data-pool
  pool: rbd
  # RBD image features, CSI creates image with image-format 2
  # CSI RBD currently supports only `layering` feature.
  imageFeatures: layering
  # The secrets have to contain Ceph credentials with required access
  # to the 'pool'.
  csi.storage.k8s.io/provisioner-secret-name: csi-rbd-secret
  csi.storage.k8s.io/provisioner-secret-namespace: default
  csi.storage.k8s.io/controller-expand-secret-name: csi-rbd-secret
  csi.storage.k8s.io/controller-expand-secret-namespace: default
  csi.storage.k8s.io/node-stage-secret-name: csi-rbd-secret
  csi.storage.k8s.io/node-stage-secret-namespace: default
  # Specify the filesystem type of the volume. If not specified,
  # csi-provisioner will set default as `ext4`.
  csi.storage.k8s.io/fstype: ext4
  # uncomment the following to use rbd-nbd as mounter on supported nodes
  # mounter: rbd-nbd
  # Prefix to use for naming RBD images.
  # If omitted, defaults to "csi-vol-".
  # volumeNamePrefix: "foo-bar-"
  # Instruct the plugin it has to encrypt the volume
  # By default it is disabled. Valid values are "true" or "false".
  # A string is expected here, i.e. "true", not true.
  # encrypted: "true"
  # Use external key management system for encryption passphrases by specifying
  # a unique ID matching KMS ConfigMap. The ID is only used for correlation to
  # config map entry.
  # encryptionKMSID: <kms-config-id>
  # Add topology constrained pools configuration, if topology based pools
  # are setup, and topology constrained provisioning is required.
  # For further information read TODO
  # topologyConstrainedPools: |
  #   [{"poolName":"pool0",
  #     "dataPool":"ec-pool0" # optional, erasure-coded pool for data
  #     "domainSegments":[
  #       {"domainLabel":"region","value":"east"},
  #       {"domainLabel":"zone","value":"zone1"}]},
  #    {"poolName":"pool1",
  #     "dataPool":"ec-pool1" # optional, erasure-coded pool for data
  #     "domainSegments":[
  #       {"domainLabel":"region","value":"east"},
  #       {"domainLabel":"zone","value":"zone2"}]},
  #    {"poolName":"pool2",
  #     "dataPool":"ec-pool2" # optional, erasure-coded pool for data
  #     "domainSegments":[
  #       {"domainLabel":"region","value":"west"},
  #       {"domainLabel":"zone","value":"zone1"}]}
  #   ]
reclaimPolicy: Delete
allowVolumeExpansion: true
mountOptions:
  - discard
On nodes with an older kernel, mapping the RBD image can fail; kubectl describe pod then shows an event like:
Warning FailedMapVolume 0s (x4 over 5s) kubelet, k8s-1 MapVolume.SetUp failed for volume "pvc-5340f32e-fa2e-4f53-af1d-7154430af7a2" : rpc error: code = Internal desc = rbd: map failed exit status 6, rbd output: rbd: sysfs write failed
RBD image feature set mismatch. Try disabling features unsupported by the kernel with "rbd feature disable".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address
By default, Ceph Nautilus creates images with the features layering, exclusive-lock, object-map, fast-diff, and deep-flatten, while the 3.10 Linux kernel only supports layering. That is why the StorageClass above restricts imageFeatures to layering; upgrading the kernel would also avoid the mismatch, but at a much higher cost.
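For an image that was already created with unsupported features, the error message's suggestion also works: strip the extra features on the Ceph side (<image> is a placeholder; list the actual image names with rbd ls kubernetes):
$ rbd feature disable kubernetes/<image> exclusive-lock object-map fast-diff deep-flatten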
With the StorageClass in place, request a raw block volume through a PVC:
$ cat <<EOF > raw-block-pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: raw-block-pvc
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Block
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd-sc
EOF
$ kubectl apply -f raw-block-pvc.yaml
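The PVC should reach Bound once the provisioner has created the backing image:
$ kubectl get pvc raw-block-pvc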
Attach it to a pod as a raw block device via volumeDevices:
$ cat <<EOF > raw-block-pod.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-raw-block-volume
spec:
  containers:
    - name: fc-container
      image: fedora:26
      command: ["/bin/sh", "-c"]
      args: ["tail -f /dev/null"]
      volumeDevices:
        - name: data
          devicePath: /dev/xvda
  volumes:
    - name: data
      persistentVolumeClaim:
        claimName: raw-block-pvc
EOF
$ kubectl apply -f raw-block-pod.yaml
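Once the pod is Running, the volume shows up inside the container as a block device rather than a mounted filesystem:
$ kubectl exec pod-with-raw-block-volume -- ls -l /dev/xvda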
To provision a filesystem-backed volume instead, set volumeMode to Filesystem:
$ cat <<EOF > pvc.yaml
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-pvc
spec:
  accessModes:
    - ReadWriteOnce
  volumeMode: Filesystem
  resources:
    requests:
      storage: 1Gi
  storageClassName: csi-rbd-sc
EOF
$ kubectl apply -f pvc.yaml
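Besides checking the PVC status, you can confirm on the Ceph side that an image was created in the pool (ceph-csi names images with the csi-vol- prefix by default):
$ kubectl get pvc rbd-pvc
[root@node1 ~]# rbd ls kubernetes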
Mount the PVC into a pod as a regular filesystem:
$ cat <<EOF > pod.yaml
---
apiVersion: v1
kind: Pod
metadata:
  name: csi-rbd-demo-pod
spec:
  containers:
    - name: web-server
      image: nginx
      volumeMounts:
        - name: mypvc
          mountPath: /var/lib/www/html
  volumes:
    - name: mypvc
      persistentVolumeClaim:
        claimName: rbd-pvc
        readOnly: false
EOF
$ kubectl apply -f pod.yaml
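Once the pod is Running, the RBD-backed filesystem is mounted at the requested path:
$ kubectl exec csi-rbd-demo-pod -- df -h /var/lib/www/html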
ceph-csi/examples/kms/vault/kms-config.yaml
ceph-csi/deploy/rbd/kubernetes/csi-rbdplugin.yaml
In the spec of the DaemonSet in the files above, add the following configuration (this toleration lets the plugin pods run on master nodes despite the NoSchedule taint):
tolerations:
  - key: node-role.kubernetes.io/master
    effect: NoSchedule
For example:
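A minimal sketch of where the tolerations sit in the csi-rbdplugin DaemonSet manifest, assuming the object and label names from the upstream deploy files (the container list is elided):
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-rbdplugin
spec:
  selector:
    matchLabels:
      app: csi-rbdplugin
  template:
    metadata:
      labels:
        app: csi-rbdplugin
    spec:
      # Let the plugin pods schedule onto master nodes as well
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      hostNetwork: true
      containers:
        # ... csi-rbdplugin containers as in the upstream manifest ...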