k8s通过rook安装ceph存储

1、把代码git到本地
git clone --single-branch --branch release-1.7 https://github.com/rook/rook.git
cd rook/cluster/examples/kubernetes/ceph
2、修改operator.yaml的镜像(默认镜像源在国外拉不下来),下面是修改好的国内镜像
  # CSI sidecar image overrides for operator.yaml. The upstream defaults live on
  # registries (k8s.gcr.io / registry.k8s.io) that are unreachable from mainland
  # China; these point at mirrored copies with the same tags.
  ROOK_CSI_CEPH_IMAGE: "quay.io/cephcsi/cephcsi:v3.4.0"
  ROOK_CSI_REGISTRAR_IMAGE: "antidebug/csi-node-driver-registrar:v2.3.0"
  ROOK_CSI_RESIZER_IMAGE: "antidebug/csi-resizer:v1.3.0"
  ROOK_CSI_PROVISIONER_IMAGE: "antidebug/csi-provisioner:v3.0.0"
  ROOK_CSI_SNAPSHOTTER_IMAGE: "antidebug/csi-snapshotter:v4.2.0"
  ROOK_CSI_ATTACHER_IMAGE: "antidebug/csi-attacher:v3.3.0"
kubectl create -f crds.yaml -f common.yaml -f operator.yaml
3、修改cluster.yaml(主要修改为mon的数量和osd的节点和磁盘)
# Disable automatic discovery so OSDs are created only on the nodes and
# devices listed explicitly below.
useAllNodes: false
useAllDevices: false
nodes:
- name: "k8s-node2"
  devices:
  # Raw device name as seen on the node (no /dev/ prefix).
  - name: "sda"
  
kubectl create -f cluster.yaml
4、创建块存储类型storageclass和对应pvc
ceph-block-pool.yaml
# RBD pool backing the block StorageClass; must exist before PVCs are created.
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: replicapool
  namespace: rook-ceph
spec:
  failureDomain: host
  replicated:
    # size: 1 keeps a single copy of each object -- no redundancy, data is
    # lost if the OSD fails. Acceptable for this single-node test setup;
    # use 3 (with >= 3 OSD hosts) in production.
    size: 1

block-sc.yaml
# StorageClass for RBD (block) volumes provisioned by the Rook CSI driver.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block
# The "rook-ceph" prefix must match the namespace the Rook operator runs in.
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
  # clusterID is the namespace where the CephCluster is running.
  clusterID: rook-ceph
  # Must reference an existing CephBlockPool (see ceph-block-pool.yaml).
  pool: replicapool
  # RBD image format 2 is required for the "layering" feature below.
  imageFormat: "2"
  imageFeatures: layering
  # Secrets generated by the operator in the cluster namespace. All three
  # pairs are required: provisioner (create/delete), controller-expand
  # (needed for allowVolumeExpansion to work), node-stage (needed by the
  # node plugin to map/mount the RBD image on the host).
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
  # Filesystem created on the RBD image; ignored for volumeMode: Block PVCs.
  csi.storage.k8s.io/fstype: xfs
allowVolumeExpansion: true
reclaimPolicy: Delete

block-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mytomcat-pvc
spec:
  storageClassName: rook-ceph-block
  # Raw block device (no filesystem); the consuming pod must attach it via
  # volumeDevices, not volumeMounts.
  volumeMode: Block
  accessModes:
    # NOTE(review): RBD supports ReadWriteMany only for raw block
    # (volumeMode: Block) volumes -- if this is ever switched to
    # Filesystem mode, change the access mode to ReadWriteOnce.
    - ReadWriteMany
  resources: 
    requests:
      # 200M is the decimal unit (200 * 10^6 bytes); use 200Mi if a
      # binary mebibyte quantity is intended.
      storage: 200M
      
kubectl apply -f  ceph-block-pool.yaml -f  block-sc.yaml -f block-pvc.yaml
5、创建文件共享型storageclass和pvc
fs-ceph.yaml
# Shared filesystem (CephFS) with its metadata and data pools plus MDS daemons.
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: cephfs
  namespace: rook-ceph
spec:
  metadataPool:
    replicated:
      # Single replica: no redundancy, test use only (see block pool note).
      size: 1
  dataPools:
    - replicated:
        size: 1
  # Keep the filesystem data if this CR is deleted.
  preserveFilesystemOnDelete: true
  metadataServer:
    # One active MDS plus a hot standby.
    activeCount: 1
    activeStandby: true
    
fs-ceph-sc.yaml 
# StorageClass for shared (RWX-capable) CephFS volumes.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-cephfs
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph

  # CephFS filesystem name into which the volume shall be created
  fsName: cephfs

  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  # NOTE(review): must match the data pool Rook creates for the filesystem,
  # conventionally "<fsName>-data0" -- verify with `ceph osd pool ls`.
  pool: cephfs-data0

  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
allowVolumeExpansion: true
reclaimPolicy: Delete

fs-pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc
spec:
  accessModes:
  # CephFS natively supports ReadWriteMany, so this claim can be mounted
  # by pods on multiple nodes simultaneously.
  - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
  storageClassName: rook-cephfs
  
  
kubectl apply -f fs-ceph.yaml -f fs-ceph-sc.yaml -f fs-pvc.yaml

你可能感兴趣的:(k8s,1024程序员节)