Kubernetes Ceph tier configuration

  • Quick install (covered only briefly)

cd cluster/examples/kubernetes/ceph
kubectl create -f common.yaml
kubectl create -f operator.yaml
kubectl -n rook-ceph get pod  # once all pods are Running, go on to create the cluster
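The operator also starts a rook-ceph-agent and a rook-discover pod on every node. A simple way to follow progress and to inspect the operator if something stays Pending (both commands assume the default rook-ceph namespace created by common.yaml):

kubectl -n rook-ceph get pod -w
kubectl -n rook-ceph logs deploy/rook-ceph-operator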
  • Label the nodes

Label the nodes that will run ceph-mon with ceph-mon=enabled:
kubectl label nodes {k8s-product01-ceph01,k8s-product01-ceph02,k8s-product01-ceph03} ceph-mon=enabled
Label the nodes that will run ceph-osd (the storage nodes) with ceph-osd=enabled:
kubectl label nodes {k8s-product01-ceph01,k8s-product01-ceph02,k8s-product01-ceph03} ceph-osd=enabled
Label the nodes that will run ceph-mgr with ceph-mgr=enabled:
# mgr can only run on a single node; this is a limitation of running Ceph inside k8s
kubectl label nodes {k8s-product01-ceph01,k8s-product01-ceph02,k8s-product01-ceph03} ceph-mgr=enabled
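A quick verification that the labels landed on the intended nodes (not part of the original steps, just a check):

kubectl get nodes -l ceph-mon=enabled
kubectl get nodes -l ceph-osd=enabled
kubectl get nodes -l ceph-mgr=enabled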
  • Push the images to your own private registry

docker images
docker tag ceph/ceph:v14.2.1-20190430  reg01.sky-mobi.com/k8s/ceph:v14.2.1-20190430
docker push reg01.sky-mobi.com/k8s/ceph:v14.2.1-20190430
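If the registry exposes the standard Docker Registry v2 API, the pushed tag can be confirmed with a single request (reg01.sky-mobi.com is the registry used above; add credentials if it requires authentication):

curl -k https://reg01.sky-mobi.com/v2/k8s/ceph/tags/list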
  • cluster.yaml

apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  cephVersion:
    image: reg01.sky-mobi.com/k8s/ceph:v14.2.1-20190430
    allowUnsupported: false
  dataDirHostPath: /var/lib/rook
  mon:
    count: 3
    allowMultiplePerNode: false
  dashboard:
    enabled: true
  network:
    hostNetwork: false
  rbdMirroring:
    workers: 0
  placement:
    mon:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: ceph-mon
              operator: In
              values:
              - enabled
    osd:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: ceph-osd
              operator: In
              values:
              - enabled
    mgr:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: ceph-mgr
              operator: In
              values:
              - enabled
  annotations:
  resources:
  storage: # cluster level storage configuration and selection
    useAllNodes: false
    useAllDevices: false
    deviceFilter:
    location:
    config:
    nodes:
    - name: "k8s-product01-ceph01"
      deviceFilter: "^sd[b-g]"
      directories:
      - path: "/ssd"
    - name: "k8s-product01-ceph02"
      deviceFilter: "^sd[b-g]"
      directories:
      - path: "/ssd"
    - name: "k8s-product01-ceph03" 
      deviceFilter: "^sd[b-g]"
      directories:
      - path: "/ssd"
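The deviceFilter regex "^sd[b-g]" picks up /dev/sdb through /dev/sdg on each storage node, while the /ssd directory is used as a directory-backed OSD. A quick sanity check on a node before creating the cluster, assuming standard /dev/sdX naming:

lsblk -d -o NAME,SIZE,ROTA | grep -E '^sd[b-g]'   # disks the filter will match (ROTA=1 means rotational)
df -h /ssd                                        # filesystem backing the directory OSD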
  • Create the cluster

kubectl create -f cluster.yaml
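The Ceph commands in the following sections are run from the rook-ceph-tools pod. If it has not been deployed yet, it can be created from the toolbox manifest that ships in the same examples directory (toolbox.yaml in Rook v1.0):

kubectl create -f toolbox.yaml
kubectl -n rook-ceph get pod -l "app=rook-ceph-tools"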

  • Configure Ceph

kubectl -n rook-ceph get pod -l "app=rook-ceph-tools" -o wide
NAME                              READY   STATUS    RESTARTS   AGE     IP               NODE                   NOMINATED NODE   READINESS GATES
rook-ceph-tools-7855f6666-gwvmv   1/1     Running   0          5h58m   192.168.183.11   k8s-product01-ceph01   <none>           <none>
kubectl  -n rook-ceph exec -it rook-ceph-tools-7855f6666-gwvmv bash
  • Enter the ceph tools pod and run the following

ceph osd getcrushmap -o crushmap.map  # export the CRUSH map
crushtool -d crushmap.map -o crushmap.txt  # decompile it into readable text
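Keeping the exported binary map is cheap insurance: if the edited map does not behave as expected, the original can be re-injected with the same setcrushmap command used later:

ceph osd setcrushmap -i crushmap.map  # roll back to the original CRUSH map if needed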
  • The unmodified CRUSH map text

# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable chooseleaf_vary_r 1
tunable straw_calc_version 1

# devices
device 0 osd.0 class hdd
device 1 osd.1 class hdd
device 2 osd.2 class hdd
device 3 osd.3 class hdd
device 4 osd.4 class hdd
device 5 osd.5 class hdd
device 6 osd.6 class hdd
device 7 osd.7 class hdd
device 8 osd.8 class hdd
device 9 osd.9 class hdd
device 10 osd.10 class hdd
device 11 osd.11 class hdd
device 12 osd.12 class hdd
device 13 osd.13 class hdd
device 14 osd.14 class hdd
device 15 osd.15 class hdd
device 16 osd.16 class hdd
device 17 osd.17 class hdd
device 18 osd.18 class ssd
device 19 osd.19 class ssd
device 20 osd.20 class ssd

# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 region
type 10 root
# buckets
host k8s-product01-ceph01 {
        id -3           # do not change unnecessarily
        id -4 class hdd         # do not change unnecessarily
        id -9 class ssd         # do not change unnecessarily
        # weight 22.917
        alg straw
        hash 0  # rjenkins1
        item osd.15 weight 3.638
        item osd.3 weight 3.638
        item osd.12 weight 3.638
        item osd.6 weight 3.638
        item osd.1 weight 3.638
        item osd.9 weight 3.638
        item osd.18 weight 1.091
}
host k8s-product01-ceph02 {
        id -5           # do not change unnecessarily
        id -6 class hdd         # do not change unnecessarily
        id -10 class ssd                # do not change unnecessarily
        # weight 22.917
        alg straw
        hash 0  # rjenkins1
        item osd.4 weight 3.638
        item osd.16 weight 3.638
        item osd.7 weight 3.638
        item osd.0 weight 3.638
        item osd.13 weight 3.638
        item osd.10 weight 3.638
        item osd.19 weight 1.091
}
host k8s-product01-ceph03 {
        id -7           # do not change unnecessarily
        id -8 class hdd         # do not change unnecessarily
        id -11 class ssd                # do not change unnecessarily
        # weight 22.917
        alg straw
        hash 0  # rjenkins1
        item osd.14 weight 3.638
        item osd.11 weight 3.638
        item osd.5 weight 3.638
        item osd.8 weight 3.638
        item osd.17 weight 3.638
        item osd.2 weight 3.638
        item osd.20 weight 1.091
}
root default {
        id -1           # do not change unnecessarily
        id -2 class hdd         # do not change unnecessarily
        id -12 class ssd                # do not change unnecessarily
        # weight 68.751
        alg straw
        hash 0  # rjenkins1
        item k8s-product01-ceph01 weight 22.917
        item k8s-product01-ceph02 weight 22.917
        item k8s-product01-ceph03 weight 22.917
}

# rules
rule replicated_ruleset {
        id 0
        type replicated
        min_size 1
        max_size 10
        step take default
        step chooseleaf firstn 0 type host
        step emit
}

# end crush map
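Before editing, it can help to confirm which OSDs Ceph has assigned to each device class; on Nautilus the device-class listing commands are available from the toolbox:

ceph osd crush class ls            # lists the classes in use (hdd and ssd here)
ceph osd crush class ls-osds ssd   # lists the OSDs in the ssd class (osd.18-20 in this map)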
  • Make a copy of the file and edit it
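The edited copy is the file compiled as skymobi.txt in the final step; a minimal way to create it inside the toolbox pod before editing:

cp crushmap.txt skymobi.txt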

# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1
tunable chooseleaf_vary_r 1
tunable straw_calc_version 1

# devices
device 0 osd.0 class hdd
device 1 osd.1 class hdd
device 2 osd.2 class hdd
device 3 osd.3 class hdd
device 4 osd.4 class hdd
device 5 osd.5 class hdd
device 6 osd.6 class hdd
device 7 osd.7 class hdd
device 8 osd.8 class hdd
device 9 osd.9 class hdd
device 10 osd.10 class hdd
device 11 osd.11 class hdd
device 12 osd.12 class hdd
device 13 osd.13 class hdd
device 14 osd.14 class hdd
device 15 osd.15 class hdd
device 16 osd.16 class hdd
device 17 osd.17 class hdd
device 18 osd.18 class ssd
device 19 osd.19 class ssd
device 20 osd.20 class ssd

# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 region
type 10 root
type 11 skydiskarray    # newly added type
# re-added buckets: each node is split into two groups, one ssd and one sata
skydiskarray k8s-product01-ceph01-ssd {
        id -1
        alg straw
        hash 0  # rjenkins1
        item osd.18 weight 1.091
}
skydiskarray k8s-product01-ceph01-sata {
        id -2
        alg straw
        hash 0  # rjenkins1
        item osd.15 weight 3.638
        item osd.3 weight 3.638
        item osd.12 weight 3.638
        item osd.6 weight 3.638
        item osd.1 weight 3.638
        item osd.9 weight 3.638        
}
skydiskarray k8s-product01-ceph02-ssd {
        id -3
        alg straw
        hash 0  # rjenkins1
        item osd.19 weight 1.091
}
skydiskarray k8s-product01-ceph02-sata {
        id -4
        alg straw
        hash 0  # rjenkins1
        item osd.4 weight 3.638
        item osd.16 weight 3.638
        item osd.7 weight 3.638
        item osd.0 weight 3.638
        item osd.13 weight 3.638
        item osd.10 weight 3.638
}
skydiskarray k8s-product01-ceph03-ssd {
        id -5
        alg straw
        hash 0  # rjenkins1
        item osd.20 weight 1.091
}
skydiskarray k8s-product01-ceph03-sata {
        id -6
        alg straw
        hash 0  # rjenkins1
        item osd.14 weight 3.638
        item osd.11 weight 3.638
        item osd.5 weight 3.638
        item osd.8 weight 3.638
        item osd.17 weight 3.638
        item osd.2 weight 3.638
}
root ssd {
        id -7
        alg straw
        hash 0  # rjenkins1
        item k8s-product01-ceph01-ssd weight 1.091
        item k8s-product01-ceph02-ssd weight 1.091
        item k8s-product01-ceph03-ssd weight 1.091
}
root sata {
        id -8
        alg straw
        hash 0  # rjenkins1
        item k8s-product01-ceph01-sata weight 21.828
        item k8s-product01-ceph02-sata weight 21.828
        item k8s-product01-ceph03-sata weight 21.828
}

# newly added rules
rule ssd_ruleset {
        ruleset 0
        type replicated
        min_size 1
        max_size 10
        step take ssd
        step chooseleaf firstn 0 type skydiskarray
        step emit
}
rule sata_ruleset {
        ruleset 1
        type replicated
        min_size 1
        max_size 10
        step take sata
        step chooseleaf firstn 0 type skydiskarray
        step emit
}

# end crush map
  • Configure the Ceph cache tier

crushtool -c skymobi.txt -o skymobi.map  # compile the edited text back into a binary map
ceph osd setcrushmap -i skymobi.map  # inject the new CRUSH map
ceph osd pool create ssd-pool 128 128 ssd_ruleset  # create the ssd pool
ceph osd pool create sata-pool 1024 1024 sata_ruleset  # create the sata pool
ceph osd tier add sata-pool ssd-pool
ceph osd tier cache-mode ssd-pool writeback
ceph osd tier set-overlay sata-pool ssd-pool
ceph osd pool set ssd-pool hit_set_type bloom
ceph osd pool set ssd-pool hit_set_count 12
ceph osd pool set ssd-pool hit_set_period 14400
ceph osd pool set ssd-pool target_max_bytes 900000000000
ceph osd pool set ssd-pool min_read_recency_for_promote 1
ceph osd pool set ssd-pool min_write_recency_for_promote 1
ceph osd pool set ssd-pool cache_target_dirty_ratio 0.4
ceph osd pool set ssd-pool cache_target_dirty_high_ratio 0.6
ceph osd pool set ssd-pool cache_target_full_ratio 0.8
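After the tiering commands it is worth confirming that the new CRUSH roots, rules and cache relationship actually took effect; all of the following are standard read-only commands:

ceph osd tree            # should show the new ssd and sata roots with their skydiskarray buckets
ceph osd crush rule ls   # should list ssd_ruleset and sata_ruleset
ceph osd pool ls detail  # sata-pool should reference ssd-pool as its tier and read/write overlay
ceph df                  # per-pool usage, handy for watching the cache pool fill up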

References:
http://docs.ceph.com/docs/master/rados/operations/cache-tiering/
https://rook.github.io/docs/rook/v1.0/ceph-cluster-crd.html
https://rook.github.io/docs/rook/v1.0/ceph-pool-crd.html
http://docs.ceph.com/docs/master/rados/operations/pools/
