ceph crushmap

The crushmap after my modifications in the test environment looks like this:

# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1

# devices
device 0 osd.0
device 1 osd.1
device 2 osd.2
device 3 osd.3
device 4 osd.4
device 5 osd.5

# types
type 0 osd
type 1 host
type 2 chassis
type 3 rack
type 4 row
type 5 pdu
type 6 pod
type 7 room
type 8 datacenter
type 9 region
type 10 root

# buckets
host osd0 {
	id -2		# do not change unnecessarily
	# weight 0.020
	alg straw
	hash 0	# rjenkins1
	item osd.0 weight 0.010
	item osd.1 weight 0.010
}
host osd1 {
	id -3		# do not change unnecessarily
	# weight 0.020
	alg straw
	hash 0	# rjenkins1
	item osd.2 weight 0.010
	item osd.3 weight 0.010
}
host osd2 {
	id -4		# do not change unnecessarily
	# weight 0.020
	alg straw
	hash 0	# rjenkins1
	item osd.4 weight 0.010
	item osd.5 weight 0.010
}

host osd3 {
	id -5
	alg straw
	hash 0
	item osd.0 weight 0.010
	item osd.2 weight 0.010
	item osd.4 weight 0.010
}

root default {
	id -1		# do not change unnecessarily
	# weight 0.060
	alg straw
	hash 0	# rjenkins1
#     	item osd0 weight 0.020
#	item osd1 weight 0.020
#	item osd2 weight 0.020
	item osd3 weight 0.030
}

# rules
rule replicated_ruleset {
	ruleset 0
	type replicated
	min_size 1
	max_size 10
	step take default
	step chooseleaf firstn 0 type osd
	step emit
}
# end crush map

Here I defined an extra host bucket, osd3, containing one disk from each of the other three machines, added osd3 to the default root bucket, and adjusted the rule. Note that the type in the rule's chooseleaf step must be changed to osd: osd3 is now the only bucket under default, so selecting by host could place at most one replica, and the replicas have to be spread across individual OSDs instead.
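
To check that the rule behaves as intended, the edited map can be compiled and run through crushtool's test mode (a sketch; crushmap.txt and crushmap.bin are placeholder file names):

# compile the edited text map
$ crushtool -c crushmap.txt -o crushmap.bin
# simulate placements for rule 0 with 3 replicas
$ crushtool -i crushmap.bin --test --rule 0 --num-rep 3 --show-mappings

Every mapping printed should draw its OSDs from osd.0, osd.2 and osd.4, since those are the only leaves reachable under default.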

Following this recipe, it is quite easy to define your own crushmap.
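
The round trip for editing the crushmap of a running cluster is always the same (a sketch; the file names are placeholders):

# dump the compiled crushmap from the cluster
$ ceph osd getcrushmap -o crushmap.bin
# decompile it into an editable text file
$ crushtool -d crushmap.bin -o crushmap.txt
# ... edit buckets and rules in crushmap.txt ...
# recompile and inject it back into the cluster
$ crushtool -c crushmap.txt -o crushmap.new
$ ceph osd setcrushmap -i crushmap.new

Below is another complete example, with four hosts of two OSDs each; note that it uses different type names (device and pool instead of osd and root):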

# begin crush map
tunable choose_local_tries 0
tunable choose_local_fallback_tries 0
tunable choose_total_tries 50
tunable chooseleaf_descend_once 1

# devices
device 0 osd.0
device 1 osd.1
device 2 osd.2
device 3 osd.3
device 4 osd.4
device 5 osd.5
device 6 osd.6
device 7 osd.7

# types
type 0 device
type 1 host
type 2 pool

# buckets
host default_10.10.2.1 {
	id -2		# do not change unnecessarily
	# weight 9.019
	alg straw
	hash 0	# rjenkins1
	item osd.0 weight 4.509
	item osd.1 weight 4.509
}
host default_10.10.2.2 {
	id -3		# do not change unnecessarily
	# weight 9.019
	alg straw
	hash 0	# rjenkins1
	item osd.2 weight 4.509
	item osd.3 weight 4.509
}
host default_10.10.2.3 {
	id -4		# do not change unnecessarily
	# weight 9.019
	alg straw
	hash 0	# rjenkins1
	item osd.4 weight 4.509
	item osd.5 weight 4.509
}
host default_10.10.2.4 {
	id -5		# do not change unnecessarily
	# weight 9.019
	alg straw
	hash 0	# rjenkins1
	item osd.6 weight 4.509
	item osd.7 weight 4.509
}
pool default {
	id -1		# do not change unnecessarily
	# weight 36.074
	alg straw
	hash 0	# rjenkins1
	item default_10.10.2.1 weight 9.019
	item default_10.10.2.2 weight 9.019
	item default_10.10.2.3 weight 9.019
	item default_10.10.2.4 weight 9.019
}

# rules
rule data {
	ruleset 0
	type replicated
	min_size 1
	max_size 10
	step take default
	step chooseleaf firstn 0 type host
	step emit
}
rule metadata {
	ruleset 1
	type replicated
	min_size 1
	max_size 10
	step take default
	step chooseleaf firstn 0 type host
	step emit
}
rule rbd {
	ruleset 2
	type replicated
	min_size 1
	max_size 10
	step take default
	step chooseleaf firstn 0 type host
	step emit
}

# end crush map

sebastien-han wrote an article on building a crushmap that separates SSD and SATA disks. First, the diagram:

[Figure 1: SSD and SATA crushmap layout]

His configuration is excerpted below:

##
# OSD SATA DECLARATION
##
host ceph-osd2-sata {
  id -2   # do not change unnecessarily
  # weight 0.000
  alg straw
  hash 0  # rjenkins1
  item osd.0 weight 1.000
  item osd.3 weight 1.000
}
host ceph-osd1-sata {
  id -3   # do not change unnecessarily
  # weight 0.000
  alg straw
  hash 0  # rjenkins1
  item osd.2 weight 1.000
  item osd.5 weight 1.000
}
host ceph-osd0-sata {
  id -4   # do not change unnecessarily
  # weight 0.000
  alg straw
  hash 0  # rjenkins1
  item osd.1 weight 1.000
  item osd.4 weight 1.000
}

##
# OSD SSD DECLARATION
##

host ceph-osd2-ssd {
  id -22    # do not change unnecessarily
  # weight 0.000
  alg straw
  hash 0  # rjenkins1
  item osd.6 weight 1.000
  item osd.9 weight 1.000
}
host ceph-osd1-ssd {
  id -23    # do not change unnecessarily
  # weight 0.000
  alg straw
  hash 0  # rjenkins1
  item osd.8 weight 1.000
  item osd.11 weight 1.000
}
host ceph-osd0-ssd {
  id -24    # do not change unnecessarily
  # weight 0.000
  alg straw
  hash 0  # rjenkins1
  item osd.7 weight 1.000
  item osd.10 weight 1.000
}

##
# SATA ROOT DECLARATION
##

root sata {
  id -1   # do not change unnecessarily
  # weight 0.000
  alg straw
  hash 0  # rjenkins1
  item ceph-osd2-sata weight 2.000
  item ceph-osd1-sata weight 2.000
  item ceph-osd0-sata weight 2.000
}

##
# SSD ROOT DECLARATION
##

root ssd {
  id -21    # do not change unnecessarily
  # weight 0.000
  alg straw
  hash 0  # rjenkins1
  item ceph-osd2-ssd weight 2.000
  item ceph-osd1-ssd weight 2.000
  item ceph-osd0-ssd weight 2.000
}

##
# SSD RULE DECLARATION
##

# rules
rule ssd {
 ruleset 0
 type replicated
 min_size 1
 max_size 10
 step take ssd
 step chooseleaf firstn 0 type host
 step emit
}

##
# SATA RULE DECLARATION
##

rule sata {
 ruleset 1
 type replicated
 min_size 1
 max_size 10
 step take sata
 step chooseleaf firstn 0 type host
 step emit
}
To compile and apply the above crushmap:

$ crushtool -c lamap.txt -o lamap.coloc
$ sudo ceph osd setcrushmap -i lamap.coloc
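
After injecting the map, a quick sanity check confirms that the two trees and the rules ended up as expected (commands only, output omitted):

# the ssd and sata roots and their hosts should both appear in the tree
$ ceph osd tree
# and both rules should be listed
$ ceph osd crush rule dump
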
Then point each pool at its crush rule:

root@ceph-mon0:~# ceph osd pool create ssd 128 128
pool 'ssd' created
root@ceph-mon0:~# ceph osd pool create sata 128 128
pool 'sata' created

root@ceph-mon0:~# ceph osd pool set ssd crush_ruleset 0
set pool 8 crush_ruleset to 0
root@ceph-mon0:~# ceph osd pool set sata crush_ruleset 1
set pool 9 crush_ruleset to 1
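
The assignment can be double-checked per pool (on this release the property is called crush_ruleset; newer releases renamed it to crush_rule):

$ ceph osd pool get ssd crush_ruleset
$ ceph osd pool get sata crush_ruleset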

One more thing: add the following to ceph.conf on the OSD nodes. Otherwise each OSD updates its own CRUSH location on startup and moves itself back under its hostname in the default tree, undoing the SSD/SATA split:

[osd]
osd crush update on start = false
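
With automatic updates disabled, an OSD's position has to be set by hand whenever it is created or re-homed; a minimal sketch, reusing the bucket names from the map above:

# pin osd.6 under the ssd root with weight 1.0
$ ceph osd crush set osd.6 1.000 root=ssd host=ceph-osd2-ssd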

All of this is fairly simple. But how should a crushmap that spans different network segments be configured? I have not found any material on that yet.
