Node     IP              Disks                                  Services per node
node1    10.1.135.180    /dev/sda /dev/sdc /dev/sdd /dev/sde    mon x1, mgr x1, rgw x1, osd x3
node2    10.1.135.181    /dev/sda /dev/sdc /dev/sdd /dev/sde    mon x1, mgr x1, rgw x1, osd x3
node3    10.1.135.182    /dev/sda /dev/sdc /dev/sdd /dev/sde    mon x1, mgr x1, rgw x1, osd x3
On each node, /dev/sdc carries the BlueStore WAL and DB partitions, while /dev/sda, /dev/sdd and /dev/sde each back one OSD.
Start the mon on node1
docker run -d --net=host \
--name mon \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
-e MON_IP=10.1.135.180 \
-e CEPH_PUBLIC_NETWORK=10.1.135.0/24 \
ceph/daemon mon
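Before moving on, it is worth confirming that the monitor actually came up; a quick look at the container log and at the monitor map will show this (standard Docker and Ceph commands, not part of the original run):
docker logs --tail 20 mon
docker exec mon ceph mon stat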
Create the data directory on node2
mkdir /var/lib/ceph
Copy the necessary files from node1 to node2
scp -r /etc/ceph root@node2:/etc
scp -r /var/lib/ceph/bootstrap* root@node2:/var/lib/ceph
Modify the following two settings in /etc/ceph/ceph.conf on node2
mon initial members = node1,node2
mon host = 10.1.135.180,10.1.135.181
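If you would rather script this edit than open an editor, a sed one-liner per key should do, assuming both keys already exist in ceph.conf exactly as written above:
sed -i 's/^mon initial members.*/mon initial members = node1,node2/' /etc/ceph/ceph.conf
sed -i 's/^mon host.*/mon host = 10.1.135.180,10.1.135.181/' /etc/ceph/ceph.conf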
Start the mon on node2
docker run -d --net=host \
--name mon \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
-e MON_IP=10.1.135.181 \
-e CEPH_PUBLIC_NETWORK=10.1.135.0/24 \
ceph/daemon mon
Create the data directory on node3
mkdir /var/lib/ceph
Copy the necessary files from node2 to node3
scp -r /etc/ceph root@node3:/etc
scp -r /var/lib/ceph/bootstrap* root@node3:/var/lib/ceph
Modify the same two settings in /etc/ceph/ceph.conf on node3
mon initial members = node1,node2,node3
mon host = 10.1.135.180,10.1.135.181,10.1.135.182
Start the mon on node3
docker run -d --net=host \
--name mon \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
-e MON_IP=10.1.135.182 \
-e CEPH_PUBLIC_NETWORK=10.1.135.0/24 \
ceph/daemon mon
Copy /etc/ceph/ceph.conf from node3 back to node1 and node2 so all three nodes share the final configuration
scp /etc/ceph/ceph.conf root@node1:/etc/ceph/ceph.conf
scp /etc/ceph/ceph.conf root@node2:/etc/ceph/ceph.conf
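To verify that all three nodes now hold an identical configuration, comparing checksums is a quick check (this assumes the same passwordless root ssh already used by the scp steps):
for n in node1 node2 node3; do ssh root@$n md5sum /etc/ceph/ceph.conf; done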
Check the cluster status
[root@node1 ~]# docker exec mon ceph -s
  cluster:
    id:     47d6ebed-d923-4b1b-9d93-530913a016b3
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum node1,node2,node3
    mgr: no daemons active
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
Run the following command on each of the three nodes to start a mgr
docker run -d --net=host \
--name mgr \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
ceph/daemon mgr
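With a mgr active you can optionally enable mgr modules, for example the dashboard; ceph mgr services then lists the endpoints the active modules expose (both are stock Ceph commands, and the dashboard port and protocol depend on your release):
docker exec mon ceph mgr module enable dashboard
docker exec mon ceph mgr services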
Check the cluster status
[root@node1 ~]# docker exec mon ceph -s
  cluster:
    id:     47d6ebed-d923-4b1b-9d93-530913a016b3
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum node1,node2,node3
    mgr: node1(active), standbys: node3, node2
    osd: 0 osds: 0 up, 0 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:
Run the following commands on each of the three nodes: first zap the four disks, then start three BlueStore OSDs that use /dev/sdc as the shared WAL and DB device
docker run --rm --privileged=true \
-v /dev/:/dev/ \
-e OSD_DEVICE=/dev/sda \
ceph/daemon zap_device
docker run --rm --privileged=true \
-v /dev/:/dev/ \
-e OSD_DEVICE=/dev/sdc \
ceph/daemon zap_device
docker run --rm --privileged=true \
-v /dev/:/dev/ \
-e OSD_DEVICE=/dev/sdd \
ceph/daemon zap_device
docker run --rm --privileged=true \
-v /dev/:/dev/ \
-e OSD_DEVICE=/dev/sde \
ceph/daemon zap_device
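The four zap invocations differ only in the device name, so an equivalent loop keeps this compact (same effect as the four commands above):
for dev in sda sdc sdd sde; do
  docker run --rm --privileged=true \
    -v /dev/:/dev/ \
    -e OSD_DEVICE=/dev/$dev \
    ceph/daemon zap_device
done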
docker run -d --net=host \
--name osd1 \
--privileged=true \
--pid=host \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /dev/:/dev/ \
-e OSD_DEVICE=/dev/sda \
-e OSD_TYPE=disk \
-e OSD_BLUESTORE=1 \
-e OSD_BLUESTORE_BLOCK_WAL=/dev/sdc \
-e OSD_BLUESTORE_BLOCK_DB=/dev/sdc \
ceph/daemon osd
docker run -d --net=host \
--name osd2 \
--privileged=true \
--pid=host \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /dev/:/dev/ \
-e OSD_DEVICE=/dev/sdd \
-e OSD_TYPE=disk \
-e OSD_BLUESTORE=1 \
-e OSD_BLUESTORE_BLOCK_WAL=/dev/sdc \
-e OSD_BLUESTORE_BLOCK_DB=/dev/sdc \
ceph/daemon osd
docker run -d --net=host \
--name osd3 \
--privileged=true \
--pid=host \
-v /etc/ceph:/etc/ceph \
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /dev/:/dev/ \
-e OSD_DEVICE=/dev/sde \
-e OSD_TYPE=disk \
-e OSD_BLUESTORE=1 \
-e OSD_BLUESTORE_BLOCK_WAL=/dev/sdc \
-e OSD_BLUESTORE_BLOCK_DB=/dev/sdc \
ceph/daemon osd
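Besides ceph -s, ceph osd tree shows where each OSD landed in the CRUSH map, a useful sanity check that every host contributed three OSDs:
docker exec mon ceph osd tree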
Check the cluster status
[root@node1 ~]# docker exec mon ceph -s
  cluster:
    id:     47d6ebed-d923-4b1b-9d93-530913a016b3
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum node1,node2,node3
    mgr: node1(active), standbys: node3, node2
    osd: 9 osds: 9 up, 9 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   18 GiB used, 28 TiB / 28 TiB avail
    pgs:
Run the following command on each of the three nodes to start an rgw
docker run -d --net=host \
--name rgw \
-v /var/lib/ceph/:/var/lib/ceph/ \
-v /etc/ceph:/etc/ceph \
ceph/daemon rgw
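As a quick liveness check, an anonymous request against any node should return an S3 ListAllMyBuckets response; this assumes the ceph/daemon image's default civetweb port of 8080 (adjust if you set RGW_CIVETWEB_PORT):
curl http://10.1.135.180:8080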
Check the cluster status
[root@node1 ~]# docker exec mon ceph -s
  cluster:
    id:     47d6ebed-d923-4b1b-9d93-530913a016b3
    health: HEALTH_WARN
            too few PGs per OSD (10 < min 30)

  services:
    mon: 3 daemons, quorum node1,node2,node3
    mgr: node1(active), standbys: node3, node2
    osd: 9 osds: 9 up, 9 in
    rgw: 3 daemons active

  data:
    pools:   4 pools, 32 pgs
    objects: 187 objects, 1.1 KiB
    usage:   18 GiB used, 28 TiB / 28 TiB avail
    pgs:     32 active+clean
Inspect the cluster's pools and its default-placement configuration
[root@node1 ~]# docker exec mon ceph osd pool ls detail
pool 1 '.rgw.root' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 43 flags hashpspool stripe_width 0 application rgw
pool 2 'default.rgw.control' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 46 flags hashpspool stripe_width 0 application rgw
pool 3 'default.rgw.meta' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 48 flags hashpspool stripe_width 0 application rgw
pool 4 'default.rgw.log' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 50 flags hashpspool stripe_width 0 application rgw
[root@node1 ~]# docker exec rgw radosgw-admin zone placement list --rgw-zone=default
[
    {
        "key": "default-placement",
        "val": {
            "index_pool": "default.rgw.buckets.index",
            "data_pool": "default.rgw.buckets.data",
            "data_extra_pool": "default.rgw.buckets.non-ec",
            "index_type": 0,
            "compression": ""
        }
    }
]
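Note that the three pools referenced by default-placement do not appear in the pool listing yet; radosgw creates them lazily on first use with default PG counts unless they are pre-created, which is what the steps below do. You can confirm they are still absent with:
docker exec mon ceph osd pool ls | grep buckets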
We now need to raise pg_num and pgp_num; in practice the two values are kept equal. The official sizing guidance is expressed as a target for the sum of pg_num across all pools: a commonly cited rule of thumb from the Ceph documentation is total PGs ≈ (OSD count × 100) / replica size, rounded up to a power of two. With 9 OSDs and 3-way replication that works out to 9 × 100 / 3 = 300, i.e. a few hundred PGs in total.
Modify pg_num and pgp_num
docker exec mon ceph osd pool set .rgw.root pg_num 32
docker exec mon ceph osd pool set .rgw.root pgp_num 32
docker exec mon ceph osd pool set default.rgw.control pg_num 32
docker exec mon ceph osd pool set default.rgw.control pgp_num 32
docker exec mon ceph osd pool set default.rgw.meta pg_num 32
docker exec mon ceph osd pool set default.rgw.meta pgp_num 32
docker exec mon ceph osd pool set default.rgw.log pg_num 32
docker exec mon ceph osd pool set default.rgw.log pgp_num 32
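Each change can be confirmed with ceph osd pool get; in this Ceph release pgp_num must be raised to match pg_num before the new placement groups are actually rebalanced, which is why both values are set above:
docker exec mon ceph osd pool get .rgw.root pg_num
docker exec mon ceph osd pool get .rgw.root pgp_num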
Create the pools used by default-placement
docker exec mon ceph osd pool create default.rgw.buckets.index 32 32 replicated replicated_rule
docker exec mon ceph osd pool create default.rgw.buckets.data 256 256 replicated replicated_rule
docker exec mon ceph osd pool create default.rgw.buckets.non-ec 32 32 replicated replicated_rule
Tag the newly created pools with the rgw application
docker exec mon ceph osd pool application enable default.rgw.buckets.index rgw
docker exec mon ceph osd pool application enable default.rgw.buckets.data rgw
docker exec mon ceph osd pool application enable default.rgw.buckets.non-ec rgw
Check the pools in the cluster
[root@node1 ~]# docker exec mon ceph osd pool ls detail
pool 1 '.rgw.root' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 last_change 62 lfor 0/60 flags hashpspool stripe_width 0 application rgw
pool 2 'default.rgw.control' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 last_change 66 lfor 0/64 flags hashpspool stripe_width 0 application rgw
pool 3 'default.rgw.meta' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 last_change 70 lfor 0/68 flags hashpspool stripe_width 0 application rgw
pool 4 'default.rgw.log' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 last_change 74 lfor 0/72 flags hashpspool stripe_width 0 application rgw
pool 7 'default.rgw.buckets.index' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 last_change 92 flags hashpspool stripe_width 0 application rgw
pool 8 'default.rgw.buckets.data' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 256 pgp_num 256 last_change 93 flags hashpspool stripe_width 0 application rgw
pool 9 'default.rgw.buckets.non-ec' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 last_change 94 flags hashpspool stripe_width 0 application rgw
Check the cluster status
[root@node1 ~]# docker exec mon ceph -s
  cluster:
    id:     47d6ebed-d923-4b1b-9d93-530913a016b3
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum node1,node2,node3
    mgr: node1(active), standbys: node3, node2
    osd: 9 osds: 9 up, 9 in
    rgw: 3 daemons active

  data:
    pools:   7 pools, 448 pgs
    objects: 187 objects, 1.1 KiB
    usage:   18 GiB used, 28 TiB / 28 TiB avail
    pgs:     448 active+clean
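As a final end-to-end check, creating an object gateway user should return a set of S3 credentials, confirming rgw can write to the new pools (the uid and display name here are arbitrary examples):
docker exec rgw radosgw-admin user create --uid=demo --display-name=demo-user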