问题:ceph4虚拟机异常,无法开机启动
处理办法:删除osd,重新搭建ceph4服务器
[root@ceph1 ~]# #查看ceph集群状态
[root@ceph1 ~]# ceph -s
cluster:
id: edc14ccc-1be5-11ee-9de8-005056b00968
health: HEALTH_WARN
1 hosts fail cephadm check
1/5 mons down, quorum ceph1,ceph5,ceph3,ceph2
services:
mon: 5 daemons, quorum ceph1,ceph5,ceph3,ceph2 (age 14h), out of quorum: ceph4
mgr: ceph1.cbajav(active, since 14h), standbys: ceph2.tqtbwn
mds: cephfs:1 {0=cephfs.ceph5.fvxuqu=up:active} 3 up:standby
osd: 10 osds: 8 up (since 14h), 8 in (since 14h)
data:
pools: 4 pools, 97 pgs
objects: 416 objects, 1.3 GiB
usage: 12 GiB used, 588 GiB / 600 GiB avail
pgs: 97 active+clean
#查看osd状态
[root@ceph1 ~]# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.73473 root default
-3 0.14648 host ceph1
0 hdd 0.04880 osd.0 up 1.00000 1.00000
2 hdd 0.09769 osd.2 up 1.00000 1.00000
-5 0.14648 host ceph2
1 hdd 0.04880 osd.1 up 1.00000 1.00000
3 hdd 0.09769 osd.3 up 1.00000 1.00000
-11 0.14648 host ceph3
7 hdd 0.04880 osd.7 up 1.00000 1.00000
9 hdd 0.09769 osd.9 up 1.00000 1.00000
-9 0.14879 host ceph4
6 hdd 0.04880 osd.6 down 0 1.00000
8 hdd 0.09999 osd.8 down 0 1.00000
-7 0.14648 host ceph5
4 hdd 0.04880 osd.4 up 1.00000 1.00000
5 hdd 0.09769 osd.5 up 1.00000 1.00000
[root@ceph1 ~]#
# Step 1: drain the OSD by setting its CRUSH weight to 0.
# NOTE(review): the original command used 0.1 here, but osd.8's weight was
# already ~0.1 (0.09999), so that drains nothing. The intent (per the note
# below) is to lower the weight to 0 so no data maps to this OSD anymore.
# To migrate data gently, reduce the weight in several smaller steps until
# it reaches 0 and the migration completes.
# This also lowers the enclosing host's weight and therefore changes the
# cluster-wide CRUSH distribution; once the OSD's CRUSH weight is 0, any
# subsequent removal-related operation on it causes no further data movement.
ceph osd crush reweight osd.8 0

# Step 2: stop the OSD daemon. This tells the cluster the daemon no longer
# serves requests; since the OSD already carries no weight, the overall
# distribution is unaffected and nothing migrates.
ceph osd stop osd.8

# Step 3: mark the OSD "out" so it no longer maps any data; again, with a
# CRUSH weight of 0 this triggers no migration.
ceph osd out osd.8

# Step 4: remove the OSD from the CRUSH map. Its weight is already 0, so the
# host's weight is unchanged and no migration occurs.
ceph osd crush remove osd.8

# Step 5: delete the OSD's authentication key. Do this before freeing the id:
# if the key is not deleted, the OSD id stays reserved and cannot be reused.
ceph auth del osd.8

# Step 6: finally remove the OSD id from the cluster.
ceph osd rm osd.8
7.验证集群信息
[root@ceph1 ~]# ceph -s
cluster:
id: edc14ccc-1be5-11ee-9de8-005056b00968
health: HEALTH_WARN
1 hosts fail cephadm check
1/5 mons down, quorum ceph1,ceph5,ceph3,ceph2
services:
mon: 5 daemons, quorum ceph1,ceph5,ceph3,ceph2 (age 39m), out of quorum: ceph4
mgr: ceph1.cbajav(active, since 38m), standbys: ceph2.tqtbwn
mds: cephfs:1 {0=cephfs.ceph5.fvxuqu=up:active} 3 up:standby
osd: 9 osds: 8 up (since 38m), 8 in (since 28m)
data:
pools: 4 pools, 97 pgs
objects: 416 objects, 1.3 GiB
usage: 12 GiB used, 588 GiB / 600 GiB avail
pgs: 97 active+clean
[root@ceph1 ~]#
[root@ceph1 ~]# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.63474 root default
-3 0.14648 host ceph1
0 hdd 0.04880 osd.0 up 1.00000 1.00000
2 hdd 0.09769 osd.2 up 1.00000 1.00000
-5 0.14648 host ceph2
1 hdd 0.04880 osd.1 up 1.00000 1.00000
3 hdd 0.09769 osd.3 up 1.00000 1.00000
-11 0.14648 host ceph3
7 hdd 0.04880 osd.7 up 1.00000 1.00000
9 hdd 0.09769 osd.9 up 1.00000 1.00000
-9 0.04880 host ceph4
6 hdd 0.04880 osd.6 down 0 1.00000
-7 0.14648 host ceph5
4 hdd 0.04880 osd.4 up 1.00000 1.00000
5 hdd 0.09769 osd.5 up 1.00000 1.00000
[root@ceph1 ~]#
# Repeat the same removal procedure for osd.6.
# NOTE(review): as with osd.8, the original reweighted to 0.1; the OSD must be
# drained to 0 so that no data maps to it before removal.
ceph osd crush reweight osd.6 0
ceph osd stop osd.6
ceph osd out osd.6
ceph osd crush remove osd.6
# Delete the auth key before freeing the id, so the id can be reused.
ceph auth del osd.6
ceph osd rm osd.6

# Verify cluster state after removal.
ceph -s
ceph osd tree
# Rebuild the ceph4 host and rejoin it to the cluster.
# NOTE(review): these commands are presumably run on the freshly reinstalled
# ceph4 node (CentOS/RHEL with yum) — confirm before executing.
# Fetch the cephadm bootstrap script matching the cluster version (v15.2.17).
wget https://github.com/ceph/ceph/raw/v15.2.17/src/cephadm/cephadm
chmod +x cephadm # downloaded ahead of time; the download step inside the script is commented out
# cephadm requires python3.
yum install python3 -y
# 1. Install docker (container runtime used by cephadm).
# Use the Huawei Cloud mirror for the docker-ce repository.
wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
sed -i 's+download.docker.com+repo.huaweicloud.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
yum makecache fast && yum install docker-ce -y
systemctl enable docker --now
# List the container images referenced by the cephadm script.
grep quay.io cephadm |awk '{print $3}' |grep -n '.*'
# Pre-pull those images.
grep quay.io cephadm |awk '{print $3}' |xargs -i docker pull {}
# Pin the ceph yum repo to version 15.2.17 (must match the existing cluster).
./cephadm add-repo --release 15.2.17
# Install the matching ceph CLI tools (15.2.17).
yum install ceph-common -y
# Copy the cluster config and keyring from the primary node so this host can
# talk to the cluster.
scp -r root@ceph1:/etc/ceph/* /etc/ceph/
# Let the orchestrator create OSDs on all unused devices; verify afterwards.
ceph orch apply osd --all-available-devices
ceph -s