检查IP,并ping集群中其他节点的cluster network IP,确认能否ping通
# Verify name resolution and network reachability of the monitor node(s);
# "mon" is a placeholder for the real monitor hostname(s) added to /etc/hosts.
ping mon
修改主机的/etc/hosts文件,并添加所有节点信息
检查防火墙是否关闭
# Firewall must be stopped (expected output: "not running").
firewall-cmd --state
# On-disk SELinux configuration (check the SELINUX= line).
cat /etc/selinux/config
# Runtime SELinux status, to confirm it matches the on-disk config.
sestatus -v
查看集群内各主机时间是否一致,ntp(chronyd):
# Compare wall-clock time with the other cluster hosts (see note above:
# clocks must agree across the cluster).
date
# chronyd must be active/running so time stays synchronized.
systemctl status chronyd
配置yum源
在新OSD主机节点上安装ceph软件
# Install the ceph packages on the new OSD host
# (requires the yum repo configured in the previous step).
yum install -y ceph
对所有日志盘和数据盘进行初始化
# Clear every journal SSD (sda-sdd) and every data disk (sdf-sdo;
# /dev/sde is intentionally absent from the list) before handing them
# to parted / ceph-volume.
# NOTE(review): a head-only "dd count=1024" leaves the BACKUP GPT header
# at the end of each disk intact, so a stale partition table can
# resurface; wipefs -a removes all remaining partition-table/filesystem
# signatures first, then dd zeroes the first 1 GiB as before.
for dev in /dev/sd{a,b,c,d,f,g,h,i,j,k,l,m,n,o}; do
  wipefs -a "$dev"
  dd if=/dev/zero of="$dev" bs=1M count=1024
done
对ssd进行分区
1 2048 41945087 20G Microsoft basic ceph_wal
2 41945088 167774207 60G Microsoft basic ceph_db
# Partition each journal SSD into per-OSD (wal, db) pairs laid out
# back-to-back starting at sector 2048:
#   wal = 41943040 sectors (20 GiB), db = 125829120 sectors (60 GiB)
# sda/sdd serve 3 OSDs each (6 partitions); sdb/sdc serve 2 each (4).
# The computed sector boundaries are identical to the original
# hard-coded commands (e.g. sda: 2048-41945087, 41945088-167774207, ...).
make_wal_db_parts() {
  local dev=$1 osd_count=$2
  local start=2048 end i
  local -r wal_sectors=41943040   # 20 GiB
  local -r db_sectors=125829120   # 60 GiB
  /sbin/parted "$dev" -s mklabel gpt || return
  for ((i = 0; i < osd_count; i++)); do
    end=$((start + wal_sectors - 1))
    /sbin/parted "$dev" -s mkpart ceph_wal "${start}s" "${end}s" || return
    start=$((end + 1))
    end=$((start + db_sectors - 1))
    /sbin/parted "$dev" -s mkpart ceph_db "${start}s" "${end}s" || return
    start=$((end + 1))
  done
}

make_wal_db_parts /dev/sda 3
make_wal_db_parts /dev/sdb 2
make_wal_db_parts /dev/sdc 2
make_wal_db_parts /dev/sdd 3
登录monitor
把配置信息拷贝到各monitor节点
# Push the updated ceph.conf to the monitor nodes; replace the placeholder
# with the actual space-separated monitor hostnames.
ceph-deploy --overwrite-conf config push xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
在运行ceph-deploy工具的monitor节点上,把/etc/ceph/ceph.bootstrap-osd.keyring文件内容拷贝到所有monitor节点的/var/lib/ceph/bootstrap-osd目录下新建ceph.keyring文件
登录OSD主机检查:
#cd /etc/ceph
#ll
[root@osd主机 ceph]#ls -l /var/lib/ceph/bootstrap-osd/ceph.keyring
-rw-r----- 1 root root 78 Aug 8 16:56 /var/lib/ceph/bootstrap-osd/ceph.keyring
[root@osd主机 ceph]#chown ceph:ceph /var/lib/ceph/bootstrap-osd/ceph.keyring
在monitor主机上创建主机:
# Create a CRUSH bucket of type "host" for the new OSD host; replace the
# placeholder with the new host's name so the new OSDs attach under it.
ceph osd crush add-bucket xxxxxxxxxxx host
依次登录各节点创建OSD:
# Create the 10 BlueStore OSDs. Each data disk gets a dedicated
# (wal, db) SSD partition pair, consumed strictly in order:
#   sda1-6 -> sdf, sdg, sdh     sdb1-4 -> sdi, sdj
#   sdc1-4 -> sdk, sdl          sdd1-6 -> sdm, sdn, sdo
# The pairing must match the partitioning done earlier — do not reorder.
ceph-volume lvm create --bluestore --data /dev/sdf --block.wal /dev/sda1 --block.db /dev/sda2
ceph-volume lvm create --bluestore --data /dev/sdg --block.wal /dev/sda3 --block.db /dev/sda4
ceph-volume lvm create --bluestore --data /dev/sdh --block.wal /dev/sda5 --block.db /dev/sda6
ceph-volume lvm create --bluestore --data /dev/sdi --block.wal /dev/sdb1 --block.db /dev/sdb2
ceph-volume lvm create --bluestore --data /dev/sdj --block.wal /dev/sdb3 --block.db /dev/sdb4
ceph-volume lvm create --bluestore --data /dev/sdk --block.wal /dev/sdc1 --block.db /dev/sdc2
ceph-volume lvm create --bluestore --data /dev/sdl --block.wal /dev/sdc3 --block.db /dev/sdc4
ceph-volume lvm create --bluestore --data /dev/sdm --block.wal /dev/sdd1 --block.db /dev/sdd2
ceph-volume lvm create --bluestore --data /dev/sdn --block.wal /dev/sdd3 --block.db /dev/sdd4
ceph-volume lvm create --bluestore --data /dev/sdo --block.wal /dev/sdd5 --block.db /dev/sdd6
建完后检查,看到多了个/var/lib/ceph/osd/ceph-xxx,xxx即为新增的osd编号,示例如下:
#df -h
Filesystem Size Used Avail Use% Mounted on
tmpfs 63G 48K 63G 1% /var/lib/ceph/osd/ceph-xxx
检查之前创建的OSD,发现OSD已经UP,并且在新OSD主机下挂着:
# Confirm the new OSDs are "up" and placed under the new host bucket.
ceph osd tree
# Overall cluster status/health summary.
ceph -s
登录新OSD主机,检查osd编号,其关联的文件系统,block.db,block.wal
#df -h | grep -i -e Filesystem -e ceph-