Ceph: Adding More Mons and More MDS Daemons

1. Current status
[Screenshot 1: current cluster status]
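The screenshot is not reproduced here; as a sketch (assuming the standard Ceph CLI is available on node1), the same information can be checked with:

ceph -s          # overall health, plus mon and mds status
ceph mon stat    # at this point only mon.node1 exists
ceph mds stat    # at this point only mds.node1 exists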

2. Add another mon (mon.node2) on 172.10.2.172 (node2)
ssh node2

vim /etc/ceph/ceph.conf   # add the mon.node2 section
[Screenshot 2: ceph.conf with the mon.node2 section added]
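The edited file is not shown in full here; based on the complete ceph.conf in the appendix, the section added for the new monitor looks like this:

[mon.node2]
host = node2
mon addr = 172.10.2.172:6789

(The appendix still lists only node1 under mon initial members / mon host; extending those [global] entries to include node2 as well is an optional follow-up, not something shown in the original post.)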

ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
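ceph-authtool will normally fail if /tmp/ceph.mon.keyring does not exist yet on node2. A minimal sketch of creating it first (this step is not shown in the original post; with auth set to none the key contents are not actually checked):

ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'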

monmaptool --create --add node1 172.10.2.171 --add node2 172.10.2.172 --fsid 5e3a1bf3-9777-4311-a308-67a8c4b8fece /tmp/monmap   # the map lists both monitors; the fsid must match the fsid in /etc/ceph/ceph.conf
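Alternatively, on a cluster that is already running, the current monitor map can be fetched from the live monitors instead of being built by hand (a common variant, not the one used in the original post):

ceph mon getmap -o /tmp/monmap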

mkdir -p /var/lib/ceph/mon/ceph-node2

ceph-mon --mkfs -i node2 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring   # initialise the data directory for mon.node2 using the monmap and keyring prepared above

touch /var/lib/ceph/mon/ceph-node2/done   # mark the monitor's data directory as fully created

/etc/init.d/ceph start   # start the daemons defined for this host in ceph.conf

[Screenshot 3: cluster status after mon.node2 joins]
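To confirm the second monitor has joined the quorum (a typical check, assuming the standard CLI):

ceph mon stat        # should now list both mon.node1 and mon.node2
ceph quorum_status   # shows the monitors currently in quorum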

3. Add another mds (mds.node2) on 172.10.2.172 (node2)
vim /etc/ceph/ceph.conf   # add the mds.node2 section (see the sketch below)
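Based on the appendix, the addition is simply:

[mds.node2]
host = node2

(The appendix also contains an [mds] section with max mds = 2.)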
/etc/init.d/ceph restart
[Screenshot 4: mds status after the restart]
The status output now shows an additional standby mds.
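This can also be verified from the command line (assuming the standard CLI):

ceph mds stat    # should report one active mds and one standby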
/etc/init.d/ceph stop mds   # after stopping one mds, the remaining one takes over
[Screenshot 5: the standby mds taking over after the active one is stopped]
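The failover can be watched as it happens (a sketch, not part of the original post):

ceph -w          # watch cluster log events while the mds is stopped
ceph mds stat    # afterwards, the previously standby mds should be active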
Appendix: configuration file
root@node1:/var/lib/ceph/osd# cat /etc/ceph/ceph.conf
[global]
fsid = 5e3a1bf3-9777-4311-a308-67a8c4b8fece
mon initial members = node1
mon host = 172.10.2.171
public network = 172.10.2.0/24
auth cluster required = none
auth service required = none
auth client required = none
osd journal size = 1024
filestore xattr use omap = true
osd pool default size = 2
osd pool default min size = 1
osd pool default pg num = 128
osd pool default pgp num = 128
osd crush chooseleaf type = 1

[mon.node1]
host = node1
mon addr = 172.10.2.171:6789

[mon.node2]
host = node2
mon addr = 172.10.2.172:6789

[osd.0]
host = node1
addr = 172.10.2.171:6789
osd data = /var/lib/ceph/osd/ceph-0

[osd.1]
host = node1
addr = 172.10.2.171:6789
osd data = /var/lib/ceph/osd/ceph-1

[osd.2]
host = node2
addr = 172.10.2.172:6789
osd data = /var/lib/ceph/osd/ceph-2

[osd.3]
host = node2
addr = 172.10.2.172:6789
osd data = /var/lib/ceph/osd/ceph-3

[mds.node1]
host = node1

[mds.node2]
host = node2

[mds]
max mds = 2
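One practical note (implicit in the post, since the appendix is node1's file while the edits in steps 2 and 3 were made on node2): the same /etc/ceph/ceph.conf should be present on both nodes, e.g.:

scp /etc/ceph/ceph.conf node2:/etc/ceph/ceph.conf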


