client eth0 192.168.4.10
node1 eth0 192.168.4.11
node2 eth1 192.168.4.12
node3 eth1 192.168.4.13
Expand the disk space on all four virtual machines first, to avoid errors later from running out of space.
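One possible way to provision the extra space from the physical host (a sketch only, assuming KVM/libvirt with qcow2 images and that the space is added as an extra disk; the image path, size, and domain name are placeholders, and the same can be done later through virt-manager as shown below):
qemu-img create -f qcow2 /var/lib/libvirt/images/node1-vdb.qcow2 20G
virsh attach-disk node1 /var/lib/libvirt/images/node1-vdb.qcow2 vdb --subdriver qcow2 --persistent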
mkdir /var/ftp/ceph
mount ceph10.iso /var/ftp/ceph/
ssh-keygen -f /root/.ssh/id_rsa -N ''
for i in 10 11 12 13
do
ssh-copy-id 192.168.4.$i
done
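A quick sanity check (not in the original steps) that key-based login now works to every host:
for i in 10 11 12 13
do
ssh 192.168.4.$i hostname
done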
vim /etc/hosts
… …
192.168.4.10 client
192.168.4.11 node1
192.168.4.12 node2
192.168.4.13 node3
for i in 10 11 12 13
do
scp /etc/hosts 192.168.4.$i:/etc/
done
cat /etc/yum.repos.d/ceph.repo
[mon]
name=mon
baseurl=ftp://192.168.4.254/ceph/MON
gpgcheck=0
[osd]
name=osd
baseurl=ftp://192.168.4.254/ceph/OSD
gpgcheck=0
[tools]
name=tools
baseurl=ftp://192.168.4.254/ceph/Tools
gpgcheck=0
yum repolist
for i in 10 11 12 13
do
scp /etc/yum.repos.d/ceph.repo 192.168.4.$i:/etc/yum.repos.d/
done
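Optionally confirm the repositories resolve on every host; each should list the mon, osd, and tools repos served from 192.168.4.254:
for i in 10 11 12 13
do
ssh 192.168.4.$i "yum repolist"
done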
vim /etc/chrony.conf
… …
server 192.168.4.254 iburst
for i in 10 11 12 13
do
scp /etc/chrony.conf 192.168.4.$i:/etc/
ssh 192.168.4.$i "systemctl restart chronyd"
done
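To verify time synchronization against 192.168.4.254, chronyc (part of the chrony package) can be queried on each host, e.g.:
for i in 10 11 12 13
do
ssh 192.168.4.$i "chronyc sources -v"
done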
On the physical host, open the virtual machine manager:
virt-manager
… …
///// Environment preparation complete
yum -y install ceph-deploy
ceph-deploy --help
ceph-deploy mon --help
cd /root
mkdir ceph-cluster
cd ceph-cluster/
cd /root/ceph-cluster
ceph-deploy new node1 node2 node3
cd /root/ceph-cluster
for i in node1 node2 node3
do
ssh $i "yum -y install ceph-mon ceph-osd ceph-mds ceph-radosgw"
done
cd /root/ceph-cluster
ceph-deploy mon create-initial
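Optional check once create-initial finishes; all three monitors should appear in the quorum (run on node1):
ceph -s
ceph mon stat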
Common error on node1 and how to fix it (optional; refer to this only if you hit the error):
If the following error message appears:
admin_socket: exception getting command descriptions: [Error 2] No such file or directory
Troubleshooting:
echo "public_network = 192.168.4.0/24" >> /root/ceph-cluster/ceph.conf
ceph-deploy --overwrite-conf config push node1 node2 node3
Note: the vdb1 and vdb2 partitions will be used as journal (cache) disks for the storage servers.
for i in node1 node2 node3
do
ssh $i "parted /dev/vdb mklabel gpt"
ssh $i "parted /dev/vdb mkpart primary 1 50%"
ssh $i "parted /dev/vdb mkpart primary 50% 100%"
done
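Optionally confirm both journal partitions exist on every node:
for i in node1 node2 node3
do
ssh $i "lsblk /dev/vdb"
done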
for i in node1 node2 node3
do
ssh $i "chown ceph:ceph /dev/vdb1"
ssh $i "chown ceph:ceph /dev/vdb2"
ssh $i "echo 'ENV{DEVNAME}==\"/dev/vdb1\",OWNER=\"ceph\",GROUP=\"ceph\"' >> /etc/udev/rules.d/70-vdb.rules"
ssh $i "echo 'ENV{DEVNAME}==\"/dev/vdb2\",OWNER=\"ceph\",GROUP=\"ceph\"' >> /etc/udev/rules.d/70-vdb.rules"
done
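A quick check that the ownership change took effect; both partitions should show ceph:ceph:
for i in node1 node2 node3
do
ssh $i "ls -l /dev/vdb1 /dev/vdb2"
done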
ceph-deploy disk zap node1:vdc node1:vdd
ceph-deploy disk zap node2:vdc node2:vdd
ceph-deploy disk zap node3:vdc node3:vdd
Important: this step is error-prone! It is easy to mistype the hostname or device name!
ceph-deploy osd create node1:vdc:/dev/vdb1 node1:vdd:/dev/vdb2
ceph-deploy osd create node2:vdc:/dev/vdb1 node2:vdd:/dev/vdb2
ceph-deploy osd create node3:vdc:/dev/vdb1 node3:vdd:/dev/vdb2
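Once created, the six OSDs (two per node) should all come up; a quick check on node1 (ceph -s is also run further below):
ceph osd tree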
Common error and fix (not a required step).
If osd create reports the following error while creating the OSD storage:
[ceph_deploy][ERROR ] RuntimeError: bootstrap-osd keyring not found; run 'gatherkeys'
repair the key files and redistribute ceph's keyrings with:
ceph-deploy gatherkeys node1 node2 node3
ceph -s
If the status output contains the following:
health: HEALTH_WARN
clock skew detected on node2, node3…
clock skew means the clocks are out of sync. Fix: synchronize the time on all hosts with NTP first!
Ceph requires the clock difference between hosts to stay under 0.05 s, otherwise it reports HEALTH_WARN. If NTP cannot synchronize the clocks precisely enough, edit ceph.conf on all hosts and add the following line under the [mon] section:
mon clock drift allowed = 1
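For the setting to take effect it has to reach the monitor nodes; a sketch of one way, assuming ceph.conf was edited in /root/ceph-cluster on node1:
cd /root/ceph-cluster
ceph-deploy --overwrite-conf config push node1 node2 node3
for i in node1 node2 node3
do
ssh $i "systemctl restart ceph-mon.target"
done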
1. View the storage pools.
ceph osd lspools
2. Create an image and view it.
rbd create demo-image --image-feature layering --size 10G
or
rbd create rbd/image --image-feature layering --size 10G
rbd list
rbd info demo-image
1. Shrink the image
rbd resize --size 7G image --allow-shrink
rbd info image
2. Grow the image
rbd resize --size 15G image
rbd info image
1. Access the image through KRBD (run on client)
yum -y install ceph-common
scp 192.168.4.11:/etc/ceph/ceph.conf /etc/ceph/
scp 192.168.4.11:/etc/ceph/ceph.client.admin.keyring /etc/ceph/
rbd map image
lsblk
rbd showmapped
id pool image snap device
2. Format and mount the device (run on client)
mkfs.xfs /dev/rbd0
mount /dev/rbd0 /mnt/
echo "test" > /mnt/test.txt
1. List the image's snapshots (run on node1)
rbd snap ls image
2. Create a snapshot of the image (run on node1)
rbd snap create image --snap image-snap1
rbd snap ls image
3. Delete the test file written by the client, then unmount (run on client)
rm -rf /mnt/test.txt
umount /mnt
4. Roll back the snapshot (run on node1)
rbd snap rollback image --snap image-snap1
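To confirm the rollback worked, remount the image on the client; test.txt should be back (run on client):
mount /dev/rbd0 /mnt/
ls /mnt/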
1. Clone the snapshot (run on node1)
rbd snap protect image --snap image-snap1
rbd snap rm image --snap image-snap1    # fails: the snapshot is protected and cannot be removed
rbd clone image --snap image-snap1 image-clone --image-feature layering
2. Check the relationship between the cloned image and the parent snapshot, then flatten the clone (run on node1)
rbd info image-clone
rbd flatten image-clone
rbd info image-clone
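After flattening, the clone no longer depends on the parent snapshot, so the snapshot can be unprotected and removed if it is no longer needed (run on node1):
rbd snap unprotect image --snap image-snap1
rbd snap rm image --snap image-snap1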
1) Unmap the disk on the client
[root@client ~]# umount /mnt
[root@client ~]# rbd showmapped
[root@client ~]# rbd unmap /dev/rbd0
end