Overview
Ceph is a distributed storage system that is not only highly reliable, scalable, and performant, but is also a unified storage system supporting object, block, and file storage. This article describes how to use Ceph block storage (RBD) as the backend for the OpenStack Glance, Nova, and Cinder components.
Deployment of the Ceph cluster was covered in an earlier post; below we configure the OpenStack glance, cinder, and nova components to use Ceph as their backend storage.
References
http://docs.ceph.com/docs/master/rbd/rbd-openstack/
http://docs.openfans.org/ceph/ceph4e2d658765876863/ceph-1/copy_of_ceph-block-device3010ceph57578bbe59073011/openstack301057578bbe59077684openstack3011#u
Environment
192.168.3.8   ceph node1 (runs mon and mds)
192.168.3.9   ceph node2 (runs osd)
192.168.3.10  ceph node3 (runs osd)
192.168.3.4   openstack (devstack deployment: controller, network, and compute)
Notes
Ceph does not support booting instances from qcow2 images, so if Nova is to boot VMs whose disks live on Ceph, the images must be in raw format.
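If an existing image is in qcow2 format, it can be converted to raw with qemu-img before uploading; a quick sketch (the file and image names here are only examples):
qemu-img convert -f qcow2 -O raw cirros-0.3.4-x86_64-disk.img cirros-0.3.4-x86_64-disk.raw
glance image-create --name "cirros-raw" --disk-format raw --container-format bare < cirros-0.3.4-x86_64-disk.raw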
Configuring Glance
Configuring authentication
1. Copy the two files under /etc/ceph on node1 to the OpenStack controller and compute nodes (in my setup they are the same node):
cd /etc/ceph/
scp ceph.conf ceph.client.admin.keyring 192.168.3.4:/etc/ceph/
2. Make ceph.client.admin.keyring readable:
chmod +r /etc/ceph/ceph.client.admin.keyring
3. On the Ceph cluster (node1), create the storage pool for Glance; the pool listings later in this post show it as images:
rados mkpool images
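Note: rados mkpool is deprecated on newer Ceph releases; the pool can equally be created with ceph osd pool create (the placement-group count of 128 here is only an example and should be sized for your cluster):
ceph osd pool create images 128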
4. Edit the following options in the Glance configuration file /etc/glance/glance-api.conf on the OpenStack node (see the note after the list for where these options go on newer releases):
rbd_store_ceph_conf = /etc/ceph/ceph.conf
stores = rbd (not needed on releases before Liberty)
default_store = rbd
rbd_store_user = admin (this is the cephx user whose keyring was copied above)
rbd_store_pool = images
rbd_store_chunk_size = 8
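On newer OpenStack releases these options live in the [glance_store] section of glance-api.conf rather than [DEFAULT]; a minimal sketch, assuming the same pool and user as above:
[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = images
rbd_store_user = admin
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8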
5. Restart the glance-api service.
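How the service is restarted depends on the deployment; on a package-based installation it is typically something like the command below, while under devstack the g-api process is restarted in its screen session:
service glance-api restart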
6. Upload an image:
stack@map-VirtualBox:~/devstack$ glance image-create --name "cirros-0.3.2-x86_64-10" --disk-format qcow2 --container-format bare <./files/images/cirros-0.3.4-x86_64-uec/cirros-0.3.4-x86_64-blank.img
+------------------+--------------------------------------+
| Property         | Value                                |
+------------------+--------------------------------------+
| checksum         | eb9139e4942121f22bbc2afc0400b2a4     |
| container_format | bare                                 |
| created_at       | 2016-02-02T06:45:09Z                 |
| disk_format      | qcow2                                |
| id               | 5d57ba01-a905-42b6-bc9b-39a10b8c7fcb |
| min_disk         | 0                                    |
| min_ram          | 0                                    |
| name             | cirros-0.3.2-x86_64-10               |
| owner            | bffb0c753d004509b0ef6ae9bd4777ba     |
| protected        | False                                |
| size             | 25165824                             |
| status           | active                               |
| tags             | []                                   |
| updated_at       | 2016-02-02T06:45:19Z                 |
| virtual_size     | None                                 |
| visibility       | private                              |
+------------------+--------------------------------------+
7. Verify that the image is stored in the Ceph images pool:
root@map-VirtualBox:/etc/glance# rbd ls images
5d57ba01-a905-42b6-bc9b-39a10b8c7fcb
root@map-VirtualBox:/etc/glance#
8. Check the usage of the images pool:
root@map-VirtualBox:~# rados df
pool name        category  KB        objects  clones  degraded  unfound  rd   rd KB  wr    wr KB
cephfs_data      -         0         0        0       0         0        0    0      0     0
cephfs_metadata  -         0         0        0       0         0        0    0      0     0
data             -         0         0        0       0         0        0    0      0     0
images           -         24577     6        0       0         0        113  87     20    24577
metadata         -         2         20       0       0         0        6    5      31    8
nova             -         0         0        0       0         0        0    0      0     0
rbd              -         0         0        0       0         0        0    0      0     0
test             -         137633    46       0       0         0        287  1110   642   132406
vms              -         0         1        0       0         0        308  598    1572  43931
volumes          -         0         0        0       0         0        0    0      0     0
  total used     10891912  73
  total avail    5085560
  total space    15977472
Configuring Nova
1. Create the pool that Nova will use for instance disks:
rados mkpool vms
root@map-VirtualBox:/etc/nova# rados mkpool nova
successfully created pool nova
root@map-VirtualBox:/etc/nova# rados lspools
data
metadata
rbd
cephfs_data
cephfs_metadata
test
volumes
images
vms
nova
2. Generate a UUID for the libvirt secret:
root@map-VirtualBox:/etc/nova# uuidgen
ebdba075-59bc-4408-9a2c-d44b16d56bd3
3. Create the secret XML file:
root@map-VirtualBox:/etc/nova# cat > secret.xml <<EOF
> <secret ephemeral='no' private='no'>
>   <uuid>ebdba075-59bc-4408-9a2c-d44b16d56bd3</uuid>
>   <usage type='ceph'>
>     <name>client.admin secret</name>
>   </usage>
> </secret>
> EOF
4. Define the secret in libvirt with virsh secret-define --file secret.xml:
root@map-VirtualBox:/etc/nova# virsh secret-define --file secret.xml
Secret ebdba075-59bc-4408-9a2c-d44b16d56bd3 created
5. Set the secret value with the following commands:
root@map-VirtualBox:/etc/nova# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
key = AQDPoZRWcIQCDhAAwGapOCdp2uql2HJN2HpD9w==
root@map-VirtualBox:/etc/nova# echo "AQDPoZRWcIQCDhAAwGapOCdp2uql2HJN2HpD9w==">key
root@map-VirtualBox:/etc/nova# virsh secret-set-value --secret ebdba075-59bc-4408-9a2c-d44b16d56bd3 --base64 $(cat key)
Secret value set
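Once the secret value is set, the temporary key file (and secret.xml) are no longer needed and can be deleted, for example:
rm key secret.xml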
Check the secret to verify that the configuration is correct:
root@map-VirtualBox:~# virsh secret-define
error: command 'secret-define' requires <file> option
root@map-VirtualBox:~# virsh secret-list
 UUID                                  Usage
--------------------------------------------------------------------------------
 ebdba075-59bc-4408-9a2c-d44b16d56bd3  ceph client.admin secret

root@map-VirtualBox:~# virsh secret-dumpxml ebdba075-59bc-4408-9a2c-d44b16d56bd3
<secret ephemeral='no' private='no'>
  <uuid>ebdba075-59bc-4408-9a2c-d44b16d56bd3</uuid>
  <usage type='ceph'>
    <name>client.admin secret</name>
  </usage>
</secret>
root@map-VirtualBox:~#
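As an optional extra check, the stored value can be read back and compared with the key in ceph.client.admin.keyring, for example:
virsh secret-get-value ebdba075-59bc-4408-9a2c-d44b16d56bd3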
6. Configure nova.conf.
Back up nova.conf first:
root@map-VirtualBox:/etc/nova# ls
api-paste.ini  key  nova.conf  policy.json  rootwrap.conf  rootwrap.d  secret.xml
root@map-VirtualBox:/etc/nova# cp nova.conf nova.conf.bak
Edit the following settings in nova.conf (see the note after the list for which section they belong in):
images_type=rbd
images_rbd_pool=vms
images_rbd_ceph_conf=/etc/ceph/ceph.conf
rbd_user=admin
rbd_secret_uuid=ebdba075-59bc-4408-9a2c-d44b16d56bd3
cpu_mode=none
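On newer releases (roughly Juno onward) these options belong in the [libvirt] section of nova.conf; a minimal sketch with the same values as above:
[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = admin
rbd_secret_uuid = ebdba075-59bc-4408-9a2c-d44b16d56bd3
cpu_mode = none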
7. Restart nova-compute.
8. Create a VM (an example command is shown below).
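For example, a test instance could be booted from a raw image uploaded earlier; the flavor name and <image-id> here are placeholders for your environment:
nova boot --flavor m1.tiny --image <image-id> testvm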
9. Check the objects in the Ceph pools; the disks of the newly created VMs are now in the vms pool:
root@map-VirtualBox:/etc/cinder# rados df
pool name        category  KB        objects  clones  degraded  unfound  rd   rd KB  wr    wr KB
cephfs_data      -         0         0        0       0         0        0    0      0     0
cephfs_metadata  -         0         0        0       0         0        0    0      0     0
data             -         0         0        0       0         0        0    0      0     0
images           -         49153     11       0       0         0        269  24785  40    49154
metadata         -         2         20       0       0         0        6    5      31    8
nova             -         0         0        0       0         0        0    0      0     0
rbd              -         0         0        0       0         0        0    0      0     0
test             -         137633    46       0       0         0        287  1110   642   132406
vms              -         37455     20       0       0         0        491  919    2400  78635
volumes          -         0         0        0       0         0        0    0      0     0
  total used     10990976  97
  total avail    4986496
  total space    15977472
root@map-VirtualBox:/etc/cinder# rbd ls vms
06aedb93-087f-4110-9d18-8428aa9ede29_disk
06aedb93-087f-4110-9d18-8428aa9ede29_disk.config
2a1ef5e0-97ee-4c41-8ad9-f253d019c5d2_disk
2a1ef5e0-97ee-4c41-8ad9-f253d019c5d2_disk.config
root@map-VirtualBox:/etc/cinder#
stack@map-VirtualBox:~/devstack$ nova list
+--------------------------------------+------+---------+------------+-------------+------------------+
| ID                                   | Name | Status  | Task State | Power State | Networks         |
+--------------------------------------+------+---------+------------+-------------+------------------+
| 06aedb93-087f-4110-9d18-8428aa9ede29 | sdf  | ACTIVE  | -          | Running     | private=10.0.0.3 |
| 2a1ef5e0-97ee-4c41-8ad9-f253d019c5d2 | we   | SHUTOFF | -          | Shutdown    | private=10.0.0.2 |
+--------------------------------------+------+---------+------------+-------------+------------------+
stack@map-VirtualBox:~/devstack$