Ceph is a unified, distributed storage system designed for excellent performance, reliability, and scalability. "Unified" means it provides file, block, and object storage in one system; "distributed" means it can be scaled out dynamically. In many companies' cloud environments, Ceph is used as the single storage backend for OpenStack to improve data-handling efficiency.
Ceph is an open-source distributed storage system whose services fall into three categories: object storage, block storage, and a file system.
Ceph's core components are OSDs, Monitors, Managers, and MDSs. A Ceph storage cluster needs at least one Ceph Monitor, one Ceph Manager, and one Ceph OSD (Object Storage Daemon). A Ceph Metadata Server (MDS) is also required when running CephFS clients.
This lab deploys OpenStack (Rocky) + Ceph. OpenStack itself was deployed earlier; see the previous article on the multi-node OpenStack Rocky installation from a local repository.
Environment and address plan

| Hostname | Memory (GB) | Disk (GB) | NIC | OS |
|---|---|---|---|---|
| control (ct) | 8 | 300+1024 | VM1: 192.168.100.10, NAT: 192.168.7.145 | CentOS 7.5 |
| compute1 (c1) | 8 | 300+1024 | VM1: 192.168.100.11 | CentOS 7.5 |
| compute2 (c2) | 8 | 300+1024 | VM1: 192.168.100.12 | CentOS 7.5 |
1. Environment preparation
(1) Edit the hosts file and set the hostnames
(2) Disable the firewall and SELinux
(3) Set up passwordless SSH between all nodes
(4) Make sure the iptables firewall is off on all three nodes
(5) Verify time synchronization between the nodes (a command sketch for steps (1)-(5) follows below)
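The original notes do not spell out these prep commands; the following is a minimal sketch, assuming CentOS 7.5, chrony for time synchronization, and the hostnames/IPs from the table above (run on every node, adjusting the hostname):
[root@ct ~]# hostnamectl set-hostname ct
[root@ct ~]# cat >> /etc/hosts <<EOF
192.168.100.10 ct
192.168.100.11 c1
192.168.100.12 c2
EOF
[root@ct ~]# systemctl stop firewalld && systemctl disable firewalld
[root@ct ~]# setenforce 0
[root@ct ~]# sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
[root@ct ~]# ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa
[root@ct ~]# for host in ct c1 c2; do ssh-copy-id $host; done
[root@ct ~]# systemctl enable --now chronyd && chronyc sources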
2. Install python-setuptools on all three nodes
[root@ct ~]# yum -y install python-setuptools
[root@c1 ~]# yum -y install python-setuptools
[root@c2 ~]# yum -y install python-setuptools
3. On the control node, create the Ceph configuration directory and install ceph-deploy
[root@ct ~]# mkdir -p /etc/ceph
[root@ct ~]# yum -y install ceph-deploy
4. Install Ceph on all three nodes
[root@ct ~]# yum -y install ceph
[root@c1 ~]# yum -y install ceph
[root@c2 ~]# yum -y install ceph
5. Create the three MONs
#Run from the /etc/ceph directory on the control node
[root@ct ceph]# ceph-deploy new ct c1 c2
[root@ct ceph]# more /etc/ceph/ceph.conf
[global]
fsid = 93c3e7cc-1064-40b0-939b-cc2fb5a3e86d
mon_initial_members = ct, c1, c2
mon_host = 192.168.100.10,192.168.100.11,192.168.100.12
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
6. Initialize the MONs and gather the keys (covers all three nodes)
[root@ct ceph]# ceph-deploy mon create-initial
[root@ct ceph]# ll
total 64
-rw------- 1 root root 113 Mar 15 05:47 ceph.bootstrap-mds.keyring
-rw------- 1 root root 113 Mar 15 05:47 ceph.bootstrap-mgr.keyring
-rw------- 1 root root 113 Mar 15 05:47 ceph.bootstrap-osd.keyring
-rw------- 1 root root 113 Mar 15 05:47 ceph.bootstrap-rgw.keyring
-rw------- 1 root root 151 Mar 15 05:47 ceph.client.admin.keyring
-rw-r--r-- 1 root root 232 Mar 15 05:47 ceph.conf
-rw-r--r-- 1 root root 29766 Mar 15 05:47 ceph-deploy-ceph.log
-rw------- 1 root root 73 Mar 15 05:46 ceph.mon.keyring
-rw-r--r-- 1 root root 92 Jun 3 2019 rbdmap
7. Create the OSDs
#Run from the /etc/ceph directory on the control node
[root@ct ceph]# ceph-deploy osd create --data /dev/sdb ct
[root@ct ceph]# ceph-deploy osd create --data /dev/sdb c1
[root@ct ceph]# ceph-deploy osd create --data /dev/sdb c2
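A quick sanity check (not in the original notes) is to confirm that all three OSDs registered and are up before continuing:
[root@ct ceph]# ceph osd tree
#each of ct, c1 and c2 should show one OSD with STATUS "up"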
8. Use ceph-deploy to push the configuration file and admin keyring to ct, c1, and c2
#Run from the /etc/ceph directory on the control node
[root@ct ceph]# ceph-deploy admin ct c1 c2
9. Add read permission to the keyring on each of ct, c1, and c2
[root@ct ceph]# chmod +r /etc/ceph/ceph.client.admin.keyring
[root@c1 ceph]# chmod +r /etc/ceph/ceph.client.admin.keyring
[root@c2 ceph]# chmod +r /etc/ceph/ceph.client.admin.keyring
10. Create the MGR services
#Run from the /etc/ceph directory on the control node
[root@ct ceph]# ceph-deploy mgr create ct c1 c2
11. Check the Ceph cluster status
[root@ct ceph]# ceph -s
  cluster:
    id:     93c3e7cc-1064-40b0-939b-cc2fb5a3e86d
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ct,c1,c2
    mgr: ct(active), standbys: c1, c2
    osd: 3 osds: 3 up, 3 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   3.0 GiB used, 3.0 TiB / 3.0 TiB avail
    pgs:
12. Create the three pools used for the OpenStack integration (volumes, vms, images); 64 is the PG count for each pool
[root@ct ceph]# ceph osd pool create volumes 64
pool 'volumes' created
[root@ct ceph]# ceph osd pool create vms 64
pool 'vms' created
[root@ct ceph]# ceph osd pool create images 64
pool 'images' created
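A note on the PG count, since the original only states the value: the usual rule of thumb is total PGs ≈ (number of OSDs × 100) / replica size, rounded to a power of two. With 3 OSDs and the default replica size of 3 that suggests roughly 100 PGs across the whole cluster, so 64 PGs per pool (192 in total) is on the generous side but still acceptable for a small lab cluster.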
13. Check the Ceph status
#Check the monitor status
[root@ct ceph]# ceph mon stat
e1: 3 mons at {c1=192.168.100.11:6789/0,c2=192.168.100.12:6789/0,ct=192.168.100.10:6789/0}, election epoch 12, leader 0 ct, quorum 0,1,2 ct,c1,c2
#Check the OSD status
[root@ct ceph]# ceph osd status
+----+------+-------+-------+--------+---------+--------+---------+-----------+
| id | host | used | avail | wr ops | wr data | rd ops | rd data | state |
+----+------+-------+-------+--------+---------+--------+---------+-----------+
| 0 | ct | 1027M | 1022G | 0 | 0 | 0 | 0 | exists,up |
| 1 | c1 | 1027M | 1022G | 0 | 0 | 0 | 0 | exists,up |
| 2 | c2 | 1027M | 1022G | 0 | 0 | 0 | 0 | exists,up |
+----+------+-------+-------+--------+---------+--------+---------+-----------+
#List the pools that were created
[root@ct ceph]# ceph osd lspools
1 volumes
2 vms
3 images
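One step the original notes skip: the upstream Ceph/OpenStack integration guide recommends initializing newly created pools for RBD before they are used, which can be done now from the control node:
[root@ct ceph]# rbd pool init volumes
[root@ct ceph]# rbd pool init vms
[root@ct ceph]# rbd pool init images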
1. Before setting up the dashboard, check the Ceph status again; it must not show any ERROR
2. Enable the dashboard module
[root@ct ceph]# ceph mgr module enable dashboard
3. Create a self-signed HTTPS certificate
[root@ct ceph]# ceph dashboard create-self-signed-cert
Self-signed certificate created
4. Check the mgr services
[root@ct ceph]# ceph mgr services
{
"dashboard": "https://ct:8443/"
}
5. Set the dashboard username and password
[root@ct ceph]# ceph dashboard set-login-credentials admin 123
Username and password updated
6. Log in to the Ceph dashboard in a browser
Open https://192.168.100.10:8443 and log in with the credentials set above
1. Prepare the environment for the integration
(1) On the control node, create the client.cinder user and set its permissions
[root@ct ~]# ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children,allow rwx pool=volumes,allow rwx pool=vms,allow rx pool=images'
[client.cinder]
key = AQA+/21eaF4oGxAAB11+LkfR/hnWlNDVoJbjBQ==
(2) On the control node, create the client.glance user and set its permissions
[root@ct ~]# ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children,allow rwx pool=images'
[client.glance]
key = AQBs/21ef9cUMxAArcA/N49H2qYdGRrXeswwLQ==
(3) Send the keyring to the node being integrated; since Glance itself runs on the control node, it does not need to be copied anywhere else
[root@ct ~]# ceph auth get-or-create client.glance |tee /etc/ceph/ceph.client.glance.keyring
[client.glance]
key = AQBs/21ef9cUMxAArcA/N49H2qYdGRrXeswwLQ==
[root@ct ~]# chown glance.glance /etc/ceph/ceph.client.glance.keyring
(4) Send the keyring to the node being integrated; the client.cinder consumer (cinder-volume) also runs on the controller by default, so nothing needs to be copied elsewhere. If it runs on another node, run the first command on that node
[root@ct ~]# ceph auth get-or-create client.cinder | tee /etc/ceph/ceph.client.cinder.keyring
[client.cinder]
key = AQA+/21eaF4oGxAAB11+LkfR/hnWlNDVoJbjBQ==
[root@ct ~]# chown cinder.cinder /etc/ceph/ceph.client.cinder.keyring
(5) The client.cinder key also has to be passed to the compute nodes
Because the compute nodes store the client.cinder key in libvirt, run the following:
[root@ct ~]# ceph auth get-key client.cinder |ssh c1 tee client.cinder.key
AQA+/21eaF4oGxAAB11+LkfR/hnWlNDVoJbjBQ==
[root@ct ~]# ceph auth get-key client.cinder |ssh c2 tee client.cinder.key
AQA+/21eaF4oGxAAB11+LkfR/hnWlNDVoJbjBQ==
(6) On the compute nodes running nova-compute, add the temporary key file to libvirt and then delete it
Configure a libvirt secret: KVM guests access the Ceph cluster through librbd, and librbd in turn needs Ceph credentials, so the account information has to be registered with libvirt
Perform the following on the compute nodes; the steps are identical on both
① Generate a UUID; both compute nodes use the same UUID (generate it on either c1 or c2)
[root@c1 ~]# uuidgen
a935d2ae-a7b5-47b6-9fb8-555e513ac63e
② Create a key file with the following content, making sure it uses the UUID generated in the previous step (on c1 and c2)
[root@c1 ~]# cat > secret.xml <<EOF
> <secret ephemeral='no' private='no'>
>   <uuid>a935d2ae-a7b5-47b6-9fb8-555e513ac63e</uuid>
>   <usage type='ceph'>
>     <name>client.cinder secret</name>
>   </usage>
> </secret>
> EOF
③ Define the secret and keep it; it is used in later steps (on c1 and c2)
[root@c1 ~]# virsh secret-define --file secret.xml
Secret a935d2ae-a7b5-47b6-9fb8-555e513ac63e created
④ Set the secret value and delete the temporary files. Deleting the files is optional; it just keeps the system tidy (on c1 and c2)
[root@c1 ~]# virsh secret-set-value --secret a935d2ae-a7b5-47b6-9fb8-555e513ac63e --base64 $(cat client.cinder.key) && rm -rf client.cinder.key secret.xml
Secret value set
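Optionally (not shown in the original notes), confirm on each compute node that libvirt now knows the secret:
[root@c1 ~]# virsh secret-list
#the UUID a935d2ae-a7b5-47b6-9fb8-555e513ac63e should be listed with usage "ceph client.cinder secret"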
2. Integrate Ceph with Glance (log in to the node where Glance runs and edit its configuration)
(1) Back up the configuration file
[root@ct ~]# cp /etc/glance/glance-api.conf /etc/glance/glance-api.conf.bak
(2) Edit the configuration file for the integration
[root@ct ~]# vi /etc/glance/glance-api.conf
#Around line 2054: set the store type
stores = rbd
#Around line 2108: change the default store type
default_store = rbd
#Around line 2442: comment out the default local filesystem store
#filesystem_store_datadir = /var/lib/glance/images/
#Around line 2605: uncomment
rbd_store_chunk_size = 8
#Around line 2626: uncomment
rbd_store_pool = images
#Around line 2645: uncomment and set the glance user (check it with the command below if unsure)
rbd_store_user = glance
#Around line 2664: uncomment and point it at the Ceph configuration file
rbd_store_ceph_conf = /etc/ceph/ceph.conf
#Check the glance user
[root@ct ~]# source keystonerc_admin
[root@ct ~(keystone_admin)]# openstack user list | grep glance
| 8c85dfb8755141858ff56cf0bc63eebe | glance |
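For reference, all of the options edited above live in the [glance_store] section of glance-api.conf in Rocky, so after the changes that section should look roughly like this (a consolidated view of the settings above, not an extra step):
[glance_store]
stores = rbd
default_store = rbd
#filesystem_store_datadir = /var/lib/glance/images/
rbd_store_chunk_size = 8
rbd_store_pool = images
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf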
(3) Restart the openstack-glance-api service
[root@ct ~]# systemctl restart openstack-glance-api
(4) Upload an image to test
① In the dashboard, add a new image (a CLI alternative is sketched after this list)
② Check the image size in Ceph
[root@ct ~]# ceph df
GLOBAL:
SIZE AVAIL RAW USED %RAW USED
3.0 TiB 3.0 TiB 3.0 GiB 0.10
POOLS:
NAME ID USED %USED MAX AVAIL OBJECTS
volumes 1 0 B 0 972 GiB 0
vms 2 0 B 0 972 GiB 0
images 3 13 MiB 0 972 GiB 8
③ Check the image ID
[root@ct ~]# rbd ls images
60fcda1c-8931-48e5-950d-de5acd288d23
④ Nothing is stored in the local image directory
[root@ct ~]# ls /var/lib/glance/images
[root@ct ~]#
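The image above was uploaded through the dashboard; as a CLI alternative (a sketch only, assuming a local cirros-0.4.0-x86_64-disk.img file and the admin credentials sourced earlier), roughly the following would do the same thing:
[root@ct ~(keystone_admin)]# openstack image create --disk-format qcow2 --container-format bare --file cirros-0.4.0-x86_64-disk.img --public cirros
#the new image's UUID should then also appear in "rbd ls images"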
3. Integrate Ceph with Cinder
(1) Back up cinder.conf
[root@ct ~]# cp /etc/cinder/cinder.conf /etc/cinder/cinder.conf.bak
(2) Edit the configuration file
[root@ct ~]# vi /etc/cinder/cinder.conf
#Around line 409: change lvm to ceph
enabled_backends = ceph
#Comment out the [lvm] section and add the [ceph] section below
#[lvm]
#volume_backend_name=lvm
#volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver
#iscsi_ip_address=192.168.100.10
#iscsi_helper=lioadm
#volume_group=cinder-volumes
#volumes_dir=/var/lib/cinder/volumes
[ceph]
default_volume_type=ceph
glance_api_version = 2
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = ceph
rbd_pool = volumes
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = -1
rbd_user = cinder
rbd_secret_uuid = a935d2ae-a7b5-47b6-9fb8-555e513ac63e
(3) Restart the Cinder volume service
[root@ct ~]# systemctl restart openstack-cinder-volume
(4) Check the existing Cinder volume types
[root@ct ~]# source keystonerc_admin
[root@ct ~(keystone_admin)]# cinder type-list
+--------------------------------------+-------+-------------+-----------+
| ID | Name | Description | Is_Public |
+--------------------------------------+-------+-------------+-----------+
| 3dc60589-03ec-4e22-b394-5eceab37509b | iscsi | - | True |
+--------------------------------------+-------+-------------+-----------+
(5) From the CLI, create the volume type corresponding to Cinder's Ceph storage backend
#Add the ceph volume type
[root@ct ~(keystone_admin)]# cinder type-create ceph
+--------------------------------------+------+-------------+-----------+
| ID | Name | Description | Is_Public |
+--------------------------------------+------+-------------+-----------+
| c5e19c98-4fa5-4208-9590-8d9e925d28b4 | ceph | - | True |
+--------------------------------------+------+-------------+-----------+
#List the volume types
[root@ct ~(keystone_admin)]# cinder type-list
+--------------------------------------+-------+-------------+-----------+
| ID | Name | Description | Is_Public |
+--------------------------------------+-------+-------------+-----------+
| 3dc60589-03ec-4e22-b394-5eceab37509b | iscsi | - | True |
| c5e19c98-4fa5-4208-9590-8d9e925d28b4 | ceph | - | True |
+--------------------------------------+-------+-------------+-----------+
#Bind the type to the storage backend: volume_backend_name=ceph
[root@ct ~(keystone_admin)]# cinder type-key ceph set volume_backend_name=ceph
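The volume shown below was created from the dashboard; the equivalent CLI command (not in the original notes, the name is just an example) would be something like:
[root@ct ~(keystone_admin)]# cinder create --volume-type ceph --name ceph-test 1
#a new volume-<UUID> entry then shows up in the volumes pool, as below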
[root@ct ~]# rbd ls volumes
volume-8a310745-fdf3-4622-b65d-aac4bdb39180
4. Integrate Ceph with Nova
Note: there must be no instances in OpenStack before starting the integration
(1) Back up the configuration file (on c1 and c2)
[root@c1 ~]# cp /etc/nova/nova.conf /etc/nova/nova.conf.bak
(2) Edit the configuration file (on c1 and c2)
[root@c1 ~]# vi /etc/nova/nova.conf
#Around line 7072: uncomment and set the image type to rbd
images_type = rbd
#Around line 7096: uncomment and set it to the vms pool declared in Ceph
images_rbd_pool = vms
#Around line 7099: uncomment and add the path to the Ceph configuration file
images_rbd_ceph_conf = /etc/ceph/ceph.conf
#Around line 7256: uncomment and set the cinder user
rbd_user = cinder
#Around line 7261: uncomment and add the UUID generated earlier
rbd_secret_uuid = a935d2ae-a7b5-47b6-9fb8-555e513ac63e
#Around line 6932: uncomment and add the "network=writeback" disk cache mode
disk_cachemodes="network=writeback"
#Find the live_migration options and add the whole line below (enables live migration)
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
#Around line 7114: uncomment and set it to unmap
hw_disk_discard=unmap
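For reference, all of the options above belong to the [libvirt] section of nova.conf, so after the edits that section should contain roughly the following (just a consolidated view of the settings already listed):
[libvirt]
images_type = rbd
images_rbd_pool = vms
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = cinder
rbd_secret_uuid = a935d2ae-a7b5-47b6-9fb8-555e513ac63e
disk_cachemodes="network=writeback"
live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
hw_disk_discard=unmap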
(3) Install libvirt (on c1 and c2)
[root@c1 ~]# yum -y install libvirt
(4) Edit the Ceph configuration on the compute nodes (on c1 and c2)
[root@c1 ~]# vi /etc/ceph/ceph.conf
#Append the following directly
[client]
rbd cache=true
rbd cache writethrough until flush=true
admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
log file = /var/log/qemu/qemu-guest-$pid.log
rbd concurrent management ops = 20
#Create the directories referenced above and grant permissions
[root@c1 ~]# mkdir -p /var/run/ceph/guests/ /var/log/qemu/
[root@c1 ~]# chmod 777 -R /var/run/ceph/guests/ /var/log/qemu/
(5) Push the cinder keyring under /etc/ceph/ on the control node to c1 and c2
[root@ct ceph]# scp ceph.client.cinder.keyring root@c1:/etc/ceph
ceph.client.cinder.keyring 100% 64 24.7KB/s 00:00
[root@ct ceph]# scp ceph.client.cinder.keyring root@c2:/etc/ceph
ceph.client.cinder.keyring 100% 64 42.4KB/s 00:00
(6) Restart the services on the compute nodes
[root@c1 ~]# systemctl restart libvirtd
[root@c1 ~]# systemctl restart openstack-nova-compute
#After launching a test instance from the dashboard, its disk appears in the vms pool
[root@c2 ceph]# rbd ls vms
957ffb1b-2d55-436b-9a85-32f171d3e1d5_disk
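As a final check (not in the original notes), libvirt on the compute node hosting the instance can confirm that its root disk really is an RBD image; the domain name instance-00000001 below is only an example, use the name shown by virsh list:
[root@c2 ~]# virsh list
[root@c2 ~]# virsh domblklist instance-00000001
#the Source column should show the RBD image, e.g. vms/957ffb1b-2d55-436b-9a85-32f171d3e1d5_disk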