This lab configures Cinder with two storage backends of different types, LVM and NFS. The LVM volume provider sits on the same node as the cinder-volume service, while the NFS volume provider is a separate node that actually holds the volumes. This illustrates that the storage node (the node running cinder-volume) is not necessarily the node where volumes materialize; the cinder-volume node is merely a front over the real storage pool. (How to build the LVM and NFS servers themselves is not covered in detail here; the point is to walk through the multi-backend workflow.)
Controller node configuration file /etc/cinder/cinder.conf:
[DEFAULT]
my_ip = 192.168.232.10
auth_strategy = keystone
debug = true
verbose = true
rpc_backend = rabbit
[database]
connection = mysql://cinder:CINDER_DBPASS@controller/cinder
[keystone_authtoken]
auth_uri = http://controller:5000/v2.0
identity_uri = http://controller:35357
admin_user = cinder
admin_password = CINDER_PASS
admin_tenant_name = service
[oslo_messaging_rabbit]
rabbit_host = 192.168.232.10
rabbit_password = RABBIT_PASS
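After editing this file, restart the controller-side Cinder services so the changes take effect. A minimal check, assuming a systemd-based distribution such as CentOS 7 where the RDO packages name the services openstack-cinder-api and openstack-cinder-scheduler:
systemctl restart openstack-cinder-api openstack-cinder-scheduler
# cinder-scheduler should report state "up"; the per-backend volume services
# appear here only after the storage node below is configured and restarted
cinder service-list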
Storage node configuration file /etc/cinder/cinder.conf:
[DEFAULT]
my_ip = 192.168.232.14
glance_host = 192.168.232.10
auth_strategy = keystone
iscsi_helper = tgtadm
debug = true
verbose = true
rpc_backend = rabbit
enabled_backends = lvmdriver-1,nfs
[lvmdriver-1]
lvm_type = default
# Check with vgdisplay on the LVM volume provider node
volume_group = cinder-volumes
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_backend_name = lvmdriver-1
[nfs]
# Create this directory on the storage node; it is the mount point for the
# remote NFS export. Mind its permissions and ownership.
nfs_mount_point_base = /nfs_storage
# Create this file yourself; its content is 192.168.232.16:/storage.
# Mind its permissions and ownership (see the commands after this config).
nfs_shares_config = /etc/cinder/nfs_shares
# The drivers live under /usr/lib/python2.7/site-packages/cinder/volume/drivers/
volume_driver = cinder.volume.drivers.nfs.NfsDriver
# Create a volume type and bind it to this volume_backend_name; the scheduler
# filters out the matching backends based on the volume type of a request:
#   cinder type-create nfs
#   cinder type-key nfs set volume_backend_name=nfs
volume_backend_name = nfs
[database]
connection = mysql://cinder:CINDER_DBPASS@controller/cinder
[keystone_authtoken]
auth_uri = http://controller:5000/v2.0
identity_uri = http://controller:35357
admin_user = cinder
admin_password = CINDER_PASS
admin_tenant_name = service
[oslo_messaging_rabbit]
rabbit_host = 192.168.232.10
rabbit_password = RABBIT_PASS
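The mount point and shares file referenced in the [nfs] section can be prepared as follows. This is a sketch that assumes the Cinder services run as the cinder user; the exact ownership and mode may need adjusting for your deployment:
# On the storage node: mount-point directory for the remote NFS export
mkdir -p /nfs_storage
chown cinder:cinder /nfs_storage
# Shares file listing the NFS export(s), one per line
echo "192.168.232.16:/storage" > /etc/cinder/nfs_shares
chown root:cinder /etc/cinder/nfs_shares
chmod 0640 /etc/cinder/nfs_shares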
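With both backends enabled, restart cinder-volume, create one volume type per backend (the nfs pair repeats the commands shown in the config comments; the lvm pair is analogous), and create a test volume against each type to confirm the scheduler routes by backend name. Service names again assume CentOS/RHEL packaging, and the volume names are illustrative:
systemctl restart openstack-cinder-volume
cinder service-list   # should now list cinder-volume as host@lvmdriver-1 and host@nfs

cinder type-create lvm
cinder type-key lvm set volume_backend_name=lvmdriver-1
cinder type-create nfs
cinder type-key nfs set volume_backend_name=nfs

# The scheduler filters backends by the volume type of the request
cinder create --volume-type lvm --display-name test-lvm 1
cinder create --volume-type nfs --display-name test-nfs 1
cinder show test-nfs   # as admin, os-vol-host-attr:host should end in @nfs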