Install and Configure OpenStack Block Storage (Cinder)

Based on the OpenStack Icehouse release



OpenStack Block Storage Service Controller setup

yum -y install openstack-cinder

mysql -uroot -p
mysql> create database cinder;
mysql> grant all privileges on cinder.* to 'cinder'@'localhost' identified by 'CINDER-DBPASS';
mysql> grant all privileges on cinder.* to 'cinder'@'%' identified by 'CINDER-DBPASS';
mysql> flush privileges;

vi /etc/cinder/cinder.conf
[database]
connection=mysql://cinder:CINDER-DBPASS@MYSQL-SERVER/cinder

cinder-manage db sync
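
As a quick sanity check, you can log in with the cinder credentials and confirm the synced schema (the exact table list varies between releases):

mysql -ucinder -pCINDER-DBPASS -h MYSQL-SERVER cinder -e "show tables;"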


vi /etc/cinder/cinder.conf

[DEFAULT]
auth_strategy=keystone
rpc_backend=cinder.openstack.common.rpc.impl_qpid
qpid_hostname=controller

[keystone_authtoken]
auth_uri=http://controller:5000
auth_host=controller
auth_port=35357
auth_protocol=http
admin_user=cinder
admin_password=CINDER-USER-PASSWORD
admin_tenant_name=service
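
If you prefer a non-interactive setup, the same values can be applied with openstack-config from the openstack-utils package (a sketch of the equivalent commands, assuming that package is available in your repositories):

yum -y install openstack-utils
openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/cinder/cinder.conf DEFAULT rpc_backend cinder.openstack.common.rpc.impl_qpid
openstack-config --set /etc/cinder/cinder.conf DEFAULT qpid_hostname controller
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken auth_uri http://controller:5000
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_user cinder
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_password CINDER-USER-PASSWORD
openstack-config --set /etc/cinder/cinder.conf keystone_authtoken admin_tenant_name service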


# add the cinder user (in the service tenant)
keystone user-create --tenant service --name cinder --pass CINDER-USER-PASSWORD

# grant the cinder user the admin role
keystone user-role-add --user cinder --tenant service --role admin
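
To verify the user and the role assignment (requires admin credentials to be sourced first):

keystone user-list | grep cinder
keystone user-role-list --user cinder --tenant service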


# add service for cinder v1
keystone service-create --name=cinder --type=volume --description="Cinder Service"

# add endpoint for cinder v1
keystone endpoint-create --region RegionOne --service cinder \
  --publicurl=http://controller:8776/v1/%\(tenant_id\)s \
  --internalurl=http://controller:8776/v1/%\(tenant_id\)s \
  --adminurl=http://controller:8776/v1/%\(tenant_id\)s

# add service for cinder v2
keystone service-create --name=cinderv2 --type=volumev2 --description="Cinder Service V2"

# add endpoint for cinder v2
keystone endpoint-create --region RegionOne --service cinderv2 \
  --publicurl=http://controller:8776/v2/%\(tenant_id\)s \
  --internalurl=http://controller:8776/v2/%\(tenant_id\)s \
  --adminurl=http://controller:8776/v2/%\(tenant_id\)s
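
Both services and their endpoints should now be registered; a quick check:

keystone service-list
keystone endpoint-list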


chown -R cinder:cinder /etc/cinder /var/log/cinder

service openstack-cinder-api start; chkconfig openstack-cinder-api on
service openstack-cinder-scheduler start; chkconfig openstack-cinder-scheduler on
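
If the API came up cleanly it should be listening on port 8776; if not, the log (default location, assuming stock logging settings) usually says why:

netstat -lntp | grep 8776
tail -n 20 /var/log/cinder/api.log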

OpenStack Block Storage Service node setup (node1)

1. service NetworkManager stop; chkconfig NetworkManager off
service network start; chkconfig network on

disable the firewall and SELinux
service iptables stop; chkconfig iptables off
service ip6tables stop; chkconfig ip6tables off
setenforce 0
# make the SELinux change persistent across reboots
sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config

2. eth0 for management/public/floating (192.168.1.0/24), eth1 for internal/flat (192.168.20.0/24); a separate NIC for the management network is recommended


3. set hostname in /etc/sysconfig/network and /etc/hosts
192.168.1.10    controller
192.168.1.11    node1

4. yum -y install ntp
vi /etc/ntp.conf
server 192.168.1.10
restrict 192.168.1.10

service ntpd start; chkconfig ntpd on
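
Give ntpd a minute, then confirm the node is actually syncing against the controller:

ntpq -p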

5. yum -y install http://repos.fedorapeople.org/repos/openstack/openstack-icehouse/rdo-release-icehouse-3.noarch.rpm

yum -y install http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
yum -y install mysql MySQL-python


6. yum -y install openstack-cinder scsi-target-utils

Using LVM, NFS, and GlusterFS as multiple storage back ends for cinder volumes

on node1 for LVM:

vi /etc/lvm/lvm.conf
devices {
...
filter = [ "a/sda/", "a/sdb/", "a/sdc/","r/.*/"]
...
}


vi /etc/tgt/targets.conf
include /etc/cinder/volumes/*


pvcreate /dev/sdb; vgcreate -s 32M cinder-volumes-1 /dev/sdb

pvcreate /dev/sdc; vgcreate -s 32M cinder-volumes-2 /dev/sdc
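
Verify that both volume groups exist before wiring them into cinder:

pvs
vgs cinder-volumes-1 cinder-volumes-2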



7. on node1 for nfs

yum -y install nfs-utils
service rpcbind start; chkconfig rpcbind on
service nfslock start; chkconfig nfslock on


# test that the export mounts correctly

mkdir -p /test
mount NFS-SERVER:/share /test
mount | grep /test
umount /test


vi /etc/cinder/nfs_shares
NFS-SERVER:/share

chown root:cinder /etc/cinder/nfs_shares; chmod 640 /etc/cinder/nfs_shares
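
You can also confirm the export is visible from the node without mounting it (showmount ships with nfs-utils):

showmount -e NFS-SERVER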


8. on node1 for GlusterFS

yum -y install glusterfs-fuse


vi /etc/cinder/glusterfs_shares
GLUSTERFS-SERVER:/vol01

chown root:cinder /etc/cinder/glusterfs_shares; chmod 640 /etc/cinder/glusterfs_shares
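
A manual mount is a quick way to confirm the Gluster volume is reachable before handing it to cinder; /mnt/gtest below is just a throwaway mount point for the test:

mkdir -p /mnt/gtest
mount -t glusterfs GLUSTERFS-SERVER:/vol01 /mnt/gtest
df -h /mnt/gtest
umount /mnt/gtest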


9. vi /etc/cinder/cinder.conf

[DEFAULT]
auth_strategy=keystone
rpc_backend=cinder.openstack.common.rpc.impl_qpid
qpid_hostname=controller
glance_host=controller
enabled_backends=lvmdriver-1,lvmdriver-2,nfs,glusterfs

[keystone_authtoken]
auth_uri=http://controller:5000
auth_host=controller
auth_port=35357
auth_protocol=http
admin_user=cinder
admin_password=CINDER-USER-PASSWORD
admin_tenant_name=service

[database]
connection=mysql://cinder:CINDER-DBPASS@MYSQL-SERVER/cinder

# append the following back-end sections at the end of the file

[lvmdriver-1]
# must match the volume group created above
volume_group=cinder-volumes-1
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
# arbitrary back-end name; referenced later by volume types
volume_backend_name=LVM_iSCSI-1

[lvmdriver-2]
# must match the volume group created above
volume_group=cinder-volumes-2
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
# arbitrary back-end name; referenced later by volume types
volume_backend_name=LVM_iSCSI-2

[nfs]
volume_driver=cinder.volume.drivers.nfs.NfsDriver
# arbitrary back-end name; referenced later by volume types
volume_backend_name=NFS
nfs_shares_config=/etc/cinder/nfs_shares
nfs_mount_point_base=/var/lib/cinder/nfs

[glusterfs]
volume_driver=cinder.volume.drivers.glusterfs.GlusterfsDriver
# arbitrary back-end name; referenced later by volume types
volume_backend_name=GlusterFS
glusterfs_shares_config=/etc/cinder/glusterfs_shares
glusterfs_mount_point_base=/var/lib/cinder/glusterfs


10. chown -R cinder:cinder /etc/cinder /var/log/cinder

service openstack-cinder-volume start; chkconfig openstack-cinder-volume on
service tgtd start; chkconfig tgtd on; chkconfig iscsi off; chkconfig iscsid off
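
With multi-backend enabled, cinder-volume registers one service entry per back end (host@backend). A quick check from any node with database access (output layout is approximate; cinder service-list with admin credentials shows the same):

cinder-manage service list
# expect entries such as:
#   cinder-volume   node1@lvmdriver-1   ...   enabled   :-)
#   cinder-volume   node1@nfs           ...   enabled   :-)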

11. on the cinder controller node

source ~/adminrc

cinder type-create lvm1
cinder type-create lvm2
cinder type-create nfs
cinder type-create glusterfs

cinder type-key lvm1 set volume_backend_name=LVM_iSCSI-1
cinder type-key lvm2 set volume_backend_name=LVM_iSCSI-2
cinder type-key nfs set volume_backend_name=NFS
cinder type-key glusterfs set volume_backend_name=GlusterFS
cinder extra-specs-list

source  ~/demo1rc

cinder create --display-name disk_lvm1 --volume-type lvm1 1
cinder create --display-name disk_lvm2 --volume-type lvm2 1
cinder create --display-name disk_nfs --volume-type nfs 1
cinder create --display-name disk_glusterfs --volume-type glusterfs 1


cinder list
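
All four volumes should reach the available status within a few seconds. If one lands in error instead, the scheduler most likely failed to match the volume type to a back end; the logs (default file names, assuming stock logging settings) will show why:

tail -n 50 /var/log/cinder/scheduler.log    # on the controller
tail -n 50 /var/log/cinder/volume.log       # on node1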


12. attach disk_lvm1 to the CirrOS instance

nova list

nova volume-attach CirrOS id_of_disk_lvm1 auto

nova stop CirrOS

nova volume-detach CirrOS id_of_disk_lvm1
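
While attached, cinder list reports the volume as in-use and nova show lists it under the instance; inside the guest the disk typically appears as the next virtio device (e.g. /dev/vdb):

cinder list
nova show CirrOS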


Notes:

Using NFS for cinder volumes

on the NFS server:

yum -y install nfs-utils
vi /etc/exports
/share    *(rw,sync,no_root_squash)


service rpcbind start

service nfs start; chkconfig nfs on

Using GlusterFS for cinder volumes

on each GlusterFS server:

1. yum  -y install glusterfs-server

vi /etc/hosts
# add GlusterFS servers
192.168.1.x   gfs01
192.168.1.x   gfs02

service glusterd start; chkconfig glusterd on

mkdir /home/vol_replica

2. Configure the following on one GlusterFS server (gfs01):
gluster peer probe gfs02
gluster peer status
gluster volume create vol01 replica 2 transport tcp gfs01:/home/vol_replica gfs02:/home/vol_replica force
gluster volume start vol01
gluster volume info

3. set the volume options; these are cluster-wide, so run them once on any server (165 is the uid/gid of the cinder user in the RDO packages):
gluster volume set vol01 storage.owner-uid 165
gluster volume set vol01 storage.owner-gid 165
gluster volume set vol01 server.allow-insecure on

4. on each GlusterFS server, allow insecure client ports and restart glusterd:
vi /etc/glusterfs/glusterd.vol
option rpc-auth-allow-insecure on

service glusterd restart

