3.6、Add the Block Storage service (Cinder)
Cinder mainly consists of cinder-api, cinder-volume, the cinder-scheduler daemon, and a message queue.
cinder-api: accepts API requests and routes them to cinder-volume for execution.
cinder-volume: responds to requests, reads from and writes to the block-storage database to maintain state, talks to other processes (such as cinder-scheduler) over the message queue, and interacts directly with the underlying block-storage hardware or software. Thanks to its driver architecture, it can work with many different storage providers.
cinder-scheduler: a daemon similar to nova-scheduler; it selects the optimal block-storage provider node on which to create a volume.
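Each of these components maps to an init script that is started later in this section: openstack-cinder-api and openstack-cinder-scheduler on the controller (linux-node1), and openstack-cinder-volume on the storage node (linux-node2, and later also on linux-node1 for the NFS backend). If you want to confirm which scripts the package actually ships (a quick check, assuming the RPM-based layout used in this guide), run:
[root@linux-node1 ~]# rpm -ql openstack-cinder | grep init.d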
[root@linux-node1 ~]# yum install -y openstack-cinder python-cinderclient
[root@linux-node1 ~]# vim /etc/cinder/cinder.conf
connection=mysql://cinder:cinder@10.0.0.101/cinder
[root@linux-node1 ~]# cinder-manage db sync
[root@linux-node1 ~]# mysql -h 10.0.0.101 -u cinder -pcinder -e "use cinder;show tables;"
+---------------------------------+
| Tables_in_cinder |
+---------------------------------+
| backups |
| encryption |
| iscsi_targets |
| migrate_version |
| quality_of_service_specs |
| quota_classes |
| quota_usages |
| quotas |
| reservations |
| services |
| snapshot_metadata |
| snapshots |
| transfers |
| volume_admin_metadata |
| volume_glance_metadata |
| volume_metadata |
| volume_type_extra_specs |
| volume_types |
| volumes |
+---------------------------------+
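Listing the tables is one way to confirm the sync; the current migration level can also be checked directly with cinder-manage (assuming the cinder-manage db subcommands of this release):
[root@linux-node1 ~]# cinder-manage db version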
[root@linux-node1 ~]# source keystone-admin
[root@linux-node1 ~]# keystone user-create --name=cinder --pass=cinder
+--------------+------------------------------------------------------+
| Property | Value |
+--------------+------------------------------------------------------+
| email | |
| enabled | True |
| id | 74bb2164d3a245b99f8aef148336bfe9 |
| name | cinder |
| username | cinder |
+--------------+------------------------------------------------------+
[root@linux-node1 ~]# vim /etc/cinder/cinder.conf
rabbit_host=10.0.0.101
rabbit_port=5672
rabbit_use_ssl=false
rabbit_userid=guest
rabbit_password=guest
rpc_backend=rabbit
my_ip=10.0.0.101
glance_host=$my_ip
debug=true
auth_strategy=keystone
auth_host=10.0.0.101
auth_port=35357
auth_protocol=http
auth_uri=http://10.0.0.101:5000
identity_uri=http://10.0.0.101:35357
auth_version=v2.0
admin_user=cinder
admin_password=cinder
admin_tenant_name=service
[root@linux-node1 ~]# grep '^[a-z]' /etc/cinder/cinder.conf
rabbit_host=10.0.0.101
rabbit_port=5672
rabbit_use_ssl=false
rabbit_userid=guest
rabbit_password=guest
rpc_backend=rabbit
my_ip=10.0.0.101
glance_host=$my_ip
auth_strategy=keystone
debug=true
connection=mysql://cinder:cinder@10.0.0.101/cinder
auth_host=10.0.0.101
auth_port=35357
auth_protocol=http
auth_uri=http://10.0.0.101:5000
identity_uri=http://10.0.0.101:35357
auth_version=v2.0
admin_user=cinder
admin_password=cinder
admin_tenant_name=service
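Editing cinder.conf by hand is fine; for a scriptable alternative, the same keys can be set with openstack-config from the openstack-utils package (a sketch, assuming the options stay in the [DEFAULT] section as the grep output above suggests):
[root@linux-node1 ~]# yum install -y openstack-utils
[root@linux-node1 ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT rabbit_host 10.0.0.101
[root@linux-node1 ~]# openstack-config --set /etc/cinder/cinder.conf DEFAULT auth_strategy keystone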
[root@linux-node1 ~]# keystone user-role-add --user=cinder --tenant=service --role=admin
[root@linux-node1 ~]# keystone service-create --name=cinder --type=volume
+----------------+----------------------------------------------------+
| Property | Value |
+----------------+----------------------------------------------------+
| description | |
| enabled | True |
| id | e4631dd90c7b43f28d8c738baf7b6143 |
| name | cinder |
| type | volume |
+----------------+----------------------------------------------------+
[root@linux-node1 ~]# keystone endpoint-create \
--service-id=$(keystone service-list|awk '/ volume / {print $2}') \
--publicurl=http://10.0.0.101:8776/v1/%\(tenant_id\)s \
--internalurl=http://10.0.0.101:8776/v1/%\(tenant_id\)s \
--adminurl=http://10.0.0.101:8776/v1/%\(tenant_id\)s
+-------------+-----------------------------------------+
|  Property   |                  Value                  |
+-------------+-----------------------------------------+
| adminurl    | http://10.0.0.101:8776/v1/%(tenant_id)s |
| id          | 64701010778742119f59bfef83cce800        |
| internalurl | http://10.0.0.101:8776/v1/%(tenant_id)s |
| publicurl   | http://10.0.0.101:8776/v1/%(tenant_id)s |
| region      | regionOne                               |
| service_id  | e4631dd90c7b43f28d8c738baf7b6143        |
+-------------+-----------------------------------------+
[root@linux-node1 ~]# keystone service-create --name=cinderv2 --type=volumev2
+----------------+--------------------------------------------------+
| Property | Value |
+----------------+--------------------------------------------------+
| description | |
| enabled | True |
| id | c00b9989990e43c3b9200cc8e534727f |
| name | cinderv2 |
| type | volumev2 |
+----------------+--------------------------------------------------+
[root@linux-node1 ~]# keystone endpoint-create \
--service-id=$(keystone service-list|awk '/ volumev2 / {print $2}') \
--publicurl=http://10.0.0.101:8776/v2/%\(tenant_id\)s \
--internalurl=http://10.0.0.101:8776/v2/%\(tenant_id\)s \
--adminurl=http://10.0.0.101:8776/v2/%\(tenant_id\)s
+-------------+-----------------------------------------+
|  Property   |                  Value                  |
+-------------+-----------------------------------------+
| adminurl    | http://10.0.0.101:8776/v2/%(tenant_id)s |
| id          | 5471a44a5bd94682b4fa72c5bd5eb908        |
| internalurl | http://10.0.0.101:8776/v2/%(tenant_id)s |
| publicurl   | http://10.0.0.101:8776/v2/%(tenant_id)s |
| region      | regionOne                               |
| service_id  | c00b9989990e43c3b9200cc8e534727f        |
+-------------+-----------------------------------------+
[root@linux-node1 ~]# keystone service-list
+---------------------------------+-----------+----------+---------------------------------+
| id | name | type | description |
+---------------------------------+-----------+----------+---------------------------------+
| e4631dd90c7b43f28d8c738baf7b6143 | cinder | volume | |
| c00b9989990e43c3b9200cc8e534727f | cinderv2 | volumev2 | |
| 34701cc1acd44e22b8ae8a9be069d0a7 | glance | image | |
| ea33049270ad4450ac789e9236774878 | keystone | identity | Openstack Identity |
| fd5b4252c2e34a4dbe5255a50c84d61f | neutron | network | |
| 3825f34e11de45399cf82f9a1c56b5c5 | nova | compute | |
+---------------------------------+-----------+----------+---------------------------------+
[root@linux-node1 ~]# keystone endpoint-list
+-----------------------------------------------------+---------------+----------------------------------------------------------+----------------------------------------------------------+--------------------------------------------------------+----------------------------------------------------+
| id | region | publicurl | internalurl | adminurl | service_id |
+-----------------------------------------------------+---------------+----------------------------------------------------------+----------------------------------------------------------+--------------------------------------------------------+------------------------------------------------------+
| 4821c43cdbcb4a8c9e202017ad056437 | regionOne | http://10.0.0.101:8774/v2/%(tenant_id)s | http://10.0.0.101:8774/v2/%(tenant_id)s | http://10.0.0.101:8774/v2/%(tenant_id)s | 3825f34e11de45399cf82f9a1c56b5c5 |
| 5471a44a5bd94682b4fa72c5bd5eb908 | regionOne | http://10.0.0.101:8776/v2/%(tenant_id)s | http://10.0.0.101:8776/v2/%(tenant_id)s | http://10.0.0.101:8776/v2/%(tenant_id)s | c00b9989990e43c3b9200cc8e534727f |
| 5f7f06a3b4874deda19caf67f5f21a1f | regionOne | http://10.0.0.101:9292 | http://10.0.0.101:9292 | http://10.0.0.101:9292 | 34701cc1acd44e22b8ae8a9be069d0a7 |
| 64701010778742119f59bfef83cce800 | regionOne | http://10.0.0.101:8776/v1/%(tenant_id)s | http://10.0.0.101:8776/v1/%(tenant_id)s | http://10.0.0.101:8776/v1/%(tenant_id)s | e4631dd90c7b43f28d8c738baf7b6143 |
| 735a100c58cf4335adc00bd4267a1387 | regionOne | http://10.0.0.101:5000/v2.0 | http://10.0.0.101:5000/v2.0 | http://10.0.0.101:35357/v2.0 | ea33049270ad4450ac789e9236774878 |
| 8faadda104854407beffc2ba76a35c48 | regionOne | http://10.0.0.101:9696 | http://10.0.0.101:9696 | http://10.0.0.101:9696 | fd5b4252c2e34a4dbe5255a50c84d61f |
+-----------------------------------------------------+---------------+----------------------------------------------------------+----------------------------------------------------------+--------------------------------------------------------+---------------------------------------------------------+
[root@linux-node1 ~]# /etc/init.d/openstack-cinder-api start
Starting openstack-cinder-api: [ OK ]
[root@linux-node1 ~]# /etc/init.d/openstack-cinder-scheduler start
Starting openstack-cinder-scheduler: [ OK ]
[root@linux-node1 ~]# chkconfig openstack-cinder-api on
[root@linux-node1 ~]# chkconfig openstack-cinder-scheduler on
[root@linux-node1 ~]# cinder service-list
+------------------+-------------+------+--------+-----------+---------------------------+-----------------------+
| Binary | Host | Zone | Status | State |Updated_at | Disabled Reason |
+------------------+-------------+------+--------+-----------+---------------------------+-----------------------+
| cinder-scheduler | linux-node1 | nova | enabled | up | 2018-07-01T00:18:12.000000 | None |
+------------------+-------------+------+--------+-----------+---------------------------+-----------------------+
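If either service fails to start or shows up as down in cinder service-list, check its log first; on this install the logs live under /var/log/cinder/ (assuming the default log paths of the RPM packages):
[root@linux-node1 ~]# tail -n 50 /var/log/cinder/api.log /var/log/cinder/scheduler.log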
3.6.1、Shut down the compute node (linux-node2) and add a 20 GB disk to it
[root@linux-node2 ~]# pvcreate /dev/sdb
Physical volume "/dev/sdb" successfully created
[root@linux-node2 ~]# vgcreate cinder-volumes /dev/sdb
Volume group "cinder-volumes" successfully created
[root@linux-node2 ~]# vim /etc/lvm/lvm.conf
filter = [ "a/sda1/", "a/sdb/","r/.*/" ]
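The filter keeps the system disk (sda1) and the new cinder disk (sdb) visible to LVM and rejects everything else. A quick sanity check that LVM still sees both devices and the new volume group after the change:
[root@linux-node2 ~]# pvs
[root@linux-node2 ~]# vgs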
[root@linux-node2 ~]# yum install -y scsi-target-utils
[root@linux-node2 ~]# vim /etc/tgt/targets.conf
include /etc/cinder/volumes/*
[root@linux-node2 ~]# /etc/init.d/tgtd start
Starting SCSI target daemon: [ OK ]
[root@linux-node2 ~]# chkconfig tgtd on
[root@linux-node2 ~]# yum install -y openstack-cinder
On linux-node1, scp the cinder configuration file to linux-node2:
[root@linux-node1 ~]# scp /etc/cinder/cinder.conf [email protected]:/etc/cinder/
[root@linux-node2 ~]# vim /etc/cinder/cinder.conf
my_ip=10.0.0.102
glance_host=10.0.0.101
iscsi_ip_address=$my_ip
volume_backend_name=iSCSI-Storage
iscsi_helper=tgtadm
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
[root@linux-node2 ~]# grep "^[a-z]" /etc/cinder/cinder.conf
rabbit_host=10.0.0.101
rabbit_port=5672
rabbit_use_ssl=false
rabbit_userid=guest
rabbit_password=guest
rpc_backend=rabbit
my_ip=10.0.0.102
glance_host=10.0.0.101
auth_strategy=keystone
debug=true
iscsi_ip_address=$my_ip
volume_backend_name=iSCSI-Storage
iscsi_helper=tgtadm
volume_driver=cinder.volume.drivers.lvm.LVMISCSIDriver
connection=mysql://cinder:cinder@10.0.0.101/cinder
auth_host=10.0.0.101
auth_port=35357
auth_protocol=http
auth_uri=http://10.0.0.101:5000
identity_uri=http://10.0.0.101:35357
auth_version=v2.0
admin_user=cinder
admin_password=cinder
admin_tenant_name=service
[root@linux-node2 ~]# /etc/init.d/openstack-cinder-volume start
Starting openstack-cinder-volume: [ OK ]
[root@linux-node2 ~]# chkconfig openstack-cinder-volume on
On linux-node1, verify that the cinder-volume service on linux-node2 has started successfully:
[root@linux-node1 ~]# cinder service-list
+------------------+-----------+--------+---------+-----------+---------------------------+---------------------------+
| Binary | Host | Zone | Status | State | Updated_at | Disabled Reason |
+------------------+-----------+--------+---------+-----------+---------------------------+---------------------------+
| cinder-scheduler | linux-node1 | nova | enabled | up | 2018-07-04T08:44:18.000000 | None |
| cinder-volume | linux-node2 | nova | enabled | up | 2018-07-04T08:44:19.000000 | None |
+------------------+-----------+--------+---------+-----------+---------------------------+---------------------------+
3.6.2、Log in as the demo user and create a volume
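The volume itself is created from the dashboard while logged in as demo (a 1 GB disk named test). A roughly equivalent CLI call, assuming the demo credentials have been sourced on the controller, would be:
[root@linux-node1 ~]# cinder create --display-name test 1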
[root@linux-node2 ~]# lvdisplay ## verify the newly created volume
--- Logical volume ---
LV Path /dev/cinder-volumes/volume-5c65d291-0f91-4fca-83a5-97bb8a4a5ddc
LV Name volume-5c65d291-0f91-4fca-83a5-97bb8a4a5ddc
VG Name cinder-volumes
LV UUID t042Do-lmu5-UdcU-3jyh-3Tfk-WIof-xZyM3K
LV Write Access read/write
LV Creation host, time linux-node2, 2018-07-04 18:22:47 +0800
LV Status available
# open 0
LV Size 1.00 GiB
Current LE 256
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 256
Block device 253:0
3.6.3、Attach the volume (test) to the previously created instance (demo); the instance (demo) should be in the active state
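The attach itself is done from the dashboard; an equivalent CLI call from the controller (using the volume id visible in the lvdisplay output above) would be roughly:
[root@linux-node1 ~]# nova volume-attach demo 5c65d291-0f91-4fca-83a5-97bb8a4a5ddc auto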
Connect to the instance remotely with Xshell (once inside, its IP turns out to be 10.0.0.32).
$ sudo fdisk -l
Disk /dev/vda: 1073 MB, 1073741824 bytes
255 heads, 63 sectors/track, 130 cylinders, total 2097152 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Device Boot Start End Blocks Id System
/dev/vda1 * 16065 2088449 1036192+ 83 Linux
Disk /dev/vdb: 1073 MB, 1073741824 bytes
16 heads, 63 sectors/track, 2080 cylinders, total 2097152 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Disk /dev/vdb doesn't contain a valid partition table
$ sudo fdisk /dev/vdb
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel with disk identifier 0xa9716485.
Changes will remain in memory only, until you decide to write them.
After that, of course, the previous content won't be recoverable.
Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)
Command (m for help): n
Command action
e extended
p primary partition (1-4)
Select (default p):p
Partition number (1-4, default 1): 1
First sector (2048-2097151, default 2048):
Using default value 2048
Last sector, +sectors or +size{K,M,G} (2048-2097151, default 2097151):
Using default value 2097151
Command (m for help): w
The partition table has been altered!
Calling ioctl() to re-read partition table.
Syncing disks.
$ sudo fdisk -l
Disk /dev/vda: 1073 MB, 1073741824 bytes
255 heads, 63 sectors/track, 130 cylinders, total 2097152 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0x00000000
Device Boot Start End Blocks Id System
/dev/vda1 * 16065 2088449 1036192+ 83 Linux
Disk /dev/vdb: 1073 MB, 1073741824 bytes
9 heads, 8 sectors/track, 29127 cylinders, total 2097152 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0xa9716485
Device Boot Start End Blocks Id System
/dev/vdb1 2048 2097151 1047552 83 Linux
$ sudo mkfs.ext4 /dev/vdb1 ## format the volume
mke2fs 1.41.14 (22-Dec-2010)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
65536 inodes, 261888 blocks
13094 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=268435456
8 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376
Writing inode tables: done
Creating journal (4096 blocks): done
Writing superblocks and filesystem accounting information: done
This filesystem will be automatically checked every 31 mounts or
180 days, whichever comes first. Use tune2fs -c or -i to override.
$ sudo mkdir /data1 ## create a mount point
$ sudo mount /dev/vdb1 /data1 ## mount the volume
$ cd /data1/
$ ls
lost+found
$ cd
$ pwd
/home/cirros
$ sudo umount /dev/vdb1 ## leave /data1 first, then unmount the volume; after that it can be detached from the instance
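Once it is unmounted inside the guest, the volume can be detached from the dashboard or, just as well, from the controller (same instance and volume as above):
[root@linux-node1 ~]# nova volume-detach demo 5c65d291-0f91-4fca-83a5-97bb8a4a5ddc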
3.6.4、Create volume types
[root@linux-node1 ~]# source keystone-admin
[root@linux-node1 ~]# cinder type-create iSCSI
[root@linux-node1 ~]# cinder type-key iSCSI set volume_backend_name=iSCSI-Storage
3.6.4.1、Create a volume of type iSCSI
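This is done from the dashboard by picking the iSCSI type when creating the volume; a hedged CLI equivalent (the volume name here is illustrative) would be:
[root@linux-node1 ~]# cinder create --volume-type iSCSI --display-name test-iscsi 1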
3.6.4.2、Create a volume of type NFS
[root@linux-node1 ~]# yum install -y nfs-utils rpcbind
[root@linux-node1 ~]# mkdir -p /data/nfs
[root@linux-node1 ~]# vim /etc/exports
/data/nfs *(rw,no_root_squash)
[root@linux-node1 ~]# /etc/init.d/rpcbind start
Starting rpcbind: [ OK ]
[root@linux-node1 ~]# /etc/init.d/nfs start
Starting NFS services: [ OK ]
Starting NFS quotas: [ OK ]
Starting NFS mountd: [ OK ]
Starting NFS daemon: [ OK ]
Starting RPC idmapd: [ OK ]
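Before pointing cinder at the export, it is worth confirming that it is actually being served (showmount comes with nfs-utils):
[root@linux-node1 ~]# showmount -e 10.0.0.101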
[root@linux-node1 ~]# vim /etc/cinder/cinder.conf
volume_driver=cinder.volume.drivers.nfs.NfsDriver
nfs_shares_config=/etc/cinder/nfs_shares
nfs_mount_point_base=$state_path/mnt
[root@linux-node1 ~]# vim /etc/cinder/nfs_shares
10.0.0.101:/data/nfs
[root@linux-node1 ~]# /etc/init.d/openstack-cinder-volume start
Starting openstack-cinder-volume: [ OK ]
[root@linux-node1 ~]# cinder service-list
+------------------+-------------+------+---------+----------+----------------------------+----------------------------+
| Binary | Host | Zone | Status| State | Updated_at | Disabled Reason |
+------------------+-------------+------+---------+----------+----------------------------+----------------------------+
| cinder-scheduler | linux-node1 | nova | enabled | up | 2018-07-04T13:25:42.000000 | None |
| cinder-volume | linux-node1 | nova | enabled | up | 2018-07-04T13:25:43.000000 | None |
| cinder-volume | linux-node2 | nova | enabled | up | 2018-07-04T13:26:33.000000 | None |
+------------------+-------------+------+---------+----------+----------------------------+----------------------------+
[root@linux-node1 ~]# vim /etc/cinder/cinder.conf
volume_backend_name=NFS-Storage
[root@linux-node1 ~]# /etc/init.d/openstack-cinder-volume restart
Stopping openstack-cinder-volume: [ OK ]
Starting openstack-cinder-volume: [ OK ]
[root@linux-node1 ~]# cinder type-create NFS
+--------------------------------------+--------+
| ID | Name |
+--------------------------------------+--------+
| b1939e5e-752d-4dc6-a61a-c9145b64eccf | NFS |
+--------------------------------------+--------+
[root@linux-node1 ~]# cinder type-key NFS set volume_backend_name=NFS-Storage
[root@linux-node1 ~]# cinder type-list
+--------------------------------------+--------+
| ID | Name |
+--------------------------------------+--------+
| af7eb2c5-144a-44a8-ac8d-d9751a553906 | iSCSI |
| b1939e5e-752d-4dc6-a61a-c9145b64eccf | NFS |
+--------------------------------------+--------+
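To double-check that each type is mapped to the intended backend name, the extra specs can be listed as well (assuming the installed python-cinderclient provides this subcommand):
[root@linux-node1 ~]# cinder extra-specs-list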
[root@linux-node1 ~]# mount
/dev/sda3 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
sunrpc on /var/lib/nfs/rpc_pipefs type rpc_pipefs (rw)
nfsd on /proc/fs/nfsd type nfsd (rw)
10.0.0.101:/data/nfs on /var/lib/cinder/mnt/7b5d53c7793d18d5e9aed8027e4b2a36 type nfs (rw,vers=4,addr=10.0.0.101,clientaddr=10.0.0.101)
Create a volume of type NFS
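As with the iSCSI case, this can be done from the dashboard by selecting the NFS type, or with a roughly equivalent CLI call (the name is illustrative):
[root@linux-node1 ~]# cinder create --volume-type NFS --display-name test-nfs 1
Note that an NFS-backed volume is created as a file under the export (here /data/nfs on linux-node1) rather than as an LVM logical volume, so it will not show up in the lvdisplay output below: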
[root@linux-node2 ~]# lvdisplay
--- Logical volume ---
LV Path /dev/cinder-volumes/volume-5c65d291-0f91-4fca-83a5-97bb8a4a5ddc
LV Name volume-5c65d291-0f91-4fca-83a5-97bb8a4a5ddc
VG Name cinder-volumes
LV UUID t042Do-lmu5-UdcU-3jyh-3Tfk-WIof-xZyM3K
LV Write Access read/write
LV Creation host, time linux-node2, 2018-07-04 18:22:47 +0800
LV Status available
# open 0
LV Size 1.00 GiB
Current LE 256
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 256
Block device 253:0
--- Logical volume ---
LV Path /dev/cinder-volumes/volume-036bc228-893b-4327-87f2-5a97c4e38ebe
LV Name volume-036bc228-893b-4327-87f2-5a97c4e38ebe
VG Name cinder-volumes
LV UUID mI2iBB-vazJ-4vx6-uEEL-uYsL-9x7R-bMbDwh
LV Write Access read/write
LV Creation host, time linux-node2, 2018-07-04 20:02:12 +0800
LV Status available
# open 0
LV Size 1.00 GiB
Current LE 256
Segments 1
Allocation inherit
Read ahead sectors auto
- currently set to 256
Block device 253:1
Note: whether or not this solves the problem you ran into, feel free to get in touch and exchange ideas so we can all improve together!