执行步骤如下:
- 确认当前HA环境的情况
- 配置vsan集群启用ISCSI
- VSAN上创建分配ISCSI磁盘
- 服务器上配置ISCSI目标及多路径
- 参考资料
具体操作记录
确认当前HA环境的情况
HA资源使用rsc_sbd
gcrmapp01:~ # crm configure show rsc_sbd
primitive rsc_sbd stonith:external/sbd \
params sbd_device="/dev/disk/by-id/scsi-14945540000000000766f6c2d363641314532414600000000-part1" \
op monitor interval=15 timeout=30
查看rsc_sbd的设备
gcrmapp01:~ # ll /dev/disk/by-id/scsi-14945540000000000766f6c2d363641314532414600000000-part1
lrwxrwxrwx 1 root root 10 Mar 6 20:09 /dev/disk/by-id/scsi-14945540000000000766f6c2d363641314532414600000000-part1 -> ../../sdd1
gcrmapp01:~ # lsblk
NAME MAJ:MIN RM SIZE RO MOUNTPOINT
sda 8:0 0 158M 0
└─sda1 8:1 0 156M 0 /boot
sdc 8:32 0 169.9G 0
└─sdc1 8:33 0 169.9G 0
├─system-root (dm-0) 252:0 0 150G 0 /
└─system-swap (dm-1) 252:1 0 19.9G 0 [SWAP]
sdb 8:16 0 50G 0
└─sdb1 8:17 0 50G 0
└─localvg-usrsaplv (dm-2) 252:2 0 50G 0 /usr/sap
sr0 11:0 1 1024M 0
sdd 8:48 0 1G 0
└─sdd1 8:49 0 1023M 0
查看sbd进程配置
gcrmapp01:~ # cat /etc/init.d/openais |grep sbd
SBD_CONFIG=/etc/sysconfig/sbd
SBD_BIN="/usr/sbin/sbd"
sleep $(sbd -d "$SBD_DEVICE" dump | grep -m 1 msgwait | awk '{print $4}') 2>/dev/null
local stopsbd=1
stopsbd=0
if [ $stopsbd = 1 ]; then
gcrmapp01:~ #
gcrmapp01:~ # cat /etc/sysconfig/sbd
SBD_DEVICE="/dev/disk/by-id/scsi-14945540000000000766f6c2d363641314532414600000000-part1"
SBD_OPTS="-W -P -t 130 -5 120"
You have new mail in /var/mail/root
gcrmapp01:~ #
ISCSI信息
gcrmapp01:~ # iscsiadm -m node
172.16.195.68:3260,1 iqn.2017-02.com.bingocloud.s2:sn.001
gcrmapp01:~ # iscsiadm -m session
tcp: [1] 172.16.195.68:3260,1 iqn.2017-02.com.bingocloud.s2:sn.001 (non-flash)
启用VSAN上ISCSI服务
ISCSI配置使用vmk2通道
新建iSCSI目标
添加LUN磁盘卷
服务器ISCSI配置
- 发现目标器:
iscsiadm -m discovery -t st -p 172.168.20.164
iscsiadm -m discovery -t st -p 172.168.20.165
iscsiadm -m discovery -t st -p 172.168.20.166
iscsiadm -m discovery -t st -p 172.168.20.167
- 显示所有已发现目标器节点信息:
gcrmapp01:~ # iscsiadm -m node
172.168.20.165:3260,257 iqn.1998-01.com.vmware.52b5b5239e981238-f8d736b9e3346c60
172.168.20.166:3260,257 iqn.1998-01.com.vmware.52b5b5239e981238-f8d736b9e3346c60
172.168.20.167:3260,257 iqn.1998-01.com.vmware.52b5b5239e981238-f8d736b9e3346c60
172.168.20.164:3260,257 iqn.1998-01.com.vmware.52b5b5239e981238-f8d736b9e3346c60
gcrmapp01:~ #
- 登陆、登出所有目标器:
iscsiadm -m node -L all
iscsiadm -m node -U all
- iscsi会话
gcrmapp01:~ # iscsiadm -m session
tcp: [1] 172.168.20.166:3260,257 iqn.1998-01.com.vmware.52b5b5239e981238-f8d736b9e3346c60 (non-flash)
tcp: [2] 172.168.20.167:3260,257 iqn.1998-01.com.vmware.52b5b5239e981238-f8d736b9e3346c60 (non-flash)
tcp: [3] 172.168.20.164:3260,257 iqn.1998-01.com.vmware.52b5b5239e981238-f8d736b9e3346c60 (non-flash)
tcp: [4] 172.168.20.165:3260,257 iqn.1998-01.com.vmware.52b5b5239e981238-f8d736b9e3346c60 (non-flash)
gcrmapp01:~ #
- iscsi目标磁盘
gcrmapp01:/ # lsblk
NAME MAJ:MIN RM SIZE RO MOUNTPOINT
....
sdf 8:80 0 1G 0
└─sdf1 8:81 0 1023M 0
sdg 8:96 0 1.5G 0
└─sdg1 8:97 0 1.5G 0
sdh 8:112 0 1G 0
└─sdh1 8:113 0 1023M 0
sdi 8:128 0 1G 0
└─sdi1 8:129 0 1023M 0
sdk 8:160 0 1.5G 0
└─sdk1 8:161 0 1.5G 0
sdj 8:144 0 1.5G 0
└─sdj1 8:145 0 1.5G 0
sdl 8:176 0 1G 0
└─sdl1 8:177 0 1023M 0
sdm 8:192 0 1.5G 0
└─sdm1 8:193 0 1.5G 0
- 多路径设备查看
for i in `cat /proc/partitions | awk {'print $4'} |grep sd`; do echo "### $i: `/lib/udev/scsi_id --whitelist --replace-whitespace /dev/$i`"; done
gcrmapp01:~ # for i in `cat /proc/partitions | awk {'print $4'} |grep sd`; do echo "### $i: `/lib/udev/scsi_id --whitelist --replace-whitespace /dev/$i`"; done
### sda:
### sda1:
### sdc:
### sdc1:
### sdb:
### sdb1:
### sdd: 1VMware_VITDEVID9d26675efa7f2e1820c16c92bfa6be0e
### sdd1: 1VMware_VITDEVID9d26675efa7f2e1820c16c92bfa6be0e
### sde: 1VMware_VITDEVIDbf26675e34b40a4ef3606c92bfa6be9a
### sde1: 1VMware_VITDEVIDbf26675e34b40a4ef3606c92bfa6be9a
### sdf: 1VMware_VITDEVID9d26675efa7f2e1820c16c92bfa6be0e
### sdf1: 1VMware_VITDEVID9d26675efa7f2e1820c16c92bfa6be0e
### sdh: 1VMware_VITDEVIDbf26675e34b40a4ef3606c92bfa6be9a
### sdh1: 1VMware_VITDEVIDbf26675e34b40a4ef3606c92bfa6be9a
### sdg: 1VMware_VITDEVID9d26675efa7f2e1820c16c92bfa6be0e
### sdg1: 1VMware_VITDEVID9d26675efa7f2e1820c16c92bfa6be0e
### sdi: 1VMware_VITDEVIDbf26675e34b40a4ef3606c92bfa6be9a
### sdi1: 1VMware_VITDEVIDbf26675e34b40a4ef3606c92bfa6be9a
gcrmapp01:~ # grep -v ^# /etc/multipath.conf
defaults {
user_friendly_names yes
}
blacklist {
devnode ^hd[a-z]
}
multipaths {
multipath {
wwid 1VMware_VITDEVID9d26675efa7f2e1820c16c92bfa6be0e
alias iscsi-disk-1
}
multipath {
wwid 1VMware_VITDEVIDbf26675e34b40a4ef3606c92bfa6be9a
alias iscsi-disk-2
}
}
gcrmapp01:~ # chkconfig multipathd on
gcrmapp01:~ # chkconfig --list | grep mult
multipathd 0:off 1:off 2:off 3:on 4:off 5:on 6:off
gcrmapp01:~ # rcmultipathd restart
Shutting down multipathd done
Starting multipathd done
gcrmapp01:~ #
- 多路径设备-iscsi-disk别名
gcrmapp01:~ # lsblk
NAME MAJ:MIN RM SIZE RO MOUNTPOINT
sda 8:0 0 158M 0
└─sda1 8:1 0 156M 0 /boot
sdc 8:32 0 169.9G 0
└─sdc1 8:33 0 169.9G 0
├─system-root (dm-0) 252:0 0 150G 0 /
└─system-swap (dm-1) 252:1 0 19.9G 0 [SWAP]
sdb 8:16 0 50G 0
└─sdb1 8:17 0 50G 0
└─localvg-usrsaplv (dm-2) 252:2 0 50G 0 /usr/sap
sr0 11:0 1 1024M 0
sdd 8:48 0 1G 0
├─sdd1 8:49 0 1023M 0
└─iscsi-disk-1 (dm-3) 252:3 0 1G 0
└─iscsi-disk-1_part1 (dm-5) 252:5 0 1023M 0
sde 8:64 0 1.5G 0
├─sde1 8:65 0 1.5G 0
└─iscsi-disk-2 (dm-4) 252:4 0 1.5G 0
└─iscsi-disk-2_part1 (dm-6) 252:6 0 1.5G 0
sdf 8:80 0 1G 0
├─sdf1 8:81 0 1023M 0
└─iscsi-disk-1 (dm-3) 252:3 0 1G 0
└─iscsi-disk-1_part1 (dm-5) 252:5 0 1023M 0
sdh 8:112 0 1.5G 0
├─sdh1 8:113 0 1.5G 0
└─iscsi-disk-2 (dm-4) 252:4 0 1.5G 0
└─iscsi-disk-2_part1 (dm-6) 252:6 0 1.5G 0
sdg 8:96 0 1G 0
├─sdg1 8:97 0 1023M 0
└─iscsi-disk-1 (dm-3) 252:3 0 1G 0
└─iscsi-disk-1_part1 (dm-5) 252:5 0 1023M 0
sdi 8:128 0 1.5G 0
├─sdi1 8:129 0 1.5G 0
└─iscsi-disk-2 (dm-4) 252:4 0 1.5G 0
└─iscsi-disk-2_part1 (dm-6) 252:6 0 1.5G 0
sdj 8:144 0 1G 0
├─sdj1 8:145 0 1023M 0
└─iscsi-disk-1 (dm-3) 252:3 0 1G 0
└─iscsi-disk-1_part1 (dm-5) 252:5 0 1023M 0
sdk 8:160 0 1.5G 0
├─sdk1 8:161 0 1.5G 0
└─iscsi-disk-2 (dm-4) 252:4 0 1.5G 0
└─iscsi-disk-2_part1 (dm-6) 252:6 0 1.5G 0
gcrmapp01:~ #
gcrmapp01:~ # ll /dev/disk/by-id/scsi-*
lrwxrwxrwx 1 root root 10 Mar 12 16:11 /dev/disk/by-id/scsi-1VMware_VITDEVID9d26675efa7f2e1820c16c92bfa6be0e -> ../../dm-3
lrwxrwxrwx 1 root root 10 Mar 12 16:13 /dev/disk/by-id/scsi-1VMware_VITDEVID9d26675efa7f2e1820c16c92bfa6be0e-part1 -> ../../dm-5
lrwxrwxrwx 1 root root 10 Mar 12 16:11 /dev/disk/by-id/scsi-1VMware_VITDEVIDbf26675e34b40a4ef3606c92bfa6be9a -> ../../dm-4
lrwxrwxrwx 1 root root 10 Mar 12 16:11 /dev/disk/by-id/scsi-1VMware_VITDEVIDbf26675e34b40a4ef3606c92bfa6be9a-part1 -> ../../dm-6
lrwxrwxrwx 1 root root 10 Mar 12 16:11 /dev/disk/by-id/scsi-iscsi-disk-1 -> ../../dm-3
lrwxrwxrwx 1 root root 10 Mar 12 16:13 /dev/disk/by-id/scsi-iscsi-disk-1-part1 -> ../../dm-5
lrwxrwxrwx 1 root root 10 Mar 12 16:11 /dev/disk/by-id/scsi-iscsi-disk-2 -> ../../dm-4
lrwxrwxrwx 1 root root 10 Mar 12 16:11 /dev/disk/by-id/scsi-iscsi-disk-2-part1 -> ../../dm-6
gcrmapp01:~ #
/dev/disk/by-id/scsi-1VMware_VITDEVID9d26675efa7f2e1820c16c92bfa6be0e-part1
更新HA资源rsc_sbd为新磁盘设备
gcrmapp01:~ # crm configure edit rsc_sbd
primitive rsc_sbd stonith:external/sbd \
params sbd_device="/dev/disk/by-id/scsi-1VMware_VITDEVID9d26675efa7f2e1820c16c92bfa6be0e-part1" \
op monitor interval=15 timeout=30
# vim: set filetype=pcmk:
~
更新rsc_sbd配置
gcrmapp01:~ # cat /etc/sysconfig/sbd
SBD_DEVICE="/dev/disk/by-id/scsi-1VMware_VITDEVID9d26675efa7f2e1820c16c92bfa6be0e-part1"
SBD_OPTS="-W -P -t 130 -5 120"
gcrmapp01:~ #
验证
重启HA集群
rcopenais start
crm_mon -r1
节点2切换为standby
gcrmapp02:~ # crm node standby gcrmapp02
gcrmapp02:/ # crm_mon r1
non-option ARGV-elements: r1
Last updated: Thu Mar 12 16:48:59 2020
Last change: Thu Mar 12 16:46:19 2020 by root via crm_attribute on gcrmapp02
Stack: classic openais (with plugin)
Current DC: gcrmapp02 - partition with quorum
Version: 1.1.12-f47ea56
2 Nodes configured, 2 expected votes
6 Resources configured
Node gcrmapp02: standby
Online: [ gcrmapp01 ]
Master/Slave Set: ms_gcp_ascs30 [rsc_gcp_ascs30_inst]
Masters: [ gcrmapp01 ]
rsc_gcp_pas_vip (ocf::heartbeat:IPaddr2): Started gcrmapp01
rsc_gcp_ascs_vip (ocf::heartbeat:IPaddr2): Started gcrmapp01
rsc_gcp_DVEBMGS31_inst (ocf::heartbeat:SAPInstance): Started gcrmapp01
rsc_sbd (stonith:external/sbd): Started gcrmapp01
节点2切换上线
gcrmapp02:~ # crm node online gcrmapp02
gcrmapp02:~ # crm node show
gcrmapp01: normal
sdn_cnt=25 standby=off
gcrmapp02: normal
sdn_cnt=3 standby=off
gcrmapp02:~ #
gcrmapp02:/ # crm_mon r1
non-option ARGV-elements: r1
Last updated: Thu Mar 12 16:56:08 2020
Last change: Thu Mar 12 16:55:17 2020 by root via crm_attribute on gcrmapp02
Stack: classic openais (with plugin)
Current DC: gcrmapp02 - partition with quorum
Version: 1.1.12-f47ea56
2 Nodes configured, 2 expected votes
6 Resources configured
Online: [ gcrmapp01 gcrmapp02 ]
Master/Slave Set: ms_gcp_ascs30 [rsc_gcp_ascs30_inst]
Masters: [ gcrmapp01 ]
Slaves: [ gcrmapp02 ]
rsc_gcp_pas_vip (ocf::heartbeat:IPaddr2): Started gcrmapp02
rsc_gcp_ascs_vip (ocf::heartbeat:IPaddr2): Started gcrmapp01
rsc_gcp_DVEBMGS31_inst (ocf::heartbeat:SAPInstance): Started gcrmapp02
rsc_sbd (stonith:external/sbd): Started gcrmapp01
重启操作系统
删除旧iscsi设备及相关配置
从iscsi target 目标登出
iscsiadm -m node -T iqn.2017-02.com.bingocloud.s2:sn.001 -p 172.16.195.68 -u
删除iscsi target 目标
iscsiadm -m node -o delete -T iqn.2017-02.com.bingocloud.s2:sn.001 -p 172.16.195.68