04 RHCS: GFS2 and CLVM
Build a high-availability cluster on shared storage
[root@node4 ~]# yum -y install scsi-target-utils
[root@node4 ~]# vim /etc/tgt/targets.conf
Append the following at the end:
backing-store /dev/sda4
initiator-address 192.168.1.0/24
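For reference, the two lines above go inside a <target> block; the complete stanza (using the target IQN that the initiators log in to later) would look roughly like this:
<target iqn.2015-01.com.magedu:node4.t1>
    backing-store /dev/sda4
    initiator-address 192.168.1.0/24
</target>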
[root@node4 ~]# fdisk /dev/sda #create sda4 and allocate a 50GB partition
[root@node4 ~]# partx -a /dev/sda
[root@node4 ~]# service tgtd start
[root@node4 ~]# tgtadm -L iscsi -m target -o show
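To have tgtd and its target come back after a reboot (a suggestion, not part of the original walkthrough):
[root@node4 ~]# chkconfig tgtd on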
#Install the iSCSI initiator software and set a unique InitiatorName on each node
[root@node1 ~]# yum -y install iscsi-initiator-utils;echo "InitiatorName=`iscsi-iname -p iqn.2015-01.com.magedu`" >/etc/iscsi/initiatorname.iscsi
[root@node2 ~]# yum -y install iscsi-initiator-utils;echo "InitiatorName=`iscsi-iname -p iqn.2015-01.com.magedu`" >/etc/iscsi/initiatorname.iscsi
[root@node3 ~]# yum -y install iscsi-initiator-utils;echo "InitiatorName=`iscsi-iname -p iqn.2015-01.com.magedu`" >/etc/iscsi/initiatorname.iscsi
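Each node should now have its own randomly generated initiator name under the iqn.2015-01.com.magedu prefix; a quick optional check:
[root@node1 ~]# cat /etc/iscsi/initiatorname.iscsi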
#Start the iSCSI services
[root@node1 ~]# service iscsi start ;service iscsid start
[root@node2 ~]# service iscsi start ;service iscsid start
[root@node3 ~]# service iscsi start ;service iscsid start
#Discover the storage target
[root@node1 ~]# iscsiadm -m discovery -t st -p 192.168.1.154
[root@node2 ~]# iscsiadm -m discovery -t st -p 192.168.1.154
[root@node3 ~]# iscsiadm -m discovery -t st -p 192.168.1.154
#Log in to the target
[root@node1 ~]# iscsiadm -m node -T iqn.2015-01.com.magedu:node4.t1 -p 192.168.1.154 -l
[root@node2 ~]# iscsiadm -m node -T iqn.2015-01.com.magedu:node4.t1 -p 192.168.1.154 -l
[root@node3 ~]# iscsiadm -m node -T iqn.2015-01.com.magedu:node4.t1 -p 192.168.1.154 -l
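After a successful login the exported LUN appears on each node as a new local disk (here it shows up as /dev/sdb); an optional way to confirm:
[root@node1 ~]# cat /proc/partitions
[root@node1 ~]# fdisk -l /dev/sdb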
#Install the GFS2 utilities
[root@node1 ~]# yum -y install gfs2-utils
[root@node2 ~]# yum -y install gfs2-utils
[root@node3 ~]# yum -y install gfs2-utils
Load the gfs2 kernel module
[root@node1 ~]# modprobe gfs2
[root@node1 ~]# lsmod | grep gfs2
gfs2 548432 0
dlm 148231 22 gfs2
[root@node1 ~]# fdisk /dev/sdb #create two 20GB partitions
[root@node1 ~]# fdisk -l /dev/sdb
Disk /dev/sdb: 53.7 GB, 53691549696 bytes
64 heads, 32 sectors/track, 51204 cylinders
Units = cylinders of 2048 * 512 = 1048576 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk identifier: 0xabd7f733
Device Boot Start End Blocks Id System
/dev/sdb1 1 20481 20972528 83 Linux
/dev/sdb2 20482 40962 20972544 83 Linux
#Create the GFS2 cluster filesystem (-j 2: two journals, one per node that will mount it; -t: lock table name in cluster_name:fs_name form, where cluster_name must match the cluster name in cluster.conf)
[root@node1 ~]# mkfs.gfs2 -j 2 -p lock_dlm -t tcluster:sdb1 /dev/sdb1
This will destroy any data on /dev/sdb1.
It appears to contain: data
Are you sure you want to proceed? [y/n] y
Device: /dev/sdb1
Blocksize: 4096
Device Size 20.00 GB (5243132 blocks)
Filesystem Size: 20.00 GB (5243131 blocks)
Journals: 2
Resource Groups: 81
Locking Protocol: "lock_dlm"
Lock Table: "tcluster:sdb1"
UUID: aebcc094-7b50-3df9-da3c-537894310e47
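#View the GFS2 superblock (lock table, UUID, block size) with tunegfs2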
[root@node1 ~]# tunegfs2 /dev/sdb1
[root@node1 ~]# tunegfs2 -l /dev/sdb1
tunegfs2 (May 11 2016 09:59:26)
Filesystem volume name: tcluster:sdb1
Filesystem UUID: aebcc094-7b50-3df9-da3c-537894310e47
Filesystem magic number: 0x1161970
Block size: 4096
Block shift: 12
Root inode: 22
Master inode: 23
Lock Protocol: lock_dlm
Lock table: tcluster:sdb1
#Mount the GFS2 partition on node1
[root@node1 ~]# mkdir -p /cluster/data
[root@node1 ~]# mount -t gfs2 /dev/sdb1 /cluster/data/
[root@node1 ~]# mount
/dev/sda2 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
none on /sys/kernel/config type configfs (rw)
/dev/sdb1 on /cluster/data type gfs2 (rw,relatime,hostdata=jid=0)
#Show the journals on the filesystem
[root@node1 ~]# gfs2_tool journals /dev/sdb1
journal1 - 128MB
journal0 - 128MB
2 journal(s) found.
#Mount the GFS2 partition on node2
[root@node2 ~]# mkdir -p /cluster/data
[root@node2 ~]# partx -a /dev/sdb
[root@node2 ~]# mount -t gfs2 /dev/sdb1 /cluster/data/
[root@node2 ~]# mount
/dev/sda2 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
none on /sys/kernel/config type configfs (rw)
/dev/sdb1 on /cluster/data type gfs2 (rw,relatime,hostdata=jid=1)
#Test 1: copy a file into the GFS2 filesystem on node2
[root@node2 ~]# cd /cluster/data/
[root@node2 data]# cp /etc/fstab .
[root@node1 ~]# cd /cluster/data/
[root@node1 data]# ls
fstab
Result: the file added to the GFS2 partition on node2 is immediately visible on node1
#Test 2: on node1, remove content from the file that node2 copied
[root@node1 data]# vim fstab
Delete the last four lines
Viewing the file on node2 shows that its last four lines have been removed
#Mount the GFS2 partition on node3
[root@node3 ~]# mkdir -p /cluster/data
[root@node3 ~]# partx -a /dev/sdb
[root@node3 ~]# mount -t gfs2 /dev/sdb1 /cluster/data/
Too many nodes mounting filesystem, no free journals
#The mount fails: the filesystem has only two journals and both are already in use by node1 and node2
#Fix: add a journal from a node where the filesystem is already mounted (gfs2_jadd requires a mounted filesystem)
[root@node2 data]# gfs2_jadd -j 1 /dev/sdb1
Filesystem: /cluster/data
Old Journals 2
New Journals 3
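Each added journal takes its space (128MB apiece here, as shown by gfs2_tool journals above) from the filesystem itself; the new count can be re-checked with the same command used earlier:
[root@node2 data]# gfs2_tool journals /dev/sdb1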
#Mount on node3 again
[root@node3 ~]# mount -t gfs2 /dev/sdb1 /cluster/data/
[root@node3 ~]# mount
/dev/sda2 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
none on /sys/kernel/config type configfs (rw)
/dev/sdb1 on /cluster/data type gfs2 (rw,relatime,hostdata=jid=2)
#The mount succeeds
#Freeze the GFS2 filesystem
[root@node2 ~]# gfs2_tool freeze /cluster/data/
[root@node2 ~]# mount
/dev/sda2 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
none on /sys/kernel/config type configfs (rw)
/dev/sdb1 on /cluster/data type gfs2 (rw,relatime,hostdata=jid=1)
#While the filesystem is frozen, data can still be read from it, but writes are blocked until it is unfrozen
#Unfreeze the filesystem
[root@node2 ~]# gfs2_tool unfreeze /cluster/data/
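#Install the clustered LVM (lvm2-cluster) package on all three nodes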
[root@node1 data]# yum -y install lvm2-cluster
[root@node2 data]# yum -y install lvm2-cluster
[root@node3 data]# yum -y install lvm2-cluster
#Enable clustered locking for LVM
[root@node1 data]# lvmconf --enable-cluster
[root@node2 data]# lvmconf --enable-cluster
[root@node3 data]# lvmconf --enable-cluster
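lvmconf --enable-cluster switches LVM to clustered locking (locking_type = 3 in /etc/lvm/lvm.conf); an optional check:
[root@node1 data]# grep "locking_type" /etc/lvm/lvm.conf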
#Start the clustered LVM daemon (clvmd) on all nodes
[root@node1 ~]# service clvmd start
[root@node2 ~]# service clvmd start
[root@node3 ~]# service clvmd start
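To have clvmd start automatically at boot on every node (a suggestion, not part of the original steps):
[root@node1 ~]# chkconfig clvmd on
#Next, tag the second partition /dev/sdb2 as Linux LVM (type 8e) so it can be used as a clustered physical volume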
[root@node1 data]# fdisk /dev/sdb
WARNING: DOS-compatible mode is deprecated. It's strongly recommended to
switch off the mode (command 'c') and change display units to
sectors (command 'u').
Command (m for help): t
Partition number (1-4): 2
Hex code (type L to list codes): 8e
Changed system type of partition 2 to 8e (Linux LVM)
Command (m for help): w
The partition table has been altered!
Calling ioctl() to re-read partition table.
WARNING: Re-reading the partition table failed with error 16: Device or resource busy.
The kernel still uses the old table. The new table will be used at
the next reboot or after you run partprobe(8) or kpartx(8)
Syncing disks.
[root@node1 data]# partx -a /dev/sdb
[root@node1 data]# pvcreate /dev/sdb2
[root@node1 data]# vgcreate cvg /dev/sdb2
[root@node1 data]# lvcreate -L 10G -n clv cvg
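Because clvmd is running, the new volume group and logical volume should be visible on node2 and node3 without any extra steps; an optional check:
[root@node2 ~]# lvs cvg
[root@node3 ~]# vgs cvg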
[root@node1 data]# mkfs.gfs2 -j 3 -t tcluster:clv -p lock_dlm /dev/cvg/clv
This will destroy any data on /dev/cvg/clv.
It appears to contain: symbolic link to `../dm-0'
Are you sure you want to proceed? [y/n] y
Device: /dev/cvg/clv
Blocksize: 4096
Device Size 10.00 GB (2621440 blocks)
Filesystem Size: 10.00 GB (2621438 blocks)
Journals: 3
Resource Groups: 40
Locking Protocol: "lock_dlm"
Lock Table: "tcluster:clv"
UUID: 1c42a8f1-5d14-5982-891f-3ce0faaa2123
[root@node3 ~]# mount -t gfs2 /dev/cvg/clv /mnt/
[root@node3 ~]# mount
/dev/sda2 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
none on /sys/kernel/config type configfs (rw)
/dev/sdb1 on /cluster/data type gfs2 (rw,relatime,hostdata=jid=2)
/dev/mapper/cvg-clv on /mnt type gfs2 (rw,relatime,hostdata=jid=0)
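Since the filesystem was created with three journals, node1 and node2 can mount it concurrently as well, e.g. (using a hypothetical mount point /mnt/clv):
[root@node1 ~]# mkdir -p /mnt/clv ; mount -t gfs2 /dev/cvg/clv /mnt/clv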
#Extend the clustered logical volume
#1) Grow the logical volume (the block device)
[root@node3 ~]# lvextend -L +5G /dev/cvg/clv
#2) Grow the GFS2 filesystem to fill the new space (the filesystem must be mounted when gfs2_grow runs)
[root@node3 ~]# gfs2_grow /dev/cvg/clv
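After gfs2_grow completes, the extra space should be visible on every node that has the filesystem mounted; an optional check on node3:
[root@node3 ~]# df -h /mnt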