# mv /etc/corosync/corosync.conf.example /etc/corosync/corosync.conf
# vi /etc/corosync/corosync.conf

# Please read the corosync.conf.5 manual page
compatibility: whitetank

totem {
        version: 2
        secauth: off
        threads: 0
        interface {
                ringnumber: 0
                bindnetaddr: 192.168.1.104
                mcastaddr: 226.94.1.1
                mcastport: 5405
                ttl: 1
        }
}

logging {
        fileline: off
        to_stderr: no
        to_logfile: yes
        to_syslog: yes
        logfile: /var/log/cluster/corosync.log
        debug: off
        timestamp: on
        logger_subsys {
                subsys: AMF
                debug: off
        }
}

amf {
        mode: disabled
}

service {
        ver: 0
        name: pacemaker
}

aisexec {
        user: root
        group: root
}
bindnetaddr: the node's own IP address (or the network address, e.g. 192.168.1.0, which lets the same file be copied to both nodes unchanged).
mcastaddr: any valid multicast address will do.
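If you prefer the network-address form, it can be derived from the node IP and prefix; a quick check with the ipcalc utility shipped with EL6 (assuming a /24 network, as in this setup):

# ipcalc -n 192.168.1.104/24
NETWORK=192.168.1.0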
2.2 Generate the key file
(Generate it on one node first, then copy it to the other.)
Note: corosync requires authenticated communication between nodes, so a security key is needed. Once generated, it is saved automatically in the current directory as authkey, with permissions 400.
# cd /etc/corosync
# corosync-keygen
# ll
-rw-------. 1 root root  128 Jun 11 19:18 authkey
-rw-r--r--. 1 root root  509 Jun 11 19:54 corosync.conf
-rw-r--r--. 1 root root  445 Nov 22  2013 corosync.conf.example
-rw-r--r--. 1 root root 1084 Nov 22  2013 corosync.conf.example.udpu
drwxr-xr-x. 2 root root 4096 Nov 22  2013 service.d
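Note that corosync-keygen reads from /dev/random and may block until the system gathers enough entropy; generating disk I/O in another shell speeds it up (a throwaway example, adjust the device to your system):

# dd if=/dev/sda of=/dev/null bs=1M count=1000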
# scp authkey 192.168.1.105:/etc/corosync
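The configuration file must also exist on the second node; assuming it differs only in bindnetaddr (or not at all, if the network-address form is used), copy it over and adjust:

# scp corosync.conf 192.168.1.105:/etc/corosync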
# service corosync start //run on both nodes
(With only two nodes the cluster loses quorum as soon as one node fails, so the quorum requirement is ignored.)
# crm configure property stonith-enabled=false
# crm configure property no-quorum-policy=ignore
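To confirm both properties took effect, a quick check via crmsh:

# crm configure show | grep -E 'stonith-enabled|no-quorum-policy'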
# crm_mon -1
Last updated: Fri May 22 15:56:37 2015
Last change: Fri May 22 13:09:33 2015 via crmd on H04
Stack: classic openais (with plugin)
Current DC: H04 - partition with quorum
Version: 1.1.10-14.el6-368c726
2 Nodes configured, 2 expected votes
0 Resources configured

Online: [ H04 H05 ]
(Only the primary node needs this configuration.)
Note: Pacemaker's job is to manage the resources. This experiment builds a share based on RBD + iSCSI + NFS, so Pacemaker manages the rbd map, mount, NFS export, VIP, and related resources; in short, it automates every step from RBD to NFS.
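In sketch form, the chain of resources Pacemaker will bring up (names taken from the crm configuration below) is:

p_scsi_map_1 (iSCSI login to the tgt/rbd target)
  -> p_fs_rbd_1 (mount the XFS filesystem)
  -> p_export_rbd_1 (NFS export of the mount point)
  -> p_vip_1 (floating service IP)
plus rpcbind and the NFS server (g_nfs), cloned so they run on every node.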
# service tgtd start
# vi /etc/tgt/targets.conf

<target iqn.2015-05.rbdstore.example.com:iscsi>
    driver iscsi
    bs-type rbd
    backing-store rbd/img02    # <iscsi-pool>/<iscsi-rbd-image>
</target>

# service tgtd reload
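To check that tgtd picked up the target and its rbd backing store:

# tgtadm --lld iscsi --mode target --op show //list targets and LUNs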
Note: commands used to create the rbd image:
# rbd create rbd/img02 --size 10240 --stripe-unit 65536 --stripe-count 16 --image-format 2 //striped
# rbd create rbd/img02 --size 10240 //not striped
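The image parameters can be confirmed afterwards:

# rbd info rbd/img02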
# iscsiadm -m discovery -t sendtargets -p localhost //discover the target
# iscsiadm -m node -T iqn.2015-05.rbdstore.example.com:iscsi -l -p localhost
Logging in to [iface: default, target: iqn.2015-05.rbdstore.example.com:iscsi, portal: ::1,3260] (multiple)
Login to [iface: default, target: iqn.2015-05.rbdstore.example.com:iscsi, portal: ::1,3260] successful.
# fdisk -l //find the newly added device
Disk /dev/sdb: 1073 MB, 1073741824 bytes
34 heads, 61 sectors/track, 1011 cylinders
Units = cylinders of 2074 * 512 = 1061888 bytes
Sector size (logical/physical): 512 bytes / 4194304 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
Disk identifier: 0x00000000
# mkfs.xfs /dev/sdb
# iscsiadm -m node -T iqn.2015-05.rbdstore.example.com:iscsi --logout
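Because the custom iscsi.in agent will manage login/logout from here on, it is probably wise to keep the OS from logging in automatically at boot as well (an optional step; adjust the target name to yours):

# iscsiadm -m node -T iqn.2015-05.rbdstore.example.com:iscsi -o update -n node.startup -v manual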
Note: the purpose of step 3.2 is to write a filesystem onto the rbd-backed tgt device; the procedure is automated by scripts later.
Except for step 3.3.1, which is executed on both nodes, all remaining steps run on one node only.
Create the shared mount directory in advance (the filesystem UUID is used as the directory name here; any name works):
# mkdir /mnt/f462234b-bf5a-4e9c-969c-09596ad14afc
Note: look up the UUID with blkid:
# blkid
/dev/mapper/vg_00-lvroot: UUID="186c6608-98ec-4584-9208-3053879d01bd" TYPE="ext4"
/dev/sda1: UUID="6e8a54ab-1053-4c71-9df0-9ce51b70b2ac" TYPE="ext4"
/dev/sda2: UUID="q9g11Q-BF1k-b11a-dkV5-sttO-ibu0-091Z8u" TYPE="LVM2_member"
/dev/sda3: UUID="575bbe16-ac4e-4d26-adfc-353c66f7ab43" TYPE="xfs"
/dev/sdb: UUID="f462234b-bf5a-4e9c-969c-09596ad14afc" TYPE="xfs"

Deploy the custom resource agent scripts (step 3.3.1, executed on both machines).

The rbd map monitoring script:
# mkdir /usr/lib/ocf/resource.d/ceph
# cd /usr/lib/ocf/resource.d/ceph/
# chmod +x rbd.in

The custom iSCSI device monitoring script:
# mkdir /usr/lib/ocf/resource.d/tgtmap
# cd /usr/lib/ocf/resource.d/tgtmap/
# chmod +x iscsi.in
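Note that the agent scripts themselves must be copied into these directories beforehand (the copy is not shown above). Once in place, they can be smoke-tested with ocf-tester from the resource-agents package, passing the same parameters the crm configuration will use:

# ocf-tester -n p_scsi_map_1 -o tgt_name="iqn.2015-05.rbdstore.example.com:iscsi" -o tgt_ip=localhost /usr/lib/ocf/resource.d/tgtmap/iscsi.in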
(You can run crm configure edit and paste the content below directly.)
primitive p_scsi_map_1 ocf:tgtmap:iscsi.in \
    params tgt_name="iqn.2015-05.rbdstore.example.com:iscsi" tgt_ip=localhost \
    op monitor interval=5
primitive p_fs_rbd_1 Filesystem \
    params directory="/mnt/f462234b-bf5a-4e9c-969c-09596ad14afc" fstype=xfs device="-U f462234b-bf5a-4e9c-969c-09596ad14afc" fast_stop=no \
    op monitor interval=20s timeout=40s \
    op start interval=0 timeout=60s \
    op stop interval=0 timeout=60s
primitive p_export_rbd_1 exportfs \
    params directory="/mnt/f462234b-bf5a-4e9c-969c-09596ad14afc" clientspec="192.168.0.0/24" options="rw,async,no_subtree_check,no_root_squash" fsid=1 \
    op monitor interval=10s timeout=20s
primitive p_vip_1 IPaddr \
    params ip=192.168.1.123 cidr_netmask=24 \
    op monitor interval=5
primitive p_rpcbind lsb:rpcbind \
    op monitor interval=10s timeout=30s
primitive p_nfs_server lsb:nfs \
    op monitor interval=10s timeout=30s
group g_nfs p_rpcbind p_nfs_server
group g_rbd_share_1 p_scsi_map_1 p_fs_rbd_1 p_export_rbd_1 p_vip_1
clone clo_nfs g_nfs \
    meta globally-unique="false" target-role="Started"
location l_g_rbd_share_1 g_rbd_share_1 inf: H04
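If the configuration was entered through crm configure edit, saving the editor commits it; either way the result can be sanity-checked before moving on:

# crm configure verify
# crm configure show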
# crm_mon -1
# showmount -e 192.168.1.123
Export list for 192.168.1.123:
/mnt/f462234b-bf5a-4e9c-969c-09596ad14afc 192.168.0.0/24
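Finally, failover can be exercised by putting the active node in standby and watching the resource group migrate (standard crmsh commands, node names as in this setup):

# crm node standby H04
# crm_mon -1 //g_rbd_share_1 should now be running on H05
# crm node online H04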