# yum install -y pacemaker cman pcs
# chkconfig corosync off
将RHEL6.6安装光盘放入光驱中,并在确认本地yum源配置无误的情况下,执行以下命令:
# yum update -y pacemaker cman pcs
分别在节点zhcbdppm01与zhcbdppm02中执行以下命令:
# passwd hacluster
# service pcsd start
# chkconfig pcsd on
# pcs cluster auth zhcbdppm01 zhcbdppm02
# pcs cluster setup --start --name zhcbdppmcluster zhcbdppm01 zhcbdppm02
# pcs stonith create zhcbdppm01_fence fence_ipmilan \
ipaddr="21.244.112.195" \
passwd="superuser" \
login="zteroot" \
action="reboot" \
pcmk_host_list=zhcbdppm01
# pcs stonith create zhcbdppm02_fence fence_ipmilan \
ipaddr="21.244.112.196" \
passwd="superuser" \
login="zteroot" \
action="reboot" \
pcmk_host_list=zhcbdppm02
# pcs resource create mariadb_drbd_res \
ocf:linbit:drbd \
drbd_resource=mariadb_drbd
# pcs resource master mariadb-drbd mariadb_drbd_res \
master-max=1 \
master-node-max=1 \
clone-max=2 \
clone-node-max=1 \
notify=true
# pcs resource create manager_drbd_res \
ocf:linbit:drbd \
drbd_resource=manager_drbd
# pcs resource master manager-drbd manager_drbd_res \
master-max=1 \
master-node-max=1 \
clone-max=2 \
clone-node-max=1 \
notify=true
# pcs resource create mariadbIP ocf:heartbeat:IPaddr2 \
ip=21.244.112.198 \
cidr_netmask=24 \
--group mariadb_service
# pcs resource create mariadbFS ocf:heartbeat:Filesystem \
device="/dev/drbd0" \
directory="/data2" \
fstype="ext4" \
--group mariadb_service
# pcs resource create mariadbScript lsb:mysql \
--group mariadb_service
# pcs resource create managerIP ocf:heartbeat:IPaddr2 \
ip=21.244.112.197 \
cidr_netmask=24 \
--group manager_service
# pcs resource create managerFS ocf:heartbeat:Filesystem \
device="/dev/drbd1" \
directory="/data1" \
fstype="ext4" \
--group manager_service
# pcs resource create managerScript lsb:managerha \
--group manager_service
# pcs resource create loaderActive lsb:loaderActive.sh
# pcs resource create loaderStandby lsb:loaderStandby
# pcs constraint location loaderStandby prefers \
zhcbdppm01=200
# pcs constraint location loaderStandby prefers \
zhcbdppm02=100
# pcs constraint location loaderActive prefers \
zhcbdppm02=200
# pcs constraint location loaderActive prefers \
zhcbdppm01=100
# pcs constraint location mariadb_service prefers \
zhcbdppm01=200
# pcs constraint location mariadb_service prefers \
zhcbdppm02=100
# pcs constraint location manager_service prefers \
zhcbdppm01=200
# pcs constraint location manager_service prefers \
zhcbdppm02=100
# pcs constraint order promote mariadb-drbd then start mariadb_service
# pcs constraint order promote manager-drbd then start manager_service
# pcs constraint order start mariadb_service then start manager_service
# pcs constraint colocation add loaderStandby with loaderActive \
score=-INFINITY
# pcs constraint colocation add mariadb_service with master mariadb-drbd \
score=INFINITY
# pcs constraint colocation add manager_service with master manager-drbd \
score=INFINITY
# pcs resource defaults resource-stickiness=100
# pcs resource defaults failure-timeout=60s
# pcs resource defaults migration-threshold=1
# pcs resource meta loaderActive migration-threshold=20
# pcs resource update op monitor interval=5s
在zhcbdppm01节点上执行:
# pcs cluster standby zhcbdppm01
等待资源均切换到zhcbdppm02上时,执行:
# pcs cluster unstandby zhcbdppm01
在zhcbdppm02节点上执行:
# pcs cluster standby zhcbdppm02
等待资源均切换到zhcbdppm01上时,执行:
# pcs cluster unstandby zhcbdppm02
例:切换manager_service资源组到zhcbdppm02节点
# crm_resource --resource manager_service --move --node zhcbdppm02
例:将manager_service资源组回切到zhcbdppm01节点
# crm_resource --resource manager_service --move --node zhcbdppm01
# crm_mon
# pcs status
当DRBD服务出现脑裂时,两个节点的内容将不能同步,因此需要进行脑裂恢复。
为防止在恢复过程中造成数据丢失,建议先对数据进行备份。
DRBD出现脑裂时,首先应该停止双机服务,以便释放应用对DRBD磁盘的使用。需要在两个节点分别执行以下命令:
# pcs cluster stop
I. 在两个节点分别手动启动DRBD服务
# service drbd start
II. 在主节点执行命令,强制提升为DRBD主节点
# drbdadm primary all
III. 在备节点执行命令,强制降为DRBD备节点
# drbdadm secondary all
IV. 在备节点上执行数据同步操作
# drbdadm -- --discard-my-data connect all
V. 在主节点上执行连接操作
# drbdadm connect all
在恢复完DRBD后,需要停止DRBD服务,以便让双机来正常拉动DRBD服务。分别在两个节点执行以下命令
# service drbd stop
在两个节点上,分别执行以下操作
# pcs cluster start
当双机状态出现异常时,需要手动重启双机,以便恢复到正常状态。
I. 分别在两个节点重启双机
# pcs cluster stop
# pcs cluster start
II. 重启服务器后,再启动双机服务
如果只重启双机服务后,部分服务不能正常恢复,建议重启服务器后,再启动双机服务:
# reboot
# pcs cluster start