Reference: http://freeloda.blog.51cto.com/2033581/1275528
Environment Preparation
1. Operating system
CentOS 6.4 x86_64
2. Software
corosync-1.4.1-15.el6_4.1.x86_64
pacemaker-1.1.8-7.el6.x86_64
crmsh-1.2.6-0.rc2.2.1.x86_64
kmod-drbd84-8.4.2-1.el6_3.elrepo.x86_64
drbd84-utils-8.4.2-1.el6.elrepo.x86_64
mysql-5.5.33
3. Topology
4. Configure hostname resolution on each node
node1:
[root@node1 ~]# uname -n
node1.test.com
[root@node1 ~]# cat /etc/hosts
127.0.0.1       localhost localhost.localdomain localhost4 localhost4.localdomain4
::1             localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.201   node1.test.com node1
192.168.1.202   node2.test.com node2
node2:
[root@node2 ~]# uname -n
node2.test.com
[root@node2 ~]# cat /etc/hosts
127.0.0.1       localhost localhost.localdomain localhost4 localhost4.localdomain4
::1             localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.201   node1.test.com node1
192.168.1.202   node2.test.com node2
5. Set up passwordless SSH (mutual trust) between the nodes
node1:
[root@node1 ~]# ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''
[root@node1 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@node2.test.com
node2:
[root@node2 ~]# ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''
[root@node2 ~]# ssh-copy-id -i .ssh/id_rsa.pub root@node1.test.com
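A quick way to confirm that the keys work in both directions is to run a command on the peer over SSH; if no password prompt appears, the trust relationship is in place (a simple verification step, not shown in the original post):
[root@node1 ~]# ssh node2.test.com 'uname -n'
node2.test.com
[root@node2 ~]# ssh node1.test.com 'uname -n'
node1.test.com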
6. Synchronize time on each node
node1:
[root@node1 ~]# ntpdate 202.120.2.101
node2:
[root@node2 ~]# ntpdate 202.120.2.101
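A single ntpdate run only corrects the clocks once. If the two nodes should stay in sync over time, one common approach is a periodic cron job against the same NTP server (an optional sketch, assuming 202.120.2.101 remains reachable; add the same entry on node2):
[root@node1 ~]# crontab -e
*/30 * * * * /usr/sbin/ntpdate 202.120.2.101 &> /dev/null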
7. Disable the firewall and SELinux on each node
node1:
[root@node1 ~]# service iptables stop
[root@node1 ~]# chkconfig iptables off
[root@node1 ~]# cat /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=disabled
node2:
[root@node2 ~]# service iptables stop
[root@node2 ~]# chkconfig iptables off
[root@node2 ~]# cat /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=disabled
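Note that SELINUX=disabled in /etc/selinux/config only takes effect after a reboot. If SELinux is currently enabled, enforcement can also be dropped in the running session with setenforce on both nodes:
[root@node1 ~]# setenforce 0
[root@node1 ~]# getenforce
Permissive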
8. Install the yum repositories on each node
node1:
[root@node1 src]# wget http://download.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
[root@node1 src]# rpm -ivh epel-release-6-8.noarch.rpm
warning: epel-release-6-8.noarch.rpm: Header V3 RSA/SHA256 Signature, key ID 0608b895: NOKEY
Preparing...                ########################################### [100%]
   1:epel-release           ########################################### [100%]
[root@node1 src]# rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
[root@node1 ~]# rpm -ivh http://elrepo.org/elrepo-release-6-5.el6.elrepo.noarch.rpm
[root@node1 ~]# yum list
node2:
[root@node2 src]# wget http://download.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
[root@node2 src]# rpm -ivh epel-release-6-8.noarch.rpm
warning: epel-release-6-8.noarch.rpm: Header V3 RSA/SHA256 Signature, key ID 0608b895: NOKEY
Preparing...                ########################################### [100%]
   1:epel-release           ########################################### [100%]
[root@node2 src]# rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-6
[root@node2 ~]# rpm -ivh http://elrepo.org/elrepo-release-6-5.el6.elrepo.noarch.rpm
[root@node2 ~]# yum list
III. Corosync Installation and Configuration (see: http://freeloda.blog.51cto.com/2033581/1272417)
1. Install Corosync
node1:
[root@node1 ~]# yum install -y corosync
node2:
[root@node2 ~]# yum install -y corosync
2. Configure Corosync (note: the individual options are not explained in detail here; the earlier posts cover them all)
[root@node1 ~]# cd /etc/corosync/
[root@node1 corosync]# ll
total 16
-rw-r--r-- 1 root root  445 May 15 05:09 corosync.conf.example
-rw-r--r-- 1 root root 1084 May 15 05:09 corosync.conf.example.udpu
drwxr-xr-x 2 root root 4096 May 15 05:09 service.d
drwxr-xr-x 2 root root 4096 May 15 05:09 uidgid.d
[root@node1 corosync]# cp corosync.conf.example corosync.conf
[root@node1 corosync]# vim corosync.conf
[root@node1 corosync]# cat corosync.conf
# Please read the corosync.conf.5 manual page
compatibility: whitetank
totem {
    version: 2
    secauth: on
    threads: 0
    interface {
        ringnumber: 0
        bindnetaddr: 192.168.1.0
        mcastaddr: 226.94.10.10
        mcastport: 5405
        ttl: 1
    }
}
logging {
    fileline: off
    to_stderr: no
    to_logfile: yes
    to_syslog: no
    logfile: /var/log/cluster/corosync.log
    debug: off
    timestamp: on
    logger_subsys {
        subsys: AMF
        debug: off
    }
}
amf {
    mode: disabled
}
service {
    ver: 0
    name: pacemaker
}
aisexec {
    user: root
    group: root
}
3. Generate the authentication key
[root@node1 corosync]# mv /dev/{random,random.bak}
[root@node1 corosync]# ln -s /dev/urandom /dev/random
[root@node1 corosync]# corosync-keygen
Corosync Cluster Engine Authentication key generator.
Gathering 1024 bits for key from /dev/random.
Press keys on your keyboard to generate entropy.
Writing corosync key to /etc/corosync/authkey.
[root@node1 corosync]# ll
total 24
-r-------- 1 root root  128 Aug 17 17:17 authkey
-rw-r--r-- 1 root root  544 Aug 17 17:14 corosync.conf
-rw-r--r-- 1 root root  445 May 15 05:09 corosync.conf.example
-rw-r--r-- 1 root root 1084 May 15 05:09 corosync.conf.example.udpu
drwxr-xr-x 2 root root 4096 May 15 05:09 service.d
drwxr-xr-x 2 root root 4096 May 15 05:09 uidgid.d
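The /dev/random swap above is only there so that corosync-keygen does not block while waiting for entropy. Once the key has been written, the real device can be restored (a small cleanup step, not shown in the original):
[root@node1 corosync]# rm -f /dev/random
[root@node1 corosync]# mv /dev/random.bak /dev/random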
4. Copy the configuration files from node1 to node2
[root@node1 corosync]# scp authkey corosync.conf node2:/etc/corosync/
authkey                                       100%  128     0.1KB/s   00:00
corosync.conf                                 100%  542     0.5KB/s   00:00
That completes the Corosync configuration; next we configure Pacemaker.
IV. Pacemaker Installation and Configuration (see: http://freeloda.blog.51cto.com/2033581/1274533)
1. Install Pacemaker
node1:
[root@node1 ~]# yum install -y pacemaker
node2:
[root@node2 ~]# yum install -y pacemaker
2. Install crmsh
node1:
[root@node1 ~]# wget http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/crmsh-1.2.6-0.rc2.2.1.x86_64.rpm
[root@node1 ~]# rpm -ivh crmsh-1.2.6-0.rc2.2.1.x86_64.rpm
warning: crmsh-1.2.6-0.rc2.2.1.x86_64.rpm: Header V3 DSA/SHA1 Signature, key ID 7b709911: NOKEY
error: Failed dependencies:
    pssh is needed by crmsh-1.2.6-0.rc2.2.1.x86_64
    python-dateutil is needed by crmsh-1.2.6-0.rc2.2.1.x86_64
    python-lxml is needed by crmsh-1.2.6-0.rc2.2.1.x86_64
[root@node1 ~]# yum install -y python-dateutil python-lxml
[root@node1 ~]# rpm -ivh crmsh-1.2.6-0.rc2.2.1.x86_64.rpm --nodeps
warning: crmsh-1.2.6-0.rc2.2.1.x86_64.rpm: Header V3 DSA/SHA1 Signature, key ID 7b709911: NOKEY
Preparing...                ########################################### [100%]
   1:crmsh                  ########################################### [100%]
[root@node1 ~]# crm
Cannot change active directory to /var/lib/pacemaker/cores/root: No such file or directory (2)
crm(live)# help
This is crm shell, a Pacemaker command line interface.
Available commands:
    cib              manage shadow CIBs
    resource         resources management
    configure        CRM cluster configuration
    node             nodes management
    options          user preferences
    history          CRM cluster history
    site             Geo-cluster support
    ra               resource agents information center
    status           show cluster status
    help,?           show help (help topics for list of topics)
    end,cd,up        go back one level
    quit,bye,exit    exit the program
node2:
[root@node2 ~]# wget http://download.opensuse.org/repositories/network:/ha-clustering:/Stable/CentOS_CentOS-6/x86_64/crmsh-1.2.6-0.rc2.2.1.x86_64.rpm
[root@node2 ~]# rpm -ivh crmsh-1.2.6-0.rc2.2.1.x86_64.rpm
warning: crmsh-1.2.6-0.rc2.2.1.x86_64.rpm: Header V3 DSA/SHA1 Signature, key ID 7b709911: NOKEY
error: Failed dependencies:
    pssh is needed by crmsh-1.2.6-0.rc2.2.1.x86_64
    python-dateutil is needed by crmsh-1.2.6-0.rc2.2.1.x86_64
    python-lxml is needed by crmsh-1.2.6-0.rc2.2.1.x86_64
[root@node2 ~]# yum install -y python-dateutil python-lxml
[root@node2 ~]# rpm -ivh crmsh-1.2.6-0.rc2.2.1.x86_64.rpm --nodeps
warning: crmsh-1.2.6-0.rc2.2.1.x86_64.rpm: Header V3 DSA/SHA1 Signature, key ID 7b709911: NOKEY
Preparing...                ########################################### [100%]
   1:crmsh                  ########################################### [100%]
[root@node2 ~]# crm
Cannot change active directory to /var/lib/pacemaker/cores/root: No such file or directory (2)
crm(live)# help
This is crm shell, a Pacemaker command line interface.
Available commands:
    cib              manage shadow CIBs
    resource         resources management
    configure        CRM cluster configuration
    node             nodes management
    options          user preferences
    history          CRM cluster history
    site             Geo-cluster support
    ra               resource agents information center
    status           show cluster status
    help,?           show help (help topics for list of topics)
    end,cd,up        go back one level
    quit,bye,exit    exit the program
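The "Cannot change active directory to /var/lib/pacemaker/cores/root" message is only a warning: crm tries to switch into a per-user core-dump directory that does not exist yet. If you want to silence it, creating the directory on both nodes should be sufficient (an assumption, not covered in the original post):
[root@node1 ~]# mkdir -p /var/lib/pacemaker/cores/root
[root@node2 ~]# mkdir -p /var/lib/pacemaker/cores/root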
3. Start Corosync (note: because Pacemaker was integrated into Corosync in the configuration above, starting Corosync also starts Pacemaker)
[root@node1 ~]# ssh node2 "service corosync start"
Starting Corosync Cluster Engine (corosync):               [  OK  ]
[root@node1 ~]# service corosync start
Starting Corosync Cluster Engine (corosync):               [  OK  ]
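Besides grepping the log, the membership ring can be checked directly with corosync-cfgtool, which ships with corosync and reports the ring status of the local node:
[root@node1 ~]# corosync-cfgtool -s    # expect "status = ring 0 active with no faults"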
4. Check the startup logs
(1) Verify that the Corosync engine started correctly
[root@node1 ~]# grep -e "Corosync Cluster Engine" -e "configuration file" /var/log/cluster/corosync.log
Aug 17 17:31:20 corosync [MAIN  ] Corosync Cluster Engine ('1.4.1'): started and ready to provide service.
Aug 17 17:31:20 corosync [MAIN  ] Successfully read main configuration file '/etc/corosync/corosync.conf'.
(2) Verify that the initial membership notifications were sent
[root@node1 ~]# grep TOTEM /var/log/cluster/corosync.log
Aug 17 17:31:20 corosync [TOTEM ] Initializing transport (UDP/IP Multicast).
Aug 17 17:31:20 corosync [TOTEM ] Initializing transmit/receive security: libtomcrypt SOBER128/SHA1HMAC (mode 0).
Aug 17 17:31:21 corosync [TOTEM ] The network interface [192.168.1.201] is now up.
Aug 17 17:31:21 corosync [TOTEM ] A processor joined or left the membership and a new membership was formed.
(3) Check for errors during startup
[root@node1 ~]# grep ERROR: /var/log/cluster/corosync.log
Aug 17 17:31:21 corosync [pcmk  ] ERROR: process_ais_conf: You have configured a cluster using the Pacemaker plugin for Corosync. The plugin is not supported in this environment and will be removed very soon.
Aug 17 17:31:21 corosync [pcmk  ] ERROR: process_ais_conf: Please see Chapter 8 of 'Clusters from Scratch' (http://www.clusterlabs.org/doc) for details on using Pacemaker with CMAN
(4) Verify that Pacemaker started correctly
[root@node1 ~]# grep pcmk_startup /var/log/cluster/corosync.log
Aug 17 17:31:21 corosync [pcmk  ] info: pcmk_startup: CRM: Initialized
Aug 17 17:31:21 corosync [pcmk  ] Logging: Initialized pcmk_startup
Aug 17 17:31:21 corosync [pcmk  ] info: pcmk_startup: Maximum core file size is: 18446744073709551615
Aug 17 17:31:21 corosync [pcmk  ] info: pcmk_startup: Service: 9
Aug 17 17:31:21 corosync [pcmk  ] info: pcmk_startup: Local hostname: node1.test.com
5. Check the cluster status
[root@node1 ~]# crm status
Cannot change active directory to /var/lib/pacemaker/cores/root: No such file or directory (2)
Last updated: Sat Aug 17 17:36:24 2013
Last change: Sat Aug 17 17:31:33 2013 via crmd on node2.test.com
Stack: classic openais (with plugin)
Current DC: node2.test.com - partition with quorum
Version: 1.1.8-7.el6-394e906
2 Nodes configured, 2 expected votes
0 Resources configured.
Online: [ node1.test.com node2.test.com ]
Note: node1 and node2 are both online, the DC is node2, and the partition has quorum.
V. DRBD Installation and Configuration (see: http://freeloda.blog.51cto.com/2033581/1275384)
1. Install DRBD
node1:
[root@node1 ~]# yum -y install drbd84 kmod-drbd84
node2:
[root@node2 ~]# yum -y install drbd84 kmod-drbd84
2. Configure DRBD
[root@node1 ~]# cat /etc/drbd.d/global_common.conf
global {
    usage-count no;       # let LINBIT collect DRBD usage statistics? "yes" opts in; we opt out with "no"
    # minor-count dialog-refresh disable-ip-verification
}
common {
    handlers {
        pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
        pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
        local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
        # fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
        # split-brain "/usr/lib/drbd/notify-split-brain.sh root";
        # out-of-sync "/usr/lib/drbd/notify-out-of-sync.sh root";
        # before-resync-target "/usr/lib/drbd/snapshot-resync-target-lvm.sh -p 15 -- -c 16k";
        # after-resync-target /usr/lib/drbd/unsnapshot-resync-target-lvm.sh;
    }
    startup {
        # wfc-timeout degr-wfc-timeout outdated-wfc-timeout wait-after-sb
    }
    options {
        # cpu-mask on-no-data-accessible
    }
    disk {
        # size max-bio-bvecs on-io-error fencing disk-barrier disk-flushes
        # disk-drain md-flushes resync-rate resync-after al-extents
        # c-plan-ahead c-delay-target c-fill-target c-max-rate
        # c-min-rate disk-timeout
        on-io-error detach;       # on an I/O error, detach the backing device
    }
    net {
        # protocol timeout max-epoch-size max-buffers unplug-watermark
        # connect-int ping-int sndbuf-size rcvbuf-size ko-count
        # allow-two-primaries cram-hmac-alg shared-secret after-sb-0pri
        # after-sb-1pri after-sb-2pri always-asbp rr-conflict
        # ping-timeout data-integrity-alg tcp-cork on-congestion
        # congestion-fill congestion-extents csums-alg verify-alg
        # use-rle
        cram-hmac-alg "sha1";         # peer authentication algorithm
        shared-secret "mydrbdlab";    # shared secret used for authentication
    }
}
3. Define the resource
[root@node1 drbd.d]# cat web.res
resource web {
    on node1.test.com {
        device    /dev/drbd0;
        disk      /dev/sdb;
        address   192.168.1.201:7789;
        meta-disk internal;
    }
    on node2.test.com {
        device    /dev/drbd0;
        disk      /dev/sdb;
        address   192.168.1.202:7789;
        meta-disk internal;
    }
}
4. Copy the configuration files to node2
[root@node1 drbd.d]# scp global_common.conf web.res node2:/etc/drbd.d/
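Before creating the metadata, it can be useful to let drbdadm parse the configuration on both nodes; drbdadm dump prints the resource as it was understood and errors out on syntax problems (a quick sanity check, not part of the original post):
[root@node1 drbd.d]# drbdadm dump web
[root@node1 drbd.d]# ssh node2 'drbdadm dump web'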
5. Initialize the resource on node1 and node2
node1:
[root@node1 ~]# drbdadm create-md web
Writing meta data...
initializing activity log
NOT initializing bitmap
New drbd meta data block successfully created.
node2:
[root@node2 ~]# drbdadm create-md web
Writing meta data...
initializing activity log
NOT initializing bitmap
New drbd meta data block successfully created.
6. Start DRBD
node1:
[root@node1 ~]# service drbd start
node2:
[root@node2 ~]# service drbd start
7. Check the status
node1:
[root@node1 ~]# drbd-overview
  0:web/0  Connected Secondary/Secondary Inconsistent/Inconsistent C r-----
node2:
[root@node2 ~]# drbd-overview
  0:web/0  Connected Secondary/Secondary Inconsistent/Inconsistent C r-----
8. Make node1 the primary node
[root@node1 ~]# drbdadm -- --overwrite-data-of-peer primary web
[root@node1 ~]# drbd-overview
  0:web/0  Connected Primary/Secondary UpToDate/UpToDate C r-----
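The UpToDate/UpToDate state shown above only appears once the initial full synchronization has finished. While it is still running, progress can be followed from either node via /proc/drbd (or by repeating drbd-overview):
[root@node1 ~]# watch -n1 'cat /proc/drbd'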
9. Format and mount
[root@node1 ~]# mke2fs -j /dev/drbd0
[root@node1 ~]# mkdir /drbd
[root@node1 ~]# mount /dev/drbd0 /drbd/
[root@node1 ~]# mount
/dev/sda2 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
/dev/sda3 on /data type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
/dev/drbd0 on /drbd type ext3 (rw)
[root@node1 ~]# cd /drbd/
[root@node1 drbd]# cp /etc/inittab /drbd/
[root@node1 drbd]# ll
total 20
-rw-r--r-- 1 root root   884 Aug 17 13:50 inittab
drwx------ 2 root root 16384 Aug 17 13:49 lost+found
10. Make node2 the primary node
[root@node1 ~]# umount /drbd/
[root@node1 ~]# drbdadm secondary web
[root@node1 ~]# drbd-overview
[root@node2 ~]# drbdadm primary web
[root@node2 ~]# drbd-overview
  0:web/0  Connected Primary/Secondary UpToDate/UpToDate C r-----
[root@node2 ~]# mkdir /drbd
[root@node2 ~]# mount /dev/drbd0 /drbd/
[root@node2 ~]# ll /drbd/
total 20
-rw-r--r-- 1 root root   884 Aug 17 13:50 inittab
drwx------ 2 root root 16384 Aug 17 13:49 lost+found
That completes the DRBD configuration; next we set up MySQL.
VI. MySQL Installation and Configuration
1. Install MySQL
node1:
[root@node1 ~]# tar xf mysql-5.5.33-linux2.6-x86_64.tar.gz -C /usr/local/
[root@node1 ~]# cd /usr/local/
[root@node1 local]# ln -sv mysql-5.5.33-linux2.6-x86_64 mysql
"mysql" -> "mysql-5.5.33-linux2.6-x86_64"
[root@node1 ~]# chown -R root:mysql /usr/local/mysql/*
[root@node1 ~]# scp mysql-5.5.33-linux2.6-x86_64.tar.gz node2:/root/
node2:
[root@node2 ~]# tar xf mysql-5.5.33-linux2.6-x86_64.tar.gz -C /usr/local/
[root@node2 ~]# cd /usr/local/
[root@node2 local]# ln -sv mysql-5.5.33-linux2.6-x86_64 mysql
"mysql" -> "mysql-5.5.33-linux2.6-x86_64"
[root@node2 ~]# chown -R root:mysql /usr/local/mysql/*
2. Create the MySQL user and group
node1:
(1) Create the mysql group
[root@node1 ~]# groupadd -g 3306 mysql
(2) Create the mysql user
[root@node1 ~]# useradd -u 3306 -g mysql -s /sbin/nologin -M mysql
(3) Verify
[root@node1 ~]# id mysql
uid=3306(mysql) gid=3306(mysql) groups=3306(mysql)
node2:
(1) Create the mysql group
[root@node2 ~]# groupadd -g 3306 mysql
(2) Create the mysql user
[root@node2 ~]# useradd -u 3306 -g mysql -s /sbin/nologin -M mysql
(3) Verify
[root@node2 ~]# id mysql
uid=3306(mysql) gid=3306(mysql) groups=3306(mysql)
3. Make node1 the DRBD primary and mount the device
[root@node1 ~]# drbd-overview
  0:web/0  Connected Primary/Secondary UpToDate/UpToDate C r-----
[root@node1 ~]# mkdir /mydata
[root@node1 ~]# mount /dev/drbd0 /mydata/
[root@node1 ~]# cd /mydata/
[root@node1 mydata]# mkdir data
[root@node1 mydata]# chown -R mysql.mysql /mydata/data/
[root@node1 mydata]# ll
total 20
drwxr-xr-x 2 mysql mysql  4096 Aug 17 18:37 data
drwx------ 2 root  root  16384 Aug 17 13:49 lost+found
4. Provide the configuration file
[root@node1 ~]# cp /usr/local/mysql/support-files/my-large.cnf /etc/my.cnf
[root@node1 ~]# vim /etc/my.cnf
# add the following line:
datadir = /mydata/data
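For context, the relevant part of /etc/my.cnf after the edit would look roughly like this (a sketch; apart from the datadir line the values come from the my-large.cnf template):
[mysqld]
port        = 3306
socket      = /tmp/mysql.sock
datadir     = /mydata/data
skip-external-locking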
5. Initialize MySQL
[root@node1 data]# /usr/local/mysql/scripts/mysql_install_db --datadir=/mydata/data/ --basedir=/usr/local/mysql --user=mysql
[root@node1 data]# ll
total 8
drwx------ 2 mysql root 4096 Aug 17 18:40 mysql
drwx------ 2 mysql root 4096 Aug 17 18:40 test
6. Provide the init script
[root@node1 ~]# cp /usr/local/mysql/support-files/mysql.server /etc/init.d/mysqld
[root@node1 ~]# chmod +x /etc/init.d/mysqld
7. Start and test MySQL
[root@node1 data]# service mysqld start
Starting MySQL..... SUCCESS!
[root@node1 data]# /usr/local/mysql/bin/mysql
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 1
Server version: 5.5.33-log MySQL Community Server (GPL)
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| performance_schema |
| test               |
+--------------------+
4 rows in set (0.08 sec)
mysql>
8. Copy the MySQL configuration file and init script from node1 to node2
[root@node1 ~]# scp /etc/my.cnf node2:/etc/
my.cnf                                        100% 4687     4.6KB/s   00:00
[root@node1 ~]# scp /etc/init.d/mysqld node2:/etc/init.d/
mysqld                                        100%   11KB  10.6KB/s   00:00
9. Stop MySQL and disable it at boot
[root@node1 ~]# service mysqld stop
Shutting down MySQL. SUCCESS!
[root@node1 data]# chkconfig mysqld off
[root@node1 data]# chkconfig mysqld --list
mysqld          0:off   1:off   2:off   3:off   4:off   5:off   6:off
10. Make node2 the DRBD primary and mount the device
[root@node1 ~]# umount /mydata/
[root@node1 ~]# drbdadm secondary web
[root@node1 ~]# drbd-overview
  0:web/0  Connected Secondary/Secondary UpToDate/UpToDate C r-----
[root@node2 ~]# drbdadm primary web
[root@node2 ~]# drbd-overview
  0:web/0  Connected Primary/Secondary UpToDate/UpToDate C r-----
[root@node2 ~]# mkdir /mydata
[root@node2 ~]# mount /dev/drbd0 /mydata/
[root@node2 ~]# cd /mydata/
[root@node2 mydata]# ll
total 20
drwxr-xr-x 5 mysql mysql  4096 Aug 17 19:41 data
drwx------ 2 root  root  16384 Aug 17 13:49 lost+found
11. Start and test MySQL on node2
[root@node2 data]# service mysqld start
Starting MySQL... SUCCESS!
[root@node2 data]# /usr/local/mysql/bin/mysql
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 1
Server version: 5.5.33-log MySQL Community Server (GPL)
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql>
12. Stop MySQL on node2 and disable it at boot
[root@node2 ~]# service mysqld stop
Shutting down MySQL. SUCCESS!
[root@node2 ~]# chkconfig mysqld off
[root@node2 ~]# chkconfig mysqld --list
mysqld          0:off   1:off   2:off   3:off   4:off   5:off   6:off
That completes the MySQL setup. One note: MySQL may fail to start, and the error log will show that the libaio library is missing; install it with yum install -y libaio and MySQL will start normally.
VII. Resource Management with crmsh
1. Stop DRBD and disable it at boot
node1:
[root@node1 ~]# service drbd stop
Stopping all DRBD resources: .
[root@node1 ~]# chkconfig drbd off
[root@node1 ~]# chkconfig drbd --list
drbd            0:off   1:off   2:off   3:off   4:off   5:off   6:off
node2:
[root@node2 ~]# service drbd stop
Stopping all DRBD resources:
[root@node2 ~]# chkconfig drbd off
[root@node2 ~]# chkconfig drbd --list
drbd            0:off   1:off   2:off   3:off   4:off   5:off   6:off
2. Add the DRBD resource
[root@node1 ~]# crm
Cannot change active directory to /var/lib/pacemaker/cores/root: No such file or directory (2)
crm(live)# configure
crm(live)configure# property stonith-enabled=false
crm(live)configure# property no-quorum-policy=ignore
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# primitive mysqldrbd ocf:heartbeat:drbd params drbd_resource=web op start timeout=240 op stop timeout=100 op monitor role=Master interval=20 timeout=30 op monitor role=Slave interval=30 timeout=30
crm(live)configure# ms ms_mysqldrbd mysqldrbd meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
crm(live)configure# show
node node1.test.com
node node2.test.com
primitive mysqldrbd ocf:heartbeat:drbd \
    params drbd_resource="web" \
    op start timeout="240" interval="0" \
    op stop timeout="100" interval="0" \
    op monitor role="Master" interval="20" timeout="30" \
    op monitor role="Slave" interval="30" timeout="30"
ms ms_mysqldrbd mysqldrbd \
    meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
property $id="cib-bootstrap-options" \
    dc-version="1.1.8-7.el6-394e906" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2" \
    stonith-enabled="false" \
    no-quorum-policy="ignore"
[root@node1 ~]# crm status
Cannot change active directory to /var/lib/pacemaker/cores/root: No such file or directory (2)
Last updated: Sat Aug 17 20:14:16 2013
Last change: Sat Aug 17 20:12:55 2013 via cibadmin on node1.test.com
Stack: classic openais (with plugin)
Current DC: node1.test.com - partition with quorum
Version: 1.1.8-7.el6-394e906
2 Nodes configured, 2 expected votes
2 Resources configured.
Online: [ node1.test.com node2.test.com ]
 Master/Slave Set: ms_mysqldrbd [mysqldrbd]
     Masters: [ node2.test.com ]
     Slaves: [ node1.test.com ]
3. Add the filesystem resource
crm(live)configure# primitive mystore ocf:heartbeat:Filesystem params device=/dev/drbd0 directory=/mydata fstype=ext3 op start timeout=60 op stop timeout=60
crm(live)configure# verify
crm(live)configure# colocation mystore_with_ms_mysqldrbd inf: ms_mysqldrbd:Master
ERROR: syntax in colocation: colocation mystore_with_ms_mysqldrbd inf: ms_mysqldrbd:Master
crm(live)configure# colocation mystore_with_ms_mysqldrbd inf: mystore ms_mysqldrbd:Master
crm(live)configure# order mystore_after_ms_mysqldrbd mandatory: ms_mysqldrbd:promote mystore:start
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# show
node node1.test.com
node node2.test.com
primitive mysqldrbd ocf:heartbeat:drbd \
    params drbd_resource="web" \
    op start timeout="240" interval="0" \
    op stop timeout="100" interval="0" \
    op monitor role="Master" interval="20" timeout="30" \
    op monitor role="Slave" interval="30" timeout="30"
primitive mystore ocf:heartbeat:Filesystem \
    params device="/dev/drbd0" directory="/mydata" fstype="ext3" \
    op start timeout="60" interval="0" \
    op stop timeout="60" interval="0"
ms ms_mysqldrbd mysqldrbd \
    meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
colocation mystore_with_ms_mysqldrbd inf: mystore ms_mysqldrbd:Master
order mystore_after_ms_mysqldrbd inf: ms_mysqldrbd:promote mystore:start
property $id="cib-bootstrap-options" \
    dc-version="1.1.8-7.el6-394e906" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2" \
    stonith-enabled="false" \
    no-quorum-policy="ignore"
[root@node1 ~]# crm status
Cannot change active directory to /var/lib/pacemaker/cores/root: No such file or directory (2)
Last updated: Sat Aug 17 20:37:26 2013
Last change: Sat Aug 17 20:19:51 2013 via cibadmin on node1.test.com
Stack: classic openais (with plugin)
Current DC: node1.test.com - partition with quorum
Version: 1.1.8-7.el6-394e906
2 Nodes configured, 2 expected votes
3 Resources configured.
Online: [ node1.test.com node2.test.com ]
 Master/Slave Set: ms_mysqldrbd [mysqldrbd]
     Masters: [ node2.test.com ]
     Slaves: [ node1.test.com ]
 mystore (ocf::heartbeat:Filesystem): Started node2.test.com
[root@node2 ~]# mount
/dev/sda2 on / type ext4 (rw)
proc on /proc type proc (rw)
sysfs on /sys type sysfs (rw)
devpts on /dev/pts type devpts (rw,gid=5,mode=620)
tmpfs on /dev/shm type tmpfs (rw)
/dev/sda1 on /boot type ext4 (rw)
/dev/sda3 on /data type ext4 (rw)
none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)
/dev/drbd0 on /mydata type ext3 (rw)
[root@node2 ~]# cd /mydata/data/
[root@node2 data]# ll
total 29824
-rw-rw---- 1 mysql mysql 18874368 Aug 17 19:48 ibdata1
-rw-rw---- 1 mysql mysql  5242880 Aug 17 19:48 ib_logfile0
-rw-rw---- 1 mysql mysql  5242880 Aug 17 18:57 ib_logfile1
drwx------ 2 mysql root      4096 Aug 17 18:57 mysql
-rw-rw---- 1 mysql mysql    27698 Aug 17 18:57 mysql-bin.000001
-rw-rw---- 1 mysql mysql  1061358 Aug 17 18:57 mysql-bin.000002
-rw-rw---- 1 mysql mysql      126 Aug 17 19:41 mysql-bin.000003
-rw-rw---- 1 mysql mysql      126 Aug 17 19:48 mysql-bin.000004
-rw-rw---- 1 mysql mysql       76 Aug 17 19:47 mysql-bin.index
-rw-r----- 1 mysql root      2302 Aug 17 19:41 node1.test.com.err
-rw-r----- 1 mysql root      1849 Aug 17 19:48 node2.test.com.err
drwx------ 2 mysql mysql     4096 Aug 17 18:57 performance_schema
drwx------ 2 mysql root      4096 Aug 17 18:57 test
4. Add the MySQL resource
crm(live)# configure
crm(live)configure# primitive mysqld lsb:mysqld
crm(live)configure# colocation mysqld_with_mystore inf: mysqld mystore
crm(live)configure# verify
crm(live)configure# show
node node1.test.com
node node2.test.com
primitive mysqld lsb:mysqld
primitive mysqldrbd ocf:heartbeat:drbd \
    params drbd_resource="web" \
    op start timeout="240" interval="0" \
    op stop timeout="100" interval="0" \
    op monitor role="Master" interval="20" timeout="30" \
    op monitor role="Slave" interval="30" timeout="30"
primitive mystore ocf:heartbeat:Filesystem \
    params device="/dev/drbd0" directory="/mydata" fstype="ext3" \
    op start timeout="60" interval="0" \
    op stop timeout="60" interval="0"
ms ms_mysqldrbd mysqldrbd \
    meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
colocation mysqld_with_mystore inf: mysqld mystore
colocation mystore_with_ms_mysqldrbd inf: mystore ms_mysqldrbd:Master
order mystore_after_ms_mysqldrbd inf: ms_mysqldrbd:promote mystore:start
property $id="cib-bootstrap-options" \
    dc-version="1.1.8-7.el6-394e906" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2" \
    stonith-enabled="false" \
    no-quorum-policy="ignore"
crm(live)configure# order mysqld_after_mystore mandatory: mystore mysqld
crm(live)configure# verify
crm(live)configure# show
node node1.test.com
node node2.test.com
primitive mysqld lsb:mysqld
primitive mysqldrbd ocf:heartbeat:drbd \
    params drbd_resource="web" \
    op start timeout="240" interval="0" \
    op stop timeout="100" interval="0" \
    op monitor role="Master" interval="20" timeout="30" \
    op monitor role="Slave" interval="30" timeout="30"
primitive mystore ocf:heartbeat:Filesystem \
    params device="/dev/drbd0" directory="/mydata" fstype="ext3" \
    op start timeout="60" interval="0" \
    op stop timeout="60" interval="0"
ms ms_mysqldrbd mysqldrbd \
    meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
colocation mysqld_with_mystore inf: mysqld mystore
colocation mystore_with_ms_mysqldrbd inf: mystore ms_mysqldrbd:Master
order mysqld_after_mystore inf: mystore mysqld
order mystore_after_ms_mysqldrbd inf: ms_mysqldrbd:promote mystore:start
property $id="cib-bootstrap-options" \
    dc-version="1.1.8-7.el6-394e906" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2" \
    stonith-enabled="false" \
    no-quorum-policy="ignore"
crm(live)configure# commit
[root@node1 ~]# crm status
Cannot change active directory to /var/lib/pacemaker/cores/root: No such file or directory (2)
Last updated: Sat Aug 17 20:46:35 2013
Last change: Sat Aug 17 20:46:07 2013 via cibadmin on node1.test.com
Stack: classic openais (with plugin)
Current DC: node1.test.com - partition with quorum
Version: 1.1.8-7.el6-394e906
2 Nodes configured, 2 expected votes
4 Resources configured.
Online: [ node1.test.com node2.test.com ]
 Master/Slave Set: ms_mysqldrbd [mysqldrbd]
     Masters: [ node2.test.com ]
     Slaves: [ node1.test.com ]
 mystore (ocf::heartbeat:Filesystem): Started node2.test.com
 mysqld (lsb:mysqld): Started node2.test.com
[root@node2 ~]# netstat -ntulp | grep :3306
tcp        0      0 0.0.0.0:3306        0.0.0.0:*           LISTEN      26907/mysqld
[root@node2 ~]# /usr/local/mysql/bin/mysql
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 1
Server version: 5.5.33-log MySQL Community Server (GPL)
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql>
5. Add the VIP resource
crm(live)# configure
crm(live)configure# primitive vip ocf:heartbeat:IPaddr params ip=192.168.1.200 nic=eth0 cidr_netmask=255.255.255.0
crm(live)configure# colocation vip_with_ms_mysqldrbd inf: ms_mysqldrbd:Master vip
crm(live)configure# verify
crm(live)configure# show
node node1.test.com
node node2.test.com
primitive mysqld lsb:mysqld
primitive mysqldrbd ocf:heartbeat:drbd \
    params drbd_resource="web" \
    op start timeout="240" interval="0" \
    op stop timeout="100" interval="0" \
    op monitor role="Master" interval="20" timeout="30" \
    op monitor role="Slave" interval="30" timeout="30"
primitive mystore ocf:heartbeat:Filesystem \
    params device="/dev/drbd0" directory="/mydata" fstype="ext3" \
    op start timeout="60" interval="0" \
    op stop timeout="60" interval="0"
primitive vip ocf:heartbeat:IPaddr \
    params ip="192.168.1.200" nic="eth0" cidr_netmask="255.255.255.0"
ms ms_mysqldrbd mysqldrbd \
    meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
colocation mysqld_with_mystore inf: mysqld mystore
colocation mystore_with_ms_mysqldrbd inf: mystore ms_mysqldrbd:Master
colocation vip_with_ms_mysqldrbd inf: ms_mysqldrbd:Master vip
order mysqld_after_mystore inf: mystore mysqld
order mystore_after_ms_mysqldrbd inf: ms_mysqldrbd:promote mystore:start
property $id="cib-bootstrap-options" \
    dc-version="1.1.8-7.el6-394e906" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2" \
    stonith-enabled="false" \
    no-quorum-policy="ignore"
crm(live)configure# commit
[root@node1 ~]# crm status
Cannot change active directory to /var/lib/pacemaker/cores/root: No such file or directory (2)
Last updated: Sat Aug 17 20:53:15 2013
Last change: Sat Aug 17 20:52:11 2013 via cibadmin on node1.test.com
Stack: classic openais (with plugin)
Current DC: node1.test.com - partition with quorum
Version: 1.1.8-7.el6-394e906
2 Nodes configured, 2 expected votes
5 Resources configured.
Online: [ node1.test.com node2.test.com ]
 Master/Slave Set: ms_mysqldrbd [mysqldrbd]
     Masters: [ node1.test.com ]
     Slaves: [ node2.test.com ]
 mystore (ocf::heartbeat:Filesystem): Started node1.test.com
 mysqld (lsb:mysqld): Started node1.test.com
 vip (ocf::heartbeat:IPaddr): Started node1.test.com
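To confirm that the VIP is really up, the address can be checked on eth0 of whichever node currently holds the vip resource (node1 in the status output above):
[root@node1 ~]# ip addr show eth0 | grep 192.168.1.200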
That completes the resource configuration; now let's test it.
6. Test the MySQL high-availability cluster
(1) Grant remote access
[root@node1 ~]# /usr/local/mysql/bin/mysql
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 2
Server version: 5.5.33-log MySQL Community Server (GPL)
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> grant all on *.* to root@"192.168.1.%" identified by "123456";
Query OK, 0 rows affected (0.10 sec)
mysql> flush privileges;
Query OK, 0 rows affected (0.00 sec)
(2) Test from a remote host
[root@nfs ~]# yum install -y mysql
[root@nfs ~]# mysql
ERROR 2002 (HY000): Can't connect to local MySQL server through socket '/var/lib/mysql/mysql.sock' (2)
[root@nfs ~]# mysql -uroot -p123456 -h 192.168.1.200
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 3
Server version: 5.5.33-log MySQL Community Server (GPL)
Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| performance_schema |
| test               |
+--------------------+
4 rows in set (0.06 sec)
mysql> create database mydb;
Query OK, 1 row affected (0.00 sec)
mysql>
(3) Simulate a failure
[root@node1 ~]# crm
Cannot change active directory to /var/lib/pacemaker/cores/root: No such file or directory (2)
crm(live)# node
crm(live)node# standby
crm(live)node# show
node1.test.com: normal
    standby: on
node2.test.com: normal
[root@node2 ~]# crm status
Cannot change active directory to /var/lib/pacemaker/cores/root: No such file or directory (2)
Last updated: Sat Aug 17 21:02:13 2013
Last change: Sat Aug 17 21:02:00 2013 via crm_attribute on node1.test.com
Stack: classic openais (with plugin)
Current DC: node1.test.com - partition with quorum
Version: 1.1.8-7.el6-394e906
2 Nodes configured, 2 expected votes
5 Resources configured.
Node node1.test.com: standby
Online: [ node2.test.com ]
 Master/Slave Set: ms_mysqldrbd [mysqldrbd]
     Masters: [ node2.test.com ]
     Stopped: [ mysqldrbd:1 ]
 mystore (ocf::heartbeat:Filesystem): Started node2.test.com
 mysqld (lsb:mysqld): Started node2.test.com
 vip (ocf::heartbeat:IPaddr): Started node2.test.com
(4) Test remotely again
mysql> create database testdb;
ERROR 2006 (HY000): MySQL server has gone away
No connection. Trying to reconnect...
Connection id:    1
Current database: *** NONE ***
Query OK, 1 row affected (0.42 sec)
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mydb               |
| mysql              |
| performance_schema |
| test               |
| testdb             |
+--------------------+
6 rows in set (0.12 sec)
mysql>
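After the failover test, node1 can be taken out of standby so that it is again eligible to run resources; crm status then shows where the cluster has placed everything:
[root@node1 ~]# crm node online
[root@node1 ~]# crm status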