8. Installing MySQL
1. MySQL will be installed on node1, so node1 must first be made the primary node.
# Demote node2 to secondary (unmount first; DRBD will not demote a device that is still in use)
[root@node2 ~]# umount /dev/drbd0
[root@node2 ~]# drbdadm secondary web
[root@node2 drbd.d]# drbd-overview
0:web Connected Secondary/Secondary UpToDate/UpToDate C r----
# Promote node1 to primary and mount the filesystem
[root@node1 ~]# drbdadm primary web
[root@node1 ~]# mount /dev/drbd0 /web
[root@node1 ~]# drbd-overview
0:web Connected Primary/Secondary UpToDate/UpToDate C r---- /web ext3 1.9G 36M 1.8G 2%
# Remove the RPM packages placed in this directory earlier for verification
[root@node1 ~]# cd /web
[root@node1 web]# rm -rf *.rpm
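Because this unmount/demote/promote/mount sequence recurs throughout the setup, a small helper run on the node that is to become primary can reduce mistakes. A minimal sketch, assuming the DRBD resource is named web, the mount point is /web, and passwordless SSH to the current primary (the script name and argument are illustrative):
#!/bin/bash
# drbd-takeover.sh <current-primary> -- make the local node the DRBD primary
# illustrative sketch; assumes resource "web", device /dev/drbd0, mount point /web
PEER=$1
ssh "$PEER" 'umount /dev/drbd0 && drbdadm secondary web' || exit 1
drbdadm primary web || exit 1    # promote the local node
mount /dev/drbd0 /web            # mount the replicated filesystem
drbd-overview                    # confirm roles: Primary/Secondary
For example, on node1: ./drbd-takeover.sh node2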
2. Create the MySQL data directory
# Create the data directory on the replicated filesystem
[root@node1 web]# mkdir -pv mydata/data
mkdir: created directory `mydata'
mkdir: created directory `mydata/data'
Create the mysql group and user
# Create them on node1
[root@node1 web]# groupadd -r -g 306 mysql
[root@node1 web]# useradd -g mysql -r -s /sbin/nologin -M -u 306 mysql
[root@node1 web]# id mysql
uid=306(mysql) gid=306(mysql) groups=306(mysql)
# Create them on node2
[root@node2 ~]# groupadd -r -g 306 mysql
[root@node2 ~]# useradd -g mysql -r -s /sbin/nologin -M -u 306 mysql
[root@node2 ~]# id mysql
uid=306(mysql) gid=306(mysql) groups=306(mysql)
Note: the mysql group and user must be exactly the same (same UID and GID) on node1 and node2, because file ownership on the replicated device is stored numerically.
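A quick way to confirm the accounts match is to compare the id output from both nodes side by side (assuming passwordless SSH between the nodes, as used elsewhere in this setup):
[root@node1 ~]# id mysql; ssh node2 'id mysql'
uid=306(mysql) gid=306(mysql) groups=306(mysql)
uid=306(mysql) gid=306(mysql) groups=306(mysql)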
# Change the owner and group of the MySQL data directory to mysql
[root@node1 web]# chown -R mysql:mysql /web/mydata
[root@node1 web]# ll /web
total 20
drwx------ 2 root root 16384 Dec 19 05:56 lost+found
drwxr-xr-x 3 mysql mysql 4096 Dec 20 01:46 mydata
# Verify that node2 sees the same ownership as node1
# First demote node1 to secondary
[root@node1 ~]# umount /dev/drbd0
[root@node1 ~]# mount   # confirm /dev/drbd0 is no longer listed
[root@node1 ~]# drbdadm secondary web
[root@node1 ~]# drbd-overview
0:web Connected Secondary/Secondary UpToDate/UpToDate C r----
# Promote node2 to primary and inspect the directory
[root@node2 ~]# drbdadm primary web
[root@node2 ~]# mount /dev/drbd0 /web
[root@node2 ~]# mount   # confirm /dev/drbd0 is mounted on /web
[root@node2 ~]# ll /web
total 20
drwx------ 2 root root 16384 Dec 19 05:56 lost+found
drwxr-xr-x 3 mysql mysql 4096 Dec 20 01:46 mydata
Note: the owner and group of the MySQL data directory on node2 match node1, so everything is working as expected.
3. Prepare the MySQL installation package and install it
# The binary tarball used:
mysql-5.5.19-linux2.6-i686.tar.gz
# Extract and install on the primary node (currently node2)
[root@node2 ~]# tar vxf mysql-5.5.19-linux2.6-i686.tar.gz -C /usr/local
[root@node2 ~]# cd /usr/local
[root@node2 local]# ln -sv mysql-5.5.19-linux2.6-i686/ mysql
[root@node2 local]# cd mysql
[root@node2 mysql]# chown -R mysql:mysql .
[root@node2 mysql]# ll
# Initialize the system databases into the DRBD-backed data directory
[root@node2 mysql]# scripts/mysql_install_db --user=mysql --datadir=/web/mydata/data
[root@node2 mysql]# ls /web/mydata/data
mysql performance_schema test
# Change the owner of the mysql directory back to root (the data directory stays owned by mysql)
[root@node2 mysql]# chown -R root /usr/local/mysql/
[root@node2 mysql]# ll
# Set up the main configuration file
[root@node2 mysql]# cd support-files/
[root@node2 support-files]# cp my-large.cnf /etc/my.cnf
[root@node2 support-files]# vim /etc/my.cnf
# Modify the following entries
thread_concurrency = 2
datadir = /web/mydata/data    # this line is newly added
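The same two edits can be scripted rather than made in vim; a sketch using sed (the thread_concurrency line already exists in the copied my-large.cnf, while the datadir line is appended under [mysqld]):
[root@node2 support-files]# sed -i 's/^thread_concurrency = .*/thread_concurrency = 2/' /etc/my.cnf
[root@node2 support-files]# sed -i '/^\[mysqld\]/a datadir = /web/mydata/data' /etc/my.cnf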
# Install an init script so MySQL can be started as a service
[root@node2 support-files]# cp mysql.server /etc/init.d/mysqld
[root@node2 support-files]# chkconfig --add mysqld
[root@node2 support-files]# service mysqld start
Starting MySQL......... [ OK ]
# Check that MySQL is listening on its port (3306)
[root@node2 support-files]# netstat -tnlp
# Add the MySQL client directory to the PATH
[root@node2 support-files]# vim /etc/profile
# Add this line
PATH=$PATH:/usr/local/mysql/bin
# Re-source the file
[root@node2 support-files]# . /etc/profile
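Alternatively, a drop-in file under /etc/profile.d keeps the change out of /etc/profile itself (an equivalent sketch):
[root@node2 support-files]# echo 'export PATH=$PATH:/usr/local/mysql/bin' > /etc/profile.d/mysql.sh
[root@node2 support-files]# . /etc/profile.d/mysql.sh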
# Connect to the MySQL server
[root@node2 support-files]# mysql
# Create the mydb database for later verification
mysql> create database mydb;
Query OK, 1 row affected (0.00 sec)
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mydb               |
| mysql              |
| performance_schema |
| test               |
+--------------------+
5 rows in set (0.00 sec)
Note: being able to connect and create the database confirms that MySQL is installed correctly.
# Now stop the MySQL service
[root@node2 support-files]# cd
[root@node2 ~]# service mysqld stop
Shutting down MySQL.. [ OK ]
4. Installing MySQL on the secondary node
First, copy the MySQL tarball to the secondary node:
[root@node2 ~]# scp mysql-5.5.19-linux2.6-i686.tar.gz node1:/root
Then install and configure MySQL on node1:
[root@node1 ~]# tar vxf mysql-5.5.19-linux2.6-i686.tar.gz -C /usr/local
[root@node1 ~]# cd /usr/local
[root@node1 local]# ln -sv mysql-5.5.19-linux2.6-i686/ mysql
[root@node1 local]# cd mysql
[root@node1 mysql]# chown -R mysql:mysql .
[root@node1 mysql]# chown -R root .
[root@node1 mysql]# ll
Note: the file owners and groups in this directory now match node2 (binaries owned by root, group mysql).
# Copy the configuration files modified on node2 over to node1
[root@node2 ~]# scp /etc/my.cnf node1:/etc
[root@node2 ~]# scp /etc/init.d/mysqld node1:/etc/init.d/
[root@node2 ~]# scp /etc/profile node1:/etc
# Re-source the file
[root@node1 ~]# . /etc/profile
Node1 cannot start MySQL yet because the data directory lives on the DRBD device, so we must make node1 the primary node.
# On node2: unmount and demote
[root@node2 ~]# umount /dev/drbd0
[root@node2 ~]# drbdadm secondary web
[root@node2 ~]# drbd-overview
0:web Connected Secondary/Secondary UpToDate/UpToDate C r----
# On node1: promote and mount
[root@node1 ~]# drbdadm primary web
[root@node1 ~]# mount /dev/drbd0 /web
[root@node1 ~]# drbd-overview
0:web Connected Primary/Secondary UpToDate/UpToDate C r---- /web ext3 1.9G 65M 1.7G 4%
# Check that the data directory and its files are visible on node1
[root@node1 ~]# ll /web/mydata/data/
total 28752
-rw-rw---- 1 mysql mysql 18874368 Dec 20 03:01 ibdata1
-rw-rw---- 1 mysql mysql 5242880 Dec 20 03:01 ib_logfile0
-rw-rw---- 1 mysql mysql 5242880 Dec 20 02:34 ib_logfile1
drwx------ 2 mysql mysql 4096 Dec 20 02:53 mydb
drwx------ 2 mysql root 4096 Dec 20 02:22 mysql
-rw-rw---- 1 mysql mysql 126 Dec 20 02:42 mysql-bin.000001
-rw-rw---- 1 mysql mysql 209 Dec 20 03:01 mysql-bin.000002
-rw-rw---- 1 mysql mysql 38 Dec 20 02:49 mysql-bin.index
-rw-rw---- 1 mysql root 3472 Dec 20 03:01 node2.wang.com.err
drwx------ 2 mysql mysql 4096 Dec 20 02:22 performance_schema
drwx------ 2 mysql root 4096 Dec 20 02:22 test
# Start the MySQL service
[root@node1 ~]# service mysqld start
Starting MySQL....... [ OK ]
[root@node1 ~]# mysql
mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mydb               |
| mysql              |
| performance_schema |
| test               |
+--------------------+
5 rows in set (0.01 sec)
Note: node1 now shows the mydb database we created on node2, confirming the replicated data is intact.
At this point the MySQL high-availability setup works, but every failover has been manual. To make it switch automatically, the following three resources must be managed by the cluster:
mysqld (the MySQL service)
the DRBD resource web
the mount of /dev/drbd0 on /web
9. Installing and Configuring Corosync
Prepare the following packages:
cluster-glue-1.0.6-1.6.el5.i386.rpm
cluster-glue-libs-1.0.6-1.6.el5.i386.rpm
corosync-1.2.7-1.1.el5.i386.rpm
corosynclib-1.2.7-1.1.el5.i386.rpm
heartbeat-3.0.3-2.3.el5.i386.rpm
heartbeat-libs-3.0.3-2.3.el5.i386.rpm
libesmtp-1.0.4-5.el5.i386.rpm
openais-1.1.3-1.6.el5.i386.rpm
openaislib-1.1.3-1.6.el5.i386.rpm
pacemaker-1.0.11-1.2.el5.i386.rpm
pacemaker-libs-1.0.11-1.2.el5.i386.rpm
perl-TimeDate-1.16-5.el5.noarch.rpm
resource-agents-1.0.4-1.1.el5.i386.rpm
1. Install the packages on both nodes
[root@node1 ~]# yum --nogpgcheck -y localinstall *.rpm
[root@node1 ~]# scp *.rpm node2:/root
[root@node2 ~]# yum --nogpgcheck -y localinstall *.rpm
2. Create the configuration file
[root@node1 ~]# cd /etc/corosync
[root@node1 corosync]# cp corosync.conf.example corosync.conf
[root@node1 corosync]# vim corosync.conf
totem {
        version: 2
        secauth: on
        threads: 0
        interface {
                ringnumber: 0
                bindnetaddr: 172.16.1.0
                mcastaddr: 226.94.1.1
                mcastport: 5405
        }
}
logging {
        fileline: off
        to_stderr: no
        to_logfile: yes
        to_syslog: yes
        logfile: /var/log/cluster/corosync.log
        debug: off
        timestamp: on
        logger_subsys {
                subsys: AMF
                debug: off
        }
}
amf {
        mode: disabled
}
service {
        ver: 0
        name: pacemaker    # launch pacemaker as a corosync plugin
}
aisexec {
        user: root
        group: root
}
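Note: bindnetaddr must be the network address (not a host address) of the interface corosync binds to; 172.16.1.0 here assumes both nodes' cluster interfaces live in 172.16.1.0/24. A quick way to confirm the local address (assuming eth0 is the cluster-facing interface):
[root@node1 corosync]# ip -4 addr show eth0 | grep inet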
# Generate the corosync authentication key
[root@node1 corosync]# corosync-keygen
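Note: corosync-keygen reads from /dev/random and can appear to hang on an idle machine until enough entropy accumulates; typing on the console or generating disk activity lets it finish.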
# Copy the key and configuration file to node2, preserving permissions
[root@node1 corosync]# scp -p authkey corosync.conf node2:/etc/corosync
# Create the log directory on both nodes
[root@node1 corosync]# mkdir /var/log/cluster
[root@node1 corosync]# ssh node2 'mkdir /var/log/cluster'
3. Start the corosync service
[root@node1 ~]# service corosync start
Starting Corosync Cluster Engine (corosync): [ OK ]
# Check the log for startup messages
[root@node1 ~]# tail /var/log/cluster/corosync.log
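Rather than just tailing the log, a few targeted greps (the same checks used in the Pacemaker "Clusters from Scratch" guide) confirm that the engine started, TOTEM formed a membership, and pacemaker came up without errors:
[root@node1 ~]# grep -e "Corosync Cluster Engine" -e "configuration file" /var/log/cluster/corosync.log
[root@node1 ~]# grep TOTEM /var/log/cluster/corosync.log
[root@node1 ~]# grep pcmk_startup /var/log/cluster/corosync.log
[root@node1 ~]# grep ERROR: /var/log/cluster/corosync.log | grep -v unpack_resources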
# Start corosync on node2
[root@node1 ~]# ssh node2 'service corosync start'
Starting Corosync Cluster Engine (corosync): [ OK ]
# Check the cluster status
[root@node1 ~]# crm status
============
Last updated: Tue Dec 20 04:00:18 2011
Stack: openais
Current DC: node1.wang.com - partition with quorum
Version: 1.0.11-1554a83db0d3c3e546cfd3aaff6af1184f79ee87
2 Nodes configured, 2 expected votes
0 Resources configured.
============
Online: [ node1.wang.com node2.wang.com ]
4. Configure resources
# View the current configuration
[root@node1 ~]# crm
crm(live)# configure show
INFO: building help index
node node1.wang.com
node node2.wang.com
property $id="cib-bootstrap-options" \
dc-version="1.0.11-1554a83db0d3c3e546cfd3aaff6af1184f79ee87" \
cluster-infrastructure="openais" \
expected-quorum-votes="2"
crm(live)# configure
crm(live)configure# property stonith-enabled=false   # disable STONITH (no fencing devices in this test setup)
crm(live)configure# verify
# check the configuration for errors
crm(live)configure# property no-quorum-policy=ignore   # ignore loss of quorum (two-node cluster)
crm(live)configure# verify
crm(live)configure# rsc_defaults resource-stickiness=100   # prefer to keep resources where they run
crm(live)configure# verify
crm(live)configure# commit
# commit the changes
crm(live)configure# show
# confirm the changes took effect
node node1.wang.com
node node2.wang.com
property $id="cib-bootstrap-options" \
dc-version="1.0.11-1554a83db0d3c3e546cfd3aaff6af1184f79ee87" \
cluster-infrastructure="openais" \
expected-quorum-votes="2" \
stonith-enabled="false" \
no-quorum-policy="ignore"
rsc_defaults $id="rsc-options" \
resource-stickiness="100"
Configure the DRBD cluster resource
# First stop every service that the cluster will manage and disable it at boot: the cluster itself must control when and where they run
[root@node1 ~]# service mysqld stop
Shutting down MySQL.... [ OK ]
[root@node1 ~]# chkconfig mysqld off
[root@node1 ~]# umount /dev/drbd0
[root@node1 ~]# drbdadm secondary web
[root@node1 ~]# service drbd stop
Stopping all DRBD resources: .
[root@node1 ~]# chkconfig drbd off
Note: perform the same teardown on node2.
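The node2 side can be done remotely in one pass (a sketch; node2 is currently secondary, so there is nothing to unmount or demote there):
[root@node1 ~]# ssh node2 'chkconfig mysqld off; service drbd stop; chkconfig drbd off'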
# Now define the resources
[root@node1 ~]# crm
crm(live)# configure
crm(live)configure# primitive webdrbd ocf:linbit:drbd params drbd_resource=web
crm(live)configure# master ms_webdrbd webdrbd meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
crm(live)configure# verify
crm(live)configure# commit
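Here master-max=1 and master-node-max=1 allow only one instance to be promoted (one DRBD Primary) at a time, clone-max=2 runs one instance on each of the two nodes, and notify=true makes Pacemaker send pre/post notifications to the clones, which the ocf:linbit:drbd agent requires to track its peer's state.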
# View the defined resources
crm(live)configure# show
node node1.wang.com
node node2.wang.com
primitive webdrbd ocf:linbit:drbd \
params drbd_resource="web"
ms ms_webdrbd webdrbd \
meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
property $id="cib-bootstrap-options" \
dc-version="1.0.11-1554a83db0d3c3e546cfd3aaff6af1184f79ee87" \
cluster-infrastructure="openais" \
expected-quorum-votes="2" \
stonith-enabled="false" \
no-quorum-policy="ignore"
rsc_defaults $id="rsc-options" \
resource-stickiness="100"
# Check the status
crm(live)# status
============
Last updated: Tue Dec 20 04:31:58 2011
Stack: openais
Current DC: node1.wang.com - partition with quorum
Version: 1.0.11-1554a83db0d3c3e546cfd3aaff6af1184f79ee87
2 Nodes configured, 2 expected votes
1 Resources configured.
============
Online: [ node1.wang.com node2.wang.com ]
Master/Slave Set: ms_webdrbd
Masters: [ node1.wang.com ]
Slaves: [ node2.wang.com ]
Note: node1 is currently the Master.
# Switch the master node by putting node1 in standby
[root@node1 ~]# crm node standby
[root@node1 ~]# crm status
============
Last updated: Tue Dec 20 04:48:11 2011
Stack: openais
Current DC: node1.wang.com - partition with quorum
Version: 1.0.11-1554a83db0d3c3e546cfd3aaff6af1184f79ee87
2 Nodes configured, 2 expected votes
1 Resources configured.
============
Node node1.wang.com: standby
Online: [ node2.wang.com ]
Master/Slave Set: ms_webdrbd
Masters: [ node2.wang.com ]
# node2 is now the Master
Stopped: [ webdrbd:0 ]
# node1 is in standby, so only the Master instance is running
# Check the primary/secondary state from node2
[root@node2 ~]# drbd-overview
0:web WFConnection Primary/Unknown UpToDate/Outdated C r----
# Bring node1 back online
[root@node1 ~]# crm node online
[root@node1 ~]# crm status
============
Last updated: Tue Dec 20 04:51:45 2011
Stack: openais
Current DC: node1.wang.com - partition with quorum
Version: 1.0.11-1554a83db0d3c3e546cfd3aaff6af1184f79ee87
2 Nodes configured, 2 expected votes
1 Resources configured.
============
Online: [ node1.wang.com node2.wang.com ]
Master/Slave Set: ms_webdrbd
Masters: [ node2.wang.com ]
Slaves: [ node1.wang.com ]
Define the filesystem resource on the master node; the master is currently node2.
[root@node2 ~]# crm
crm(live)# configure
crm(live)configure# primitive webfs ocf:heartbeat:Filesystem params device="/dev/drbd0" directory="/web" fstype="ext3"
Define the resource constraints
# colocation constraint: webfs must run on the node where ms_webdrbd holds the Master role
crm(live)configure# colocation webfs_on_ms_webdrbd inf: webfs ms_webdrbd:Master
# order constraint: promote the DRBD resource before starting webfs
crm(live)configure# order webfs_after_ms_webdrbd inf: ms_webdrbd:promote webfs:start
crm(live)configure# verify
crm(live)configure# commit
crm(live)configure# show
node node1.wang.com \
attributes standby="off"
node node2.wang.com
primitive webdrbd ocf:linbit:drbd \
params drbd_resource="web"
primitive webfs ocf:heartbeat:Filesystem \
params device="/dev/drbd0" directory="/web" fstype="ext3"
ms ms_webdrbd webdrbd \
meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
location drbd-fence-by-handler-ms_webdrbd ms_webdrbd \
rule $id="drbd-fence-by-handler-rule-ms_webdrbd" $role="Master" -inf: #uname ne node2.wang.com
colocation webfs_on_ms_webdrbd inf: webfs ms_webdrbd:Master
order webfs_after_ms_webdrbd inf: ms_webdrbd:promote webfs:start
property $id="cib-bootstrap-options" \
dc-version="1.0.11-1554a83db0d3c3e546cfd3aaff6af1184f79ee87" \
cluster-infrastructure="openais" \
expected-quorum-votes="2" \
stonith-enabled="false" \
no-quorum-policy="ignore"
rsc_defaults $id="rsc-options" \
resource-stickiness="100"
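Note: the location rule drbd-fence-by-handler-ms_webdrbd in the output above was not typed in by hand; it is added automatically by DRBD's crm-fence-peer handler while the peer's data is Outdated, and is removed again once resynchronization completes.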
# Check the resource status
crm(live)# status
============
Last updated: Tue Dec 20 05:08:12 2011
Stack: openais
Current DC: node1.wang.com - partition with quorum
Version: 1.0.11-1554a83db0d3c3e546cfd3aaff6af1184f79ee87
2 Nodes configured, 2 expected votes
2 Resources configured.
============
Online: [ node1.wang.com node2.wang.com ]
Master/Slave Set: ms_webdrbd
Masters: [ node2.wang.com ]
Slaves: [ node1.wang.com ]
webfs   (ocf::heartbeat:Filesystem):    Started node2.wang.com
# Define the MySQL cluster resource
crm(live)# configure
crm(live)configure# primitive webmysqld lsb:mysqld
# order constraint: mount the filesystem before starting MySQL
crm(live)configure# order mysqld_after_webfs inf: webfs:start webmysqld:start
# colocation constraint: MySQL must run on the node where the filesystem is mounted
crm(live)configure# colocation mysqld_on_webfs inf: webmysqld webfs
crm(live)configure# verify
crm(live)configure# commit
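Note: lsb:mysqld simply drives /etc/init.d/mysqld, so that script must exist on both nodes, behave as an LSB-compliant init script (correct exit codes for start/stop/status), and stay disabled at boot; the cluster alone decides where MySQL runs.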
crm(live)# status
============
Last updated: Tue Dec 20 05:23:48 2011
Stack: openais
Current DC: node1.wang.com - partition with quorum
Version: 1.0.11-1554a83db0d3c3e546cfd3aaff6af1184f79ee87
2 Nodes configured, 2 expected votes
3 Resources configured.
============
Online: [ node1.wang.com node2.wang.com ]
Master/Slave Set: ms_webdrbd
Masters: [ node2.wang.com ]
Slaves: [ node1.wang.com ]
webfs   (ocf::heartbeat:Filesystem):    Started node2.wang.com
# Run status again once MySQL has finished starting
crm(live)# status
============
Last updated: Tue Dec 20 05:24:17 2011
Stack: openais
Current DC: node1.wang.com - partition with quorum
Version: 1.0.11-1554a83db0d3c3e546cfd3aaff6af1184f79ee87
2 Nodes configured, 2 expected votes
3 Resources configured.
============
Online: [ node1.wang.com node2.wang.com ]
Master/Slave Set: ms_webdrbd
Masters: [ node2.wang.com ]
Slaves: [ node1.wang.com ]
webfs   (ocf::heartbeat:Filesystem):    Started node2.wang.com
webmysqld       (lsb:mysqld):   Started node2.wang.com
Note: the MySQL resource is now running on the master node.
Now switch nodes to confirm the resources fail over automatically.
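A minimal failover test, assuming all resources currently sit on node2: put node2 in standby, confirm everything migrates to node1, then bring node2 back online.
[root@node2 ~]# crm node standby
[root@node1 ~]# crm status                    # ms_webdrbd Master, webfs and webmysqld should now be on node1
[root@node1 ~]# mysql -e 'show databases;'    # mydb should still be listed
[root@node2 ~]# crm node online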
The MySQL high-availability cluster based on DRBD + Corosync is complete.