Building an HA MySQL cluster with corosync + DRBD + pacemaker

Topology of the example deployment (diagram omitted)

Part 1: Preparation

1. Interface IP configuration

node1.a.com: eth0 192.168.10.81/24 (screenshot omitted)

node2.a.com: eth0 192.168.10.82/24 (screenshot omitted)

2. Set the hostnames

Hostname configuration on node1.a.com

# vim /etc/sysconfig/network

NETWORKING=yes

NETWORKING_IPV6=no

HOSTNAME=node1.a.com

# hostname node1.a.com (applies immediately; the file change above alone takes effect only after a reboot)

# vim /etc/hosts

192.168.10.81 node1.a.com  node1

192.168.10.82 node2.a.com   node2

[root@node1 ~]# ifconfig

eth0 Link encap:Ethernet HWaddr 00:0C:29:76:F3:78

inet addr:192.168.10.81 Bcast:192.168.10.255 Mask:255.255.255.0

Hostname configuration on node2.a.com

# vim /etc/sysconfig/network

NETWORKING=yes

NETWORKING_IPV6=no

HOSTNAME=node2.a.com

# hostname node2.a.com (applies immediately; the file change above alone takes effect only after a reboot)

# vim /etc/hosts

192.168.10.81 node1.a.com   node1

192.168.10.82 node2.a.com  node2

[root@node2 ~]# ifconfig

eth0 Link encap:Ethernet HWaddr 00:0C:29:94:6D:04

inet addr:192.168.10.82 Bcast:192.168.10.255 Mask:255.255.255.0

[root@node2 ~]# ping node1.a.com
PING node1.a.com (192.168.10.81) 56(84) bytes of data.
64 bytes from node1.a.com (192.168.10.81): icmp_seq=1 ttl=64 time=2.14 ms
64 bytes from node1.a.com (192.168.10.81): icmp_seq=2 ttl=64 time=0.339 ms
64 bytes from node1.a.com (192.168.10.81): icmp_seq=3 ttl=64 time=0.364 ms

3. Sync the system time (run on both nodes)

# hwclock -s

# date
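If the nodes have network access to an NTP server, syncing over the network is an alternative to the hardware clock; pool.ntp.org is only an example server, substitute your own (run on both nodes):

# ntpdate pool.ntp.org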

4. Set up SSH key authentication (run on both nodes; it lets files be copied later without typing a password every time)

# ssh-keygen -t rsa  // press Enter at each prompt to accept the defaults

[root@node1 ~]# ssh-keygen -t rsa

[root@node1 ~]# ssh-copy-id -i .ssh/id_rsa.pub node2

[root@node2 ~]# ssh-keygen -t rsa

[root@node2 ~]# ssh-copy-id -i .ssh/id_rsa.pub node1 (type yes, then enter the password)

[root@node1 ~]# ssh node1 'ifconfig' // type yes and enter the password; the ifconfig output of node1 is displayed
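A quick way to confirm that key-based login toward the other node works is to run a harmless remote command; uname -n is just an example, and it should print node2.a.com without asking for a password:

[root@node1 ~]# ssh node2 'uname -n'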

5. Download the software packages to /root.
[root@node1 ~]# ll
total 162104
-rw------- 1 root root      1175 08-30 04:50 anaconda-ks.cfg
-rw-r--r-- 1 root root    133254 10-19 20:04 cluster-glue-libs-1.0.6-1.6.el5.i386.rpm
-rw-r--r-- 1 root root    170052 10-19 20:04 corosync-1.2.7-1.1.el5.i386.rpm
-rw-r--r-- 1 root root    158502 10-19 20:04 corosynclib-1.2.7-1.1.el5.i386.rpm
drwxr-xr-x 2 root root      4096 10-19 20:05 Desktop
-rw-r--r-- 1 root root    221868 10-19 20:04 drbd83-8.3.8-1.el5.centos.i386.rpm
-rw-r--r-- 1 root root    165591 10-19 20:04 heartbeat-3.0.3-2.3.el5.i386.rpm
-rw-r--r-- 1 root root    289600 10-19 20:04 heartbeat-libs-3.0.3-2.3.el5.i386.rpm
-rw-r--r-- 1 root root     35236 08-30 04:50 install.log
-rw-r--r-- 1 root root      3995 08-30 04:49 install.log.syslog
-rw-r--r-- 1 root root    125974 10-19 20:04 kmod-drbd83-8.3.8-1.el5.centos.i686.rpm
-rw-r--r-- 1 root root     44377 10-19 20:03 ldirectord-1.0.1-1.el5.i386.rpm
-rw-r--r-- 1 root root     60458 10-19 20:03 libesmtp-1.0.4-5.el5.i386.rpm
-rw-r--r-- 1 root root 162247449 10-19 20:03 mysql-5.5.15-linux2.6-i686.tar.gz
-rw-r--r-- 1 root root    207085 10-19 20:02 openais-1.1.3-1.6.el5.i386.rpm
-rw-r--r-- 1 root root     94614 10-19 20:02 openaislib-1.1.3-1.6.el5.i386.rpm
-rw-r--r-- 1 root root    796813 10-19 20:02 pacemaker-1.1.5-1.1.el5.i386.rpm
-rw-r--r-- 1 root root    207925 10-19 20:02 pacemaker-cts-1.1.5-1.1.el5.i386.rpm
-rw-r--r-- 1 root root    332026 10-19 20:02 pacemaker-libs-1.1.5-1.1.el5.i386.rpm
-rw-r--r-- 1 root root     32818 10-19 20:02 perl-TimeDate-1.16-5.el5.noarch.rpm
-rw-r--r-- 1 root root    388632 10-19 20:02 resource-agents-1.0.4-1.1.el5.i386.rpm

[root@node1 ~]# rm ldirectord-1.0.1-1.el5.i386.rpm // this package is not needed here, so delete it

[root@node1 ~]# scp /root/*.rpm node2:/root/

[root@node1 ~]# scp mysql-5.5.15-linux2.6-i686.tar.gz node2:/root/
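Optionally, verify that the large tarball copied over intact by comparing checksums on both nodes:

[root@node1 ~]# md5sum mysql-5.5.15-linux2.6-i686.tar.gz
[root@node1 ~]# ssh node2 'md5sum /root/mysql-5.5.15-linux2.6-i686.tar.gz'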

6. Configure yum repositories

[root@node1 ~]# vim /etc/yum.repos.d/rhel-debuginfo.repo

[rhel-server]
name=Red Hat Enterprise Linux server
baseurl=file:///mnt/cdrom/Server
enabled=1
gpgcheck=1
gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release

[rhel-vt]
name=Red Hat Enterprise Linux vt
baseurl=file:///mnt/cdrom/VT
enabled=1
gpgcheck=1
gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release

[rhel-cluster]
name=Red Hat Enterprise Linux cluster
baseurl=file:///mnt/cdrom/Cluster
enabled=1
gpgcheck=1
gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release

[rhel-clustersrorage]
name=Red Hat Enterprise Linux clusterstorage
baseurl=file:///mnt/cdrom/ClusterStorage
enabled=1
gpgcheck=1
gpgkey=file:///mnt/cdrom/RPM-GPG-KEY-redhat-release


[root@node1 ~]# mkdir /mnt/cdrom
[root@node1 ~]# mount /dev/cdrom /mnt/cdrom/
[root@node1 ~]# yum list all
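If you want the DVD mounted automatically after a reboot (optional; the repo files above point at /mnt/cdrom), an fstab entry such as the following works:

[root@node1 ~]# echo '/dev/cdrom /mnt/cdrom iso9660 defaults 0 0' >> /etc/fstab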

[root@node1 ~]# scp /etc/yum.repos.d/rhel-debuginfo.repo node2:/etc/yum.repos.d/

[root@node1 ~]# ssh node2 'mkdir /mnt/cdrom'

[root@node1 ~]# ssh node2 'mount /dev/cdrom /mnt/cdrom'

[root@node1 ~]# ssh node2 'yum list all'

7. Create a new partition for DRBD (run on both nodes)

[root@node1 ~]# fdisk /dev/sda

The number of cylinders for this disk is set to 2610.
There is nothing wrong with that, but this is larger than 1024,
and could in certain setups cause problems with:
1) software that runs at boot time (e.g., old versions of LILO)
2) booting and partitioning software from other OSs
   (e.g., DOS FDISK, OS/2 FDISK)

Command (m for help): p

Disk /dev/sda: 21.4 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *           1          13      104391   83  Linux
/dev/sda2              14        1288    10241437+  83  Linux
/dev/sda3            1289        1352      514080   82  Linux swap / Solaris

Command (m for help): n (create a new partition)
Command action
   e   extended
   p   primary partition (1-4)
p (primary partition)
Selected partition 4
First cylinder (1353-2610, default 1353):
Using default value 1353
Last cylinder or +size or +sizeM or +sizeK (1353-2610, default 2610): +1000m (partition size)

Command (m for help): p (print the partition table)

Disk /dev/sda: 21.4 GB, 21474836480 bytes
255 heads, 63 sectors/track, 2610 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes

   Device Boot      Start         End      Blocks   Id  System
/dev/sda1   *           1          13      104391   83  Linux
/dev/sda2              14        1288    10241437+  83  Linux
/dev/sda3            1289        1352      514080   82  Linux swap / Solaris
/dev/sda4            1353        1475      987997+  83  Linux

Command (m for help): w (write the table and exit)
The partition table has been altered!

Calling ioctl() to re-read partition table.

WARNING: Re-reading the partition table failed with error 16: Device or resource busy.
The kernel still uses the old table.
The new table will be used at the next reboot.
Syncing disks.

8. Re-read the partition table (run on both nodes)

[root@node1 drbd.d]# partprobe /dev/sda
[root@node1 drbd.d]# cat /proc/partitions
major minor  #blocks  name

   8     0   20971520 sda
   8     1     104391 sda1
   8     2   10241437 sda2
   8     3     514080 sda3
   8     4     987997 sda4

Part 2: DRBD installation and configuration

1. Install the DRBD packages

[root@node1 ~]# yum localinstall -y drbd83-8.3.8-1.el5.centos.i386.rpm kmod-drbd83-8.3.8-1.el5.centos.i686.rpm --nogpgcheck

[root@node2 ~]# yum localinstall -y drbd83-8.3.8-1.el5.centos.i386.rpm kmod-drbd83-8.3.8-1.el5.centos.i686.rpm --nogpgcheck

2. Load the DRBD kernel module

[root@node1 ~]# modprobe drbd
[root@node1 ~]# lsmod |grep drbd
drbd                  228528  0
[root@node1 ~]# ssh node2 'modprobe drbd'
[root@node1 ~]# ssh node2 'lsmod |grep drbd'
drbd                  228528  0
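The drbd init script loads the module when the service starts, but if you want the module loaded automatically at boot even after that service is disabled later on (pacemaker will manage DRBD then), one simple optional approach is an rc.local entry:

[root@node1 ~]# echo 'modprobe drbd' >> /etc/rc.local
[root@node1 ~]# ssh node2 "echo 'modprobe drbd' >> /etc/rc.local"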

3. Edit the configuration files

[root@node1 ~]# cp /usr/share/doc/drbd83-8.3.8/drbd.conf /etc/
cp: overwrite '/etc/drbd.conf'? y
[root@node1 ~]# vim /etc/drbd.conf

(screenshot omitted; the copied drbd.conf only needs its default include lines for drbd.d/global_common.conf and drbd.d/*.res)

[root@node1 ~]# cd /etc/drbd.d/
[root@node1 drbd.d]# ll
total 4
-rwxr-xr-x 1 root root 1418 2010-06-04 global_common.conf
[root@node1 drbd.d]# cp global_common.conf global_common.conf.bak  // keep a backup copy first

3.1 Edit the global configuration file

[root@node1 drbd.d]# vim global_common.conf

global {
        usage-count no;  // changed from yes to no
        # minor-count dialog-refresh disable-ip-verification
}

common {
        protocol C;

        startup {
                wfc-timeout  120;
                degr-wfc-timeout 120;
         }
        disk {
                  on-io-error detach;
                  fencing resource-only;

          }
        net {
                cram-hmac-alg "sha1";
                shared-secret  "mydrbdlab";
         }
        syncer {
                  rate  100M;
         }

}

3.2 Create the resource configuration file

[root@node1 drbd.d]# vim /etc/drbd.d/mysql.res

resource  mysql {
        on node1.a.com {
        device   /dev/drbd0;
        disk    /dev/sda4;
        address  192.168.10.81:7789;
        meta-disk       internal;
        }

        on node2.a.com {
        device   /dev/drbd0;
        disk    /dev/sda4;
        address  192.168.10.82:7789;
        meta-disk       internal;
        }
}

3.3 Copy the configuration to node2

[root@node1 drbd.d]# scp /etc/drbd.conf node2:/etc/
drbd.conf                                                     100%  133     0.1KB/s   00:00   
[root@node1 drbd.d]# scp /etc/drbd.d/* node2:/etc/drbd.d/
global_common.conf                                            100%  503     0.5KB/s   00:00   
global_common.conf.bak                                        100% 1418     1.4KB/s   00:00   
mysql.res                                                     100%  352     0.3KB/s   00:00  

3.4 Validate the configuration and create the DRBD resource metadata (run on both nodes)

[root@node1 drbd.d]# drbdadm adjust mysql
0: Failure: (119) No valid meta-data signature found.

    ==> Use 'drbdadm create-md res' to initialize meta-data area. <==

Command 'drbdsetup 0 disk /dev/sda4 /dev/sda4 internal --set-defaults --create-device --fencing=resource-only --on-io-error=detach' terminated with exit code 10
[root@node1 drbd.d]# drbdadm adjust mysql
drbdsetup 0 show:5: delay-probe-volume 0k => 0k out of range [4..1048576]k.

[root@node1 drbd.d]# drbdadm create-md mysql
Writing meta data...
initializing activity log
NOT initialized bitmap
New drbd meta data block successfully created.
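The same metadata initialization has to be done on node2 as well; either log in there and run it, or do it over ssh from node1:

[root@node1 drbd.d]# ssh node2 'drbdadm create-md mysql'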

3.5 Start the DRBD service and check its status

[root@node1 drbd.d]# service drbd start
Starting DRBD resources: drbdsetup 0 show:5: delay-probe-volume 0k => 0k out of range [4..1048576]k.
[root@node1 drbd.d]# service drbd restart
Restarting all DRBD resources: .
[root@node2 drbd.d]# service drbd start
Starting DRBD resources: drbdsetup 0 show:5: delay-probe-volume 0k => 0k out of range [4..1048576]k.

[root@node2 drbd.d]# service drbd restart
Restarting all DRBD resources: .

[root@node1 drbd.d]# service drbd status
drbd driver loaded OK; device status:
version: 8.3.8 (api:88/proto:86-94)
GIT-hash: d78846e52224fd00562f7c225bcc25b2d422321d build by [email protected], 2010-06-04 08:04:16
m:res    cs         ro                   ds                         p  mounted  fstype
0:mysql  Connected  Secondary/Secondary  Inconsistent/Inconsistent  C
[root@node2 drbd.d]# service drbd status
drbd driver loaded OK; device status:
version: 8.3.8 (api:88/proto:86-94)
GIT-hash: d78846e52224fd00562f7c225bcc25b2d422321d build by [email protected], 2010-06-04 08:04:16
m:res    cs         ro                   ds                         p  mounted  fstype
0:mysql  Connected  Secondary/Secondary  Inconsistent/Inconsistent  C

Alternatively, check the DRBD status with drbd-overview:

[root@node1 drbd.d]# drbd-overview
  0:mysql  Connected Secondary/Secondary Inconsistent/Inconsistent C r----

[root@node2 drbd.d]# drbd-overview
  0:mysql  Connected Secondary/Secondary Inconsistent/Inconsistent C r----

Or check the role with the drbdadm role mysql command.

3.6 Set the DRBD primary node

The output above shows that both nodes are currently in the Secondary state; next, make node2 the primary node:

[root@node2 drbd.d]# drbdadm -- --overwrite-data-of-peer primary mysql

[root@node2 drbd.d]# drbd-overview
  0:mysql  SyncSource Primary/Secondary UpToDate/Inconsistent C r----
    [=>..................] sync'ed: 10.8% (888536/987928)K delay_probe: 7

[root@node2 drbd.d]# drbd-overview
  0:mysql  SyncSource Primary/Secondary UpToDate/Inconsistent C r----
    [==============>.....] sync'ed: 76.5% (234936/987928)K delay_probe: 59

[root@node2 drbd.d]# drbd-overview
  0:mysql  Connected Primary/Secondary UpToDate/UpToDate C r----

[root@node2 drbd.d]# cat /proc/drbd
version: 8.3.8 (api:88/proto:86-94)
GIT-hash: d78846e52224fd00562f7c225bcc25b2d422321d build by [email protected], 2010-06-04 08:04:16
0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r----
    ns:987928 nr:0 dw:0 dr:987928 al:0 bm:61 lo:0 pe:0 ua:0 ap:0 ep:1 wo:b oos:0

[root@node1 drbd.d]# drbd-overview
  0:mysql  Connected Secondary/Primary UpToDate/UpToDate C r----

Note: a role of Primary/Secondary means the local node is the primary; Secondary/Primary means the local node is the secondary. Use watch -n 1 'cat /proc/drbd' to follow the synchronization progress.

3.7 Create the filesystem (this can only be done on the primary node, so it is done here on node2)

[root@node2 drbd.d]# mkfs -t ext3 /dev/drbd0

[root@node2 drbd.d]# mkdir -pv /mnt/mysqldata
mkdir: created directory '/mnt/mysqldata'

[root@node2 drbd.d]# ssh node1 'mkdir -pv /mnt/mysqldata'
mkdir: created directory '/mnt/mysqldata'
[root@node2 drbd.d]# mount /dev/drbd0 /mnt/mysqldata/
[root@node2 drbd.d]# cd /mnt/mysqldata/
[root@node2 mysqldata]# ll
total 16
drwx------ 2 root root 16384 10-19 23:45 lost+found

[root@node2 mysqldata]# echo "zhaoke">zhaoke
[root@node2 mysqldata]# touch uodekoaix
[root@node2 mysqldata]# ll
total 20
drwx------ 2 root root 16384 10-19 23:45 lost+found
-rw-r--r-- 1 root root     0 10-19 23:48 uodekoaix
-rw-r--r-- 1 root root     7 10-19 23:48 zhaoke

3.8 Make node1 the primary

[root@node2 ~]# umount /mnt/mysqldata/
[root@node2 ~]# drbdadm secondary mysql
[root@node2 ~]# drbd-overview
  0:mysql  Connected Secondary/Secondary UpToDate/UpToDate C r----

[root@node1 drbd.d]# drbd-overview
  0:mysql  Connected Secondary/Secondary UpToDate/UpToDate C r----

[root@node1 drbd.d]# drbdadm primary mysql
[root@node1 drbd.d]# drbd-overview
  0:mysql  Connected Primary/Secondary UpToDate/UpToDate C r----

[root@node1 drbd.d]# mount /dev/drbd0 /mnt/mysqldata/
[root@node1 drbd.d]# cd /mnt/mysqldata/
[root@node1 mysqldata]# ll
total 20
drwx------ 2 root root 16384 10-19 23:45 lost+found
-rw-r--r-- 1 root root     0 10-19 23:48 uodekoaix
-rw-r--r-- 1 root root     7 10-19 23:48 zhaoke

3.9 The files created earlier on node2 are all visible here on node1; finally, unmount /mnt/mysqldata/ on node1

[root@node1 mysqldata]# cd
[root@node1 ~]# umount /mnt/mysqldata/

At this point DRBD is configured and working.

Part 3: MySQL installation and configuration

1. node1 is now the primary node and node2 the backup node. Install MySQL on node1.a.com

[root@node1 ~]# groupadd -r mysql
[root@node1 ~]# useradd -g mysql -r mysql
[root@node1 ~]# drbd-overview
  0:mysql  Connected Primary/Secondary UpToDate/UpToDate C r----
[root@node1 ~]# mount /dev/drbd0 /mnt/mysqldata/
[root@node1 ~]# mkdir -pv /mnt/mysqldata/data
mkdir: created directory '/mnt/mysqldata/data'
[root@node1 ~]# chown -R mysql.mysql /mnt/mysqldata/data
[root@node1 ~]# ll /mnt/mysqldata/
total 24
drwxr-xr-x 2 mysql mysql  4096 10-20 00:01 data
drwx------ 2 root  root  16384 10-19 23:45 lost+found
-rw-r--r-- 1 root  root      0 10-19 23:48 uodekoaix
-rw-r--r-- 1 root  root      7 10-19 23:48 zhaoke
[root@node1 ~]# tar -zxvf mysql-5.5.15-linux2.6-i686.tar.gz -C /usr/local/

[root@node1 ~]# cd /usr/local/
[root@node1 local]# ln -sv mysql-5.5.15-linux2.6-i686/ mysql
create symbolic link 'mysql' -> 'mysql-5.5.15-linux2.6-i686/'
[root@node1 local]# cd mysql
[root@node1 mysql]# chown -R root:mysql .

[root@node1 mysql]# scripts/mysql_install_db --user=mysql --datadir=/mnt/mysqldata/data/

[root@node1 mysql]# chown -R root .
[root@node1 mysql]# cp support-files/my-large.cnf /etc/my.cnf
[root@node1 mysql]# vim /etc/my.cnf

39 thread_concurrency = 2
40 datadir = /mnt/mysqldata/data/ # add this line to point MySQL's data directory at the DRBD-backed mount

Provide a SysV init script for MySQL so it can be managed with the service command:

[root@node1 mysql]# cp support-files/mysql.server /etc/rc.d/init.d/mysqld
[root@node1 mysql]# scp /etc/my.cnf node2:/etc/
my.cnf                                                        100% 4693     4.6KB/s   00:00 

[root@node1 mysql]# scp /etc/rc.d/init.d/mysqld node2:/etc/rc.d/init.d/
mysqld                                                        100%   10KB  10.4KB/s   00:00 

[root@node1 mysql]# chkconfig --add mysqld
[root@node1 mysql]# chkconfig mysqld off
[root@node1 mysql]# chkconfig --list mysqld
mysqld             0:off    1:off    2:off    3:off    4:off    5:off    6:off
[root@node1 mysql]# service mysqld start
Starting MySQL.........                                    [ OK ]
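As a quick sanity check that the server answers (the full binary path is used because PATH is only extended further below; this assumes the default socket from the copied my.cnf and the fresh install's passwordless root account):

[root@node1 mysql]# /usr/local/mysql/bin/mysql -e 'show databases;'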

To make this MySQL installation conform to normal system conventions and to export its development components to the system, a few more steps are needed:

Export MySQL's man pages to the man command's search path; adding the following line is enough:
[root@node1 mysql]# vim /etc/man.config
At line 48 add: MANPATH /usr/local/mysql/man
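If you prefer not to open an editor, appending the line works just as well:

[root@node1 mysql]# echo 'MANPATH /usr/local/mysql/man' >> /etc/man.config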

Export MySQL's header files to the system header path /usr/include; a simple symlink does this:

[root@node1 mysql]# ln -sv /usr/local/mysql/include/ /usr/include/mysql
create symbolic link '/usr/include/mysql' -> '/usr/local/mysql/include/'

Export MySQL's libraries to the system library search path (any file under /etc/ld.so.conf.d/ with a .conf suffix will do), then have the system reload its library cache:

[root@node1 mysql]# echo '/usr/local/mysql/lib/'>> /etc/ld.so.conf.d/mysql.conf
[root@node1 mysql]# ldconfig -v |grep mysql

/usr/local/mysql/lib:
    libmysqlclient.so.18 -> libmysqlclient_r.so.18.0.0

Modify the PATH environment variable so that all users on the system can use the MySQL commands directly:

[root@node1 mysql]# vim /etc/profile
At line 59 add: PATH=$PATH:/usr/local/mysql/bin
[root@node1 mysql]# . /etc/profile

[root@node1 mysql]# echo $PATH
/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin

Unmount the DRBD device

[root@node1 ~]# umount /mnt/mysqldata/   (if it refuses to unmount, see the note below; as a last resort, reboot node1)
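If the unmount fails with "device is busy", it is usually because mysqld is still running out of the mounted data directory; stopping it (and checking with fuser) normally avoids the reboot:

[root@node1 ~]# service mysqld stop
[root@node1 ~]# fuser -mv /mnt/mysqldata
[root@node1 ~]# umount /mnt/mysqldata/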

After the reboot you will find:

[root@node1 ~]# drbd-overview
  0:mysql  Connected Secondary/Primary UpToDate/UpToDate C r----

2. Make node2 the primary node and node1 the backup node, then install MySQL on node2.a.com

[root@node2 ~]# drbdadm primary mysql
[root@node2 ~]# drbd-overview
  0:mysql  Connected Primary/Secondary UpToDate/UpToDate C r----

[root@node2 ~]# drbdadm role mysql
Primary/Secondary

3. Install MySQL on node2.a.com

Add the mysql user and group

[root@node2 ~]# groupadd -r mysql
[root@node2 ~]# useradd -g mysql -r mysql

Mount the DRBD device

[root@node2 ~]# mount /dev/drbd0 /mnt/mysqldata/

[root@node2 ~]# ll /mnt/mysqldata/
total 24
drwxr-xr-x 5 mysql mysql  4096 10-20 00:27 data
drwx------ 2 root  root  16384 10-19 23:45 lost+found
-rw-r--r-- 1 root  root      0 10-19 23:48 uodekoaix
-rw-r--r-- 1 root  root      7 10-19 23:48 zhaoke

[root@node2 ~]# ll /mnt/mysqldata/data/
total 28752
-rw-rw---- 1 mysql mysql 18874368 10-20 00:14 ibdata1
-rw-rw---- 1 mysql mysql  5242880 10-20 00:27 ib_logfile0
-rw-rw---- 1 mysql mysql  5242880 10-20 00:14 ib_logfile1
drwx------ 2 mysql root      4096 10-20 00:07 mysql
-rw-rw---- 1 mysql mysql      107 10-20 00:14 mysql-bin.000001
-rw-rw---- 1 mysql mysql      107 10-20 00:27 mysql-bin.000002
-rw-rw---- 1 mysql mysql       38 10-20 00:27 mysql-bin.index
-rw-rw---- 1 mysql root      3183 10-20 00:27 node1.a.com.err
-rw-rw---- 1 mysql mysql        5 10-20 00:27 node1.a.com.pid
drwx------ 2 mysql mysql     4096 10-20 00:08 performance_schema
drwx------ 2 mysql root      4096 10-20 00:06 test

[root@node2 ~]# tar -zxvf mysql-5.5.15-linux2.6-i686.tar.gz -C /usr/local/

[root@node2 ~]# cd /usr/local/
[root@node2 local]# ln -sv mysql-5.5.15-linux2.6-i686/ mysql
create symbolic link 'mysql' -> 'mysql-5.5.15-linux2.6-i686/'

[root@node2 local]# cd mysql

Do not initialize the database here; it was already initialized on node1:
[root@node2 mysql]# chown -R root:mysql .

The MySQL main configuration file and the SysV init script were already copied over from node1, so they do not need to be created again.
Register the mysqld service:
[root@node2 mysql]# chkconfig --add mysqld
[root@node2 mysql]# chkconfig mysqld off
[root@node2 mysql]# chkconfig --list mysqld
mysqld             0:off    1:off    2:off    3:off    4:off    5:off    6:off

Start the MySQL service
[root@node2 mysql]# service mysqld start
Starting MySQL.....                                        [ OK ]

[root@node2 mysql]# ll /mnt/mysqldata/data/
total 28764
-rw-rw---- 1 mysql mysql 18874368 10-20 00:14 ibdata1
-rw-rw---- 1 mysql mysql  5242880 10-20 00:59 ib_logfile0
-rw-rw---- 1 mysql mysql  5242880 10-20 00:14 ib_logfile1
drwx------ 2 mysql root      4096 10-20 00:07 mysql
-rw-rw---- 1 mysql mysql      107 10-20 00:14 mysql-bin.000001
-rw-rw---- 1 mysql mysql      107 10-20 00:27 mysql-bin.000002
-rw-rw---- 1 mysql mysql      107 10-20 00:59 mysql-bin.000003
-rw-rw---- 1 mysql mysql       57 10-20 00:59 mysql-bin.index
-rw-rw---- 1 mysql root      3183 10-20 00:27 node1.a.com.err
-rw-rw---- 1 mysql mysql        5 10-20 00:27 node1.a.com.pid
-rw-rw---- 1 mysql root      1464 10-20 00:59 node2.a.com.err
-rw-rw---- 1 mysql mysql        5 10-20 00:59 node2.a.com.pid
drwx------ 2 mysql mysql     4096 10-20 00:08 performance_schema
drwx------ 2 mysql root      4096 10-20 00:06 test

After testing, stop the service:

[root@node2 mysql]# service mysqld stop
Shutting down MySQL..                                      [ OK ]

As on node1, a few more steps make the installation conform to system conventions and export its development components:

Export MySQL's man pages to the man search path by adding the following line:

[root@node2 mysql]# vim /etc/man.config

At line 48 add: MANPATH /usr/local/mysql/man
[root@node2 mysql]# ln -sv /usr/local/mysql/include/ /usr/include/mysql
create symbolic link '/usr/include/mysql' -> '/usr/local/mysql/include/'

[root@node2 mysql]# echo '/usr/local/mysql/lib/'>>/etc/ld.so.conf.d/mysql.conf
[root@node2 mysql]# ldconfig -v |grep mysql
/usr/local/mysql/lib:
    libmysqlclient.so.18 -> libmysqlclient_r.so.18.0.0

Modify the PATH environment variable so that all users can use the MySQL commands directly:

[root@node2 mysql]# vim /etc/profile

At line 59 add: PATH=$PATH:/usr/local/mysql/bin
[root@node2 mysql]# . /etc/profile

[root@node2 mysql]# echo $PATH
/usr/kerberos/sbin:/usr/kerberos/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin:/usr/local/mysql/bin

Unmount the DRBD device

[root@node2 mysql]# umount /mnt/mysqldata/

Part 4: corosync + pacemaker installation and configuration

1. Mount the CD and install the required RPM packages

[root@node1 ~]# mount /dev/cdrom /mnt/cdrom/

[root@node1 ~]# yum localinstall -y *.rpm --nogpgcheck

[root@node2 mysql]# mount /dev/cdrom /mnt/cdrom/
[root@node2 mysql]# yum localinstall -y *.rpm --nogpgcheck

2. Configure corosync on node1 and node2.

[root@node1 ~]# cd /etc/corosync/
[root@node1 corosync]# ll
total 20
-rw-r--r-- 1 root root 5384 2010-07-28 amf.conf.example
-rw-r--r-- 1 root root  436 2010-07-28 corosync.conf.example
drwxr-xr-x 2 root root 4096 2010-07-28 service.d
drwxr-xr-x 2 root root 4096 2010-07-28 uidgid.d
[root@node1 corosync]# cp corosync.conf.example corosync.conf

[root@node1 corosync]# vim  corosync.conf

Change line 10 to:                 bindnetaddr: 192.168.10.0

Append the following. The settings above only configure the messaging layer; since pacemaker runs on top of it, it must be started as a corosync service:

33 service {
34         ver: 0
35         name: pacemaker
36 }

openais itself is not used, but some of its sub-options are still needed:
37 aisexec {
38         user: root
39         group: root
40 }

3. Create the cluster log directory

[root@node1 corosync]# mkdir -pv /var/log/cluster
mkdir: created directory '/var/log/cluster'
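This directory is meant to hold the corosync log. The sample corosync.conf already contains a logging section; pointing it at the new directory typically looks like the block below (these are standard corosync logging directives; adjust to whatever your sample file already contains):

logging {
        to_logfile: yes
        logfile: /var/log/cluster/corosync.log
        to_syslog: yes
        timestamp: on
}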

4. To allow the other host to join the cluster, authentication is needed, so generate an authkey:
[root@node1 corosync]# corosync-keygen
Corosync Cluster Engine Authentication key generator.
Gathering 1024 bits for key from /dev/random.
Press keys on your keyboard to generate entropy.
Writing corosync key to /etc/corosync/authkey.
[root@node1 corosync]# ll
total 28
-rw-r--r-- 1 root root 5384 2010-07-28 amf.conf.example
-r-------- 1 root root  128 10-20 11:34 authkey
-rw-r--r-- 1 root root  538 10-20 11:33 corosync.conf
-rw-r--r-- 1 root root  436 2010-07-28 corosync.conf.example
drwxr-xr-x 2 root root 4096 2010-07-28 service.d
drwxr-xr-x 2 root root 4096 2010-07-28 uidgid.d

5. Copy the files from node1 to node2 (remember to use -p to preserve permissions)

[root@node1 corosync]# scp -p authkey corosync.conf node2:/etc/corosync/
authkey                                               100%  128     0.1KB/s   00:00   
corosync.conf                                         100%  539     0.5KB/s   00:00 

[root@node1 corosync]# ssh node2 'mkdir -pv /var/log/cluster'
mkdir: created directory '/var/log/cluster'

6. Verification on node1 and node2

6.1 Start the corosync service on node1 and node2

[root@node1 corosync]# service corosync start
Starting Corosync Cluster Engine (corosync):               [ OK ]
[root@node1 corosync]# ssh node2 'service corosync start'
Starting Corosync Cluster Engine (corosync): [ OK ]

6.2 On node1, verify that the corosync engine started correctly

[root@node1 corosync]# grep -i -e "corosync cluster engine" -e "configuration file" /var/log/messages
Aug 30 21:01:28 localhost smartd[2961]: Opened configuration file /etc/smartd.conf
Aug 30 21:01:28 localhost smartd[2961]: Configuration file /etc/smartd.conf was parsed, found DEVICESCAN, scanning devices
Oct 19 18:42:34 localhost smartd[3195]: Opened configuration file /etc/smartd.conf
Oct 19 18:42:34 localhost smartd[3195]: Configuration file /etc/smartd.conf was parsed, found DEVICESCAN, scanning devices
Oct 20 10:42:28 node1 smartd[3145]: Opened configuration file /etc/smartd.conf
Oct 20 10:42:28 node1 smartd[3145]: Configuration file /etc/smartd.conf was parsed, found DEVICESCAN, scanning devices
Oct 20 11:46:09 node1 corosync[3459]:   [MAIN  ] Corosync Cluster Engine ('1.2.7'): started and ready to provide service.
Oct 20 11:46:09 node1 corosync[3459]:   [MAIN  ] Successfully read main configuration file '/etc/corosync/corosync.conf'.

6.3 On node1, check that the initial membership notifications were sent
[root@node1 corosync]# grep -i totem /var/log/messages
Oct 20 11:46:09 node1 corosync[3459]:   [TOTEM ] Initializing transport (UDP/IP).
Oct 20 11:46:09 node1 corosync[3459]:   [TOTEM ] Initializing transmit/receive security: libtomcrypt SOBER128/SHA1HMAC (mode 0).
Oct 20 11:46:09 node1 corosync[3459]:   [TOTEM ] The network interface [192.168.10.81] is now up.
Oct 20 11:46:11 node1 corosync[3459]:   [TOTEM ] Process pause detected for 1132 ms, flushing membership messages.
Oct 20 11:46:11 node1 corosync[3459]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.
Oct 20 11:46:45 node1 corosync[3459]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.

6.4 On node1, check whether any errors occurred during startup (filtering out the expected stonith-related unpack_resources messages)

[root@node1 corosync]# grep -i error: /var/log/messages |grep -v unpack_resources

6.5 On node1, check whether pacemaker has started (output like the following means it started correctly)

[root@node1 corosync]# grep -i pcmk_startup /var/log/messages
Oct 20 11:46:10 node1 corosync[3459]:   [pcmk  ] info: pcmk_startup: CRM: Initialized
Oct 20 11:46:10 node1 corosync[3459]:   [pcmk  ] Logging: Initialized pcmk_startup
Oct 20 11:46:10 node1 corosync[3459]:   [pcmk  ] info: pcmk_startup: Maximum core file size is: 4294967295
Oct 20 11:46:10 node1 corosync[3459]:   [pcmk  ] info: pcmk_startup: Service: 9
Oct 20 11:46:10 node1 corosync[3459]:   [pcmk  ] info: pcmk_startup: Local hostname: node1.a.com

6.6 On node2, verify that the corosync engine started correctly

[root@node2 ~]# grep -i -e "corosync cluster engine" -e "configuration file" /var/log/messages
Aug 30 21:01:28 localhost smartd[2961]: Opened configuration file /etc/smartd.conf
Aug 30 21:01:28 localhost smartd[2961]: Configuration file /etc/smartd.conf was parsed, found DEVICESCAN, scanning devices
Oct 19 18:46:16 localhost smartd[3171]: Opened configuration file /etc/smartd.conf
Oct 19 18:46:16 localhost smartd[3171]: Configuration file /etc/smartd.conf was parsed, found DEVICESCAN, scanning devices
Oct 20 11:46:44 localhost corosync[4204]:   [MAIN  ] Corosync Cluster Engine ('1.2.7'): started and ready to provide service.
Oct 20 11:46:44 localhost corosync[4204]:   [MAIN  ] Successfully read main configuration file '/etc/corosync/corosync.conf'.

6.7 On node2, check that the initial membership notifications were sent

[root@node2 ~]# grep -i totem /var/log/messages
Oct 20 11:46:44 localhost corosync[4204]:   [TOTEM ] Initializing transport (UDP/IP).
Oct 20 11:46:44 localhost corosync[4204]:   [TOTEM ] Initializing transmit/receive security: libtomcrypt SOBER128/SHA1HMAC (mode 0).
Oct 20 11:46:45 localhost corosync[4204]:   [TOTEM ] The network interface [192.168.10.82] is now up.
Oct 20 11:46:46 localhost corosync[4204]:   [TOTEM ] A processor joined or left the membership and a new membership was formed.

6.8 On node2, check whether any errors occurred (filtering out the expected stonith-related unpack_resources messages)

[root@node2 ~]# grep -i error: /var/log/messages |grep -v unpack_resources

6.9 On node2, check whether pacemaker has started (output like the following means it started correctly)

[root@node2 ~]# grep -i pcmk_startup /var/log/messages
Oct 20 11:46:46 localhost corosync[4204]:   [pcmk  ] info: pcmk_startup: CRM: Initialized
Oct 20 11:46:46 localhost corosync[4204]:   [pcmk  ] Logging: Initialized pcmk_startup
Oct 20 11:46:46 localhost corosync[4204]:   [pcmk  ] info: pcmk_startup: Maximum core file size is: 4294967295
Oct 20 11:46:46 localhost corosync[4204]:   [pcmk  ] info: pcmk_startup: Service: 9
Oct 20 11:46:46 localhost corosync[4204]:   [pcmk  ] info: pcmk_startup: Local hostname: node2.a.com

Check the cluster status on node1 and node2

[root@node1 corosync]# crm status
============
Last updated: Sat Oct 20 12:03:07 2012
Stack: openais
Current DC: node1.a.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
0 Resources configured.
============

Online: [ node1.a.com node2.a.com ]

[root@node2 ~]# crm status
============
Last updated: Sat Oct 20 12:04:26 2012
Stack: openais
Current DC: node1.a.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
0 Resources configured.
============

Online: [ node1.a.com node2.a.com ]

Part 5: Cluster management

5.1 Configure the cluster's global properties

corosync enables stonith by default, but this cluster has no stonith device, so the default configuration cannot be used as-is. Disable stonith first with the following command (the CIB is cluster-wide, so running it once on either node is enough):

[root@node1 corosync]# cd
[root@node1 ~]# crm configure property stonith-enabled=false
[root@node2 ~]# crm configure property stonith-enabled=false

For a two-node cluster, configure this option to ignore quorum: vote counts no longer matter, so a single surviving node can keep running the resources.

[root@node2 ~]# crm configure property no-quorum-policy=ignore

Define the default resource stickiness

[root@node1 ~]# crm configure rsc_defaults resource-stickiness=100

[root@node2 ~]# crm configure rsc_defaults resource-stickiness=100

5.2 Define the cluster services and resources

5.2.1 Adjust the DRBD roles

[root@node2 ~]# drbd-overview
  0:mysql  Connected Primary/Secondary UpToDate/UpToDate C r---- 
[root@node2 ~]# drbdadm secondary mysql
[root@node2 ~]# drbd-overview
  0:mysql  Connected Secondary/Secondary UpToDate/UpToDate C r----

[root@node1 ~]# drbd-overview
  0:mysql  Connected Secondary/Secondary UpToDate/UpToDate C r----
[root@node1 ~]# drbdadm primary mysql
[root@node1 ~]# drbd-overview
  0:mysql  Connected Primary/Secondary UpToDate/UpToDate C r----

5.2.2 Configure DRBD as a cluster resource

1. Check the current cluster configuration and make sure the global properties required for a two-node cluster are in place

[root@node1 ~]# crm configure show
node node1.a.com
node node2.a.com
property $id="cib-bootstrap-options" \
    dc-version="1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f" \
    cluster-infrastructure="openais" \
    expected-quorum-votes="2" \
    stonith-enabled="false" \
    no-quorum-policy="ignore"
rsc_defaults $id="rsc-options" \
    resource-stickiness="100"

2. Hand the already-configured DRBD device /dev/drbd0 over to the cluster. Stop the drbd service on both nodes and disable it at boot, because pacemaker will manage it from now on:

[root@node1 ~]# service drbd stop
Stopping all DRBD resources: .
[root@node1 ~]# chkconfig drbd off
[root@node1 ~]# ssh node2 'service drbd stop'
Stopping all DRBD resources: .
[root@node1 ~]# ssh node2 'chkconfig drbd off'
[root@node1 ~]# drbd-overview
drbd not loaded
[root@node1 ~]# ssh node2 'drbd-overview'
drbd not loaded

3. Configure DRBD as a cluster resource

The resource agent (RA) for DRBD is provided by linbit; its path is /usr/lib/ocf/resource.d/linbit/drbd. Use the following commands to view the RA classes and the RA's meta information:

[root@node1 ~]# crm ra classes
heartbeat
lsb
ocf / heartbeat linbit pacemaker
stonith
[root@node1 ~]# crm ra list ocf linbit
drbd  

4. View the DRBD resource agent's details

[root@node1 ~]# crm ra info ocf:linbit:drbd
This resource agent manages a DRBD resource
as a master/slave resource. DRBD is a shared-nothing replicated storage
device. (ocf:linbit:drbd)

Master/Slave OCF Resource Agent for DRBD

Parameters (* denotes required, [] the default):

drbd_resource* (string): drbd resource name
    The name of the drbd resource from the drbd.conf file.

drbdconf (string, [/etc/drbd.conf]): Path to drbd.conf
    Full path to the drbd.conf file.

Operations' defaults (advisory minimum):

    start         timeout=240
    promote       timeout=90
    demote        timeout=90
    notify        timeout=90
    stop          timeout=100
    monitor_Slave interval=20 timeout=20 start-delay=1m
    monitor_Master interval=10 timeout=20 start-delay=1m

5. DRBD must run on both nodes at the same time, but in the primary/secondary model only one node can be master while the other remains slave. It is therefore a special kind of cluster resource: a multi-state (master/slave) clone, whose instances are promoted to master or kept as slave, and which expects both nodes to be in the slave state when the service first starts. (The primitive below is defined with the ocf:heartbeat:drbd agent; the ocf:linbit:drbd agent listed above can be used in the same way.)

[root@node1 ~]# crm
crm(live)# configure
crm(live)configure# primitive mysqldrbd ocf:heartbeat:drbd params drbd_resource="mysql" op monitor role="Master" interval="30s" op monitor role="Slave" interval="31s" op start timeout="240s" op stop timeout="100s"
crm(live)configure# ms MS_mysqldrbd mysqldrbd meta master-max=1  master-node-max=1 clone-max=2 clone-node-max=1 notify="true"
crm(live)configure# show mysqldrbd

After confirming there are no mistakes, commit (the screenshot showing this step is omitted):

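In the crm shell the check-and-commit step is simply:

crm(live)configure# verify
crm(live)configure# commit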

6. Check the current cluster status

[root@node2 ~]# crm status
============
Last updated: Sat Oct 20 14:54:49 2012
Stack: openais
Current DC: node2.a.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
1 Resources configured.
============

Online: [ node1.a.com node2.a.com ]

Master/Slave Set: MS_mysqldrbd [mysqldrbd]
     Masters: [ node2.a.com ]
     Slaves: [ node1.a.com ]

The output above shows that the drbd primary is now node2.a.com and the secondary is node1.a.com. You can also verify on the host itself whether it has become primary for the mysql resource:

[root@node2 ~]# drbdadm role mysql
Primary/Secondary

[root@node2 ~]# drbd-overview
  0:mysql  Connected Primary/Secondary UpToDate/UpToDate C r----

7. Next, have the cluster mount the DRBD device automatically at /mnt/mysqldata. A filesystem resource like this must run on the DRBD master node, and it can only start after DRBD on that node has been promoted to primary.

8. Make sure the device is unmounted on both nodes:

[root@node1 ~]# umount /dev/drbd0
umount: /dev/drbd0: not mounted

[root@node2 ~]# umount /dev/drbd0
umount: /dev/drbd0: not mounted

9. The following is still done on node2.a.com:

[root@node2 ~]# crm
crm(live)# configure
crm(live)configure# primitive MysqlFS ocf:heartbeat:Filesystem params device="/dev/drbd0" directory="/mnt/mysqldata" fstype="ext3" op start timeout=60s op stop timeout=60s
crm(live)configure# show changed
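As before, verify and commit before leaving the crm shell so that the MysqlFS resource actually becomes active (this is the step the screenshot captured):

crm(live)configure# verify
crm(live)configure# commit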

10. Define the remaining MySQL resources on node2

First create an IP address resource for the MySQL cluster. This is the virtual IP the cluster presents, i.e. the address clients use to reach the MySQL server:

[root@node2 ~]# crm configure primitive myip ocf:heartbeat:IPaddr params ip=192.168.10.88

[root@node2 ~]# crm configure primitive mysqlserver lsb:mysqld

[root@node2 ~]# crm status
============
Last updated: Sat Oct 20 19:04:42 2012
Stack: openais
Current DC: node1.a.com - partition with quorum
Version: 1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f
2 Nodes configured, 2 expected votes
4 Resources configured.
============

Online: [ node1.a.com node2.a.com ]

Master/Slave Set: MS_mysqldrbd [mysqldrbd]
    Masters: [ node2.a.com ]
     Slaves: [ node1.a.com ]
MysqlFS    (ocf::heartbeat:Filesystem):    Started node2.a.com
myip    (ocf::heartbeat:IPaddr):    Started node2.a.com
mysqlserver    (lsb:mysqld):    Started node2.a.com

11. Configure the resource constraints

[root@node1 ~]# crm

crm(live)# configure

crm(live)configure# colocation MysqlFS_with_mysqldrbd inf: MysqlFS MS_mysqldrbd:Master myip mysqlserver

crm(live)configure# order MysqlFS_after_mysqldrbd inf: MS_mysqldrbd:promote MysqlFS:start

crm(live)configure# order myip_after_MysqlFS mandatory: MysqlFS myip

crm(live)configure# order mysqlserver_after_myip mandatory: myip mysqlserver

crm(live)configure# show changed

colocation MysqlFS_with_mysqldrbd inf: MysqlFS MS_mysqldrbd:Master myip mysqlserver

order MysqlFS_after_mysqldrbd inf: MS_mysqldrbd:promote MysqlFS:start

order myip_after_MysqlFS inf: MysqlFS myip

order mysqlserver_after_myip inf: myip mysqlserver

crm(live)configure# verify

crm(live)configure# commit

crm(live)configure# exit

bye

12. Review the configuration and status, then test:

[root@node2 ~]# crm configure show
node node1.a.com
node node2.a.com
primitive MysqlFS ocf:heartbeat:Filesystem \
    params device="/dev/drbd0" directory="/mnt/mysqldata" fstype="ext3" \
    op start interval="0" timeout="60s" \
    op stop interval="0" timeout="60s"
primitive myip ocf:heartbeat:IPaddr \
    params ip="192.168.10.88"
primitive mysqldrbd ocf:heartbeat:drbd \
    params drbd_resource="mysql" \
    op monitor interval="30s" role="Master" \
    op monitor interval="31s" role="Slave" \
    op start interval="0" timeout="240s" \
    op stop interval="0" timeout="100s"
primitive mysqlserver lsb:mysqld
ms MS_mysqldrbd mysqldrbd \
    meta master-max="1" master-node-max="1" clone-max="2" clone-node-max="1" notify="true"
colocation MysqlFS_with_mysqldrbd inf: MysqlFS MS_mysqldrbd:Master myip mysqlserver
order MysqlFS_after_mysqldrbd inf: MS_mysqldrbd:promote MysqlFS:start
order myip_after_MysqlFS inf: MysqlFS myip
property $id="cib-bootstrap-options" \
    dc-version="1.1.5-1.1.el5-01e86afaaa6d4a8c4836f68df80ababd6ca3902f" \
    cluster-infrastructure="openais" \
    expected-quorum-votes="2" \
    stonith-enabled="false" \
    no-quorum-policy="ignore"
rsc_defaults $id="rsc-options" \
    resource-stickiness="100"

The configuration confirms the service is running on node2.

13. Check the running services on node2

[root@node2 ~]# service mysqld status
MySQL running (4163)                                       [ OK ]

14. Check the DRBD mount on node2

[root@node2 ~]# mount |grep drbd
/dev/drbd0 on /mnt/mysqldata type ext3 (rw)

[root@node2 ~]# ll /mnt/mysqldata/
total 24
drwxr-xr-x 5 mysql mysql  4096 10-20 18:10 data
drwx------ 2 root  root  16384 10-19 23:45 lost+found
-rw-r--r-- 1 root  root      0 10-19 23:48 uodekoaix
-rw-r--r-- 1 root  root      7 10-19 23:48 zhaoke

(crm status screenshot on node2 omitted)

Checking the cluster status from node1 gives the same picture (screenshot omitted).

Part 6: Testing

Everything is working normally, so verify that the MySQL service can actually be reached:

Take node2 offline

First, create a user named user1 with password 123456.

The MySQL service is reached through the VIP 192.168.10.88, so create an account that hosts on the 192.168.x.x network can use. Because the account lives in the DRBD-backed data directory, it is available on whichever node is currently the primary:

[root@node1 ~]# mysql

...

mysql> grant all on *.* to user1@'192.168.%.%' identified by '123456';

Query OK, 0 rows affected (0.01 sec)

mysql> flush privileges;

Query OK, 0 rows affected (0.00 sec)

mysql> exit

Bye

Now move the service over to node1 by making sure node1 is online and putting node2 into standby:

[root@node1 ~]# crm node online

[root@node2 ~]# crm node standby

(crm status screenshots taken after the failover omitted)

Client test (screenshots omitted)
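The client screenshots show a host on the 192.168.10.0/24 network connecting through the VIP; assuming the mysql client is installed on that machine, the check looks like this (user1/123456 as created above):

mysql -u user1 -p -h 192.168.10.88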

Take node1 offline

The user1 account (password 123456) created earlier is stored on the DRBD device, so once node2 takes over the service it is available there as well; clients keep connecting through the VIP 192.168.10.88.

Put node1 into standby and bring node2 back online:

[root@node1 ~]# crm node standby

[root@node2 ~]# crm node online

(crm status screenshots after failing back to node2 omitted)

Client test

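Connecting through the VIP again (same hypothetical client host as above) now reaches the instance running on node2; the session below follows:

mysql -u user1 -p -h 192.168.10.88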
mysql> create database zhaoke;
Query OK, 1 row affected (0.04 sec)

mysql> use zhaoke;
Database changed
mysql> create table a
    -> (name char(5),    
    -> age int,
    -> sex char(2)
    -> );
Query OK, 0 rows affected (0.32 sec)
mysql> select * from a;               
Empty set (0.05 sec)
mysql> insert into a values ('zhaok',22,'nv');
Query OK, 1 row affected, 1 warning (0.07 sec)

mysql> select * from a;
+-------+------+------+
| name  | age  | sex  |
+-------+------+------+
| zhaok |   22 | nv   |
+-------+------+------+
1 row in set (0.00 sec)

mysql> desc a;
+-------+---------+------+-----+---------+-------+
| Field | Type    | Null | Key | Default | Extra |
+-------+---------+------+-----+---------+-------+
| name  | char(5) | YES  |     | NULL    |       |
| age   | int(11) | YES  |     | NULL    |       |
| sex   | char(2) | YES  |     | NULL    |       |
+-------+---------+------+-----+---------+-------+
3 rows in set (0.07 sec)

mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| performance_schema |
| test               |
| zhaoke             |
+--------------------+
5 rows in set (0.01 sec)

While node2 is the active database server, the database created a moment ago is still visible (screenshot omitted).

mysql> drop database zhaoke;
Query OK, 1 row affected (0.01 sec)

mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| performance_schema |
| test               |
+--------------------+
4 rows in set (0.01 sec)

mysql>
