Pacemaker

1. Install Pacemaker and corosync on Server2 and Server3

[root@server2 ~]# yum install pacemaker corosync -y
[root@server2 ~]#ls
crmsh-1.2.6-0.rc2.2.1.x86_64.rpm  pssh-2.3.1-2.1.x86_64.rpm
[root@server2 ~]#yum install * -y
[root@server2 ~]# cd /etc/corosync/
[root@server2 corosync]# ls
corosync.conf.example  corosync.conf.example.udpu  service.d  uidgid.d
[root@server2 corosync]# cp corosync.conf.example corosync.conf
[root@server2 corosync]# vim corosync.conf
.....
# Please read the corosync.conf.5 manual page
compatibility: whitetank

totem {
    version: 2
    secauth: off
    threads: 0
    interface {
        ringnumber: 0
        bindnetaddr: 172.25.66.0    # change this to your own network segment
        mcastaddr: 226.94.1.1
        mcastport: 6005         # must be identical on both cluster nodes
        ttl: 1
    }
}

logging {
    fileline: off
    to_stderr: no
    to_logfile: yes
    to_syslog: yes
    logfile: /var/log/cluster/corosync.log
    debug: off
    timestamp: on
    logger_subsys {
        subsys: AMF
        debug: off
    }
}

amf {
    mode: disabled
}
service {    # run pacemaker as a corosync plugin: starting corosync starts pacemaker automatically
    ver: 0
    name: pacemaker
}
....
[root@server2 corosync]# /etc/init.d/corosync start
Starting Corosync Cluster Engine (corosync):               [  OK  ]
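
A quick way to confirm that corosync actually formed the ring (a sketch using tools shipped with corosync 1.x):

corosync-cfgtool -s             # ring status: "ring 0 active with no faults" is what we want
corosync-objctl | grep member   # list the members that have joined the ring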

** Sync the same configuration to Server3 (and start corosync there as well)
[root@server2 corosync]# scp corosync.conf [email protected]:/etc/corosync/
root@172.25.66.3's password: 
corosync.conf                                   100%  479     0.5KB/s   00:00   
[root@server2 corosync]# crm status     # check the cluster status
Last updated: Sun Oct  1 10:07:11 2017
Last change: Sun Oct  1 10:03:28 2017 via crmd on server2
Stack: classic openais (with plugin)
Current DC: server2 - partition with quorum
Version: 1.1.10-14.el6-368c726
2 Nodes configured, 2 expected votes
0 Resources configured


Online: [ server2 server3 ]
[root@server2 corosync]# crm_mon    # live monitoring
[root@server2 corosync]#
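
Before adding resources it is worth validating the empty configuration; crm_verify ships with pacemaker, and at this stage it will typically complain that no STONITH resources are configured, which the fence setup below resolves:

crm_verify -LV      # validate the live CIB and print any configuration errors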

Add the fence mechanism

[root@foundation66 Desktop]# systemctl status fence_virtd.service
[root@server2 corosync]# crm 
crm(live)# configure 
crm(live)configure# show
node server2
node server3
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2"
crm(live)configure# bye
bye
[root@server2 corosync]# stonith_admin -I
 fence_pcmk
 fence_legacy
2 devices found
[root@server2 corosync]# yum install fence-virt -y  # install on server3 as well
[root@server2 corosync]# stonith_admin -I
 fence_xvm
 fence_virt
 fence_pcmk
 fence_legacy
4 devices found
[root@server2 corosync]# crm configure show
node server2
node server3
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2"
[root@server2 ~]# crm
crm(live)# configure 
crm(live)configure# show
node server2
node server3
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2"
crm(live)configure# primitive fence stonith:fence_xvm params pcmk_host_map="server2:server2;server3:server3" op monitor interval=1min
crm(live)configure# commit 
crm(live)configure# show
node server2
node server3
primitive fence stonith:fence_xvm \
    params pcmk_host_map="server2:server2;server3:server3" \
    op monitor interval="1min"
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2"
crm(live)configure# bye
bye
[root@server2 ~]# mkdir /etc/cluster   # create the same directory on server3 as well

Send the key from the physical host

[root@foundation66 Desktop]# systemctl start fence_virtd.service 
[root@foundation66 Desktop]# systemctl status fence_virtd.service
[root@foundation66 Desktop]# scp /etc/cluster/fence_xvm.key [email protected]:/etc/cluster/
root@172.25.66.2's password: 
fence_xvm.key                                  100%  128     0.1KB/s   00:00    
[root@foundation66 Desktop]# scp /etc/cluster/fence_xvm.key [email protected]:/etc/cluster/
[email protected]'s password: 
fence_xvm.key                                  100%  128     0.1KB/s   00:00    
[root@foundation66 Desktop]# 
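
If /etc/cluster/fence_xvm.key had not existed yet on the physical host, it could be generated and fence_virtd configured roughly as follows (a sketch; the interactive setup asks for the multicast listener, the libvirt backend and the bridge interface):

mkdir -p /etc/cluster
dd if=/dev/urandom of=/etc/cluster/fence_xvm.key bs=128 count=1   # 128-byte shared key
fence_virtd -c                                                    # interactive configuration
systemctl restart fence_virtd.service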

Add the fence resource

[root@server2 cluster]# crm
crm(live)# resource 
crm(live)resource# show
 fence  (stonith:fence_xvm):    Stopped 
crm(live)resource# refresh 
Waiting for 1 replies from the CRMd. OK
crm(live)resource# start vmfence
ERROR: resource vmfence does not exist
crm(live)# configure 
crm(live)configure# show
node server2
node server3
primitive fence stonith:fence_xvm \
    params pcmk_host_map="server2:server2;server3:server3" \
    op monitor interval="1min"
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2"
crm(live)configure# cd
crm(live)# resource 
crm(live)resource# refresh 
Waiting for 1 replies from the CRMd. OK
crm(live)resource# show
 fence  (stonith:fence_xvm):    Stopped 
crm(live)resource# start fence
crm(live)resource# show
 fence  (stonith:fence_xvm):    Stopped 
crm(live)resource# bye
bye
[root@server2 cluster]# 
**** Run crm_mon on the other node to watch the status

Add the VIP resource

[root@server2 cluster]# crm
crm(live)# configure 
crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.66.100 cidr_netmask=24 op monitor interval=1min
crm(live)configure# commit 
crm(live)configure# bye 
bye
[root@server2 cluster]# crm
crm(live)# configure 
crm(live)configure# property no-quorum-policy=ignore    # ignore loss of quorum (needed for a two-node cluster)
crm(live)configure# commit 
crm(live)configure# show
node server2
node server3
primitive fence stonith:fence_xvm \
    params pcmk_host_map="server2:server2;server3:server3" \
    op monitor interval="1min" \
    meta target-role="Started"
primitive vip ocf:heartbeat:IPaddr2 \
    params ip="172.25.66.100" cidr_netmask="24" \
    op monitor interval="1min"
property $id="cib-bootstrap-options" \
    dc-version="1.1.10-14.el6-368c726" \
    cluster-infrastructure="classic openais (with plugin)" \
    expected-quorum-votes="2" \
    no-quorum-policy="ignore"
crm(live)configure# cd
crm(live)# resource 
crm(live)resource# cleanup vmfence
Error performing operation: No such device
crm(live)resource# cleanup fence    # clear the resource's failure state
Cleaning up fence on server2
Cleaning up fence on server3
Waiting for 1 replies from the CRMd. OK
crm(live)resource# bye
bye
[root@server2 cluster]# 

Monitor the status with crm_mon

Last updated: Sun Oct  1 11:09:35 2017
Last change: Sun Oct  1 11:09:26 2017 via crmd on server3
Stack: classic openais (with plugin)
Current DC: server2 - partition with quorum
Version: 1.1.10-14.el6-368c726
2 Nodes configured, 2 expected votes
2 Resources configured


Online: [ server2 server3 ]

fence   (stonith:fence_xvm):    Started server2
vip     (ocf::heartbeat:IPaddr2):   Started server3
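
The fence agent can also be exercised by hand from either cluster node before trusting it (a sketch; the names must match the libvirt domain names on the physical host):

fence_xvm -o list               # list the domains fence_virtd can see
fence_xvm -o reboot -H server3  # force-reboot server3; crm_mon should show it drop offline and come back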

# Node failover: the VIP is currently on server3; when server3 goes off it migrates
[root@server3 ~]# ip addr show
 inet 172.25.66.100/24 brd 172.25.66.255 scope global secondary eth1

Test:

[root@server3 ~]# /etc/init.d/corosync stop
Signaling Corosync Cluster Engine (corosync) to terminate: [  OK  ]
Waiting for corosync services to unload:.                  [  OK  ]
[root@server3 ~]# 

[root@server2 ~]# ip addr
.....
    inet 172.25.66.100/24 brd 172.25.66.255 scope global secondary eth1
.....
[root@server2 ~]# 

Do not start cluster-managed resources by hand.

# Add the Apache service

Enable the server-status handler in /etc/httpd/conf/httpd.conf so the cluster can monitor httpd:
.....
<Location /server-status>
    SetHandler server-status
    Order deny,allow
    Deny from all
    Allow from 127.0.0.1
</Location>
.....
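
The httpd service itself should then be handed to the cluster rather than started manually; a minimal sketch in the crm shell (the names website and webgroup are placeholders, and the stock /etc/httpd/conf/httpd.conf path is assumed):

crm configure primitive website ocf:heartbeat:apache \
    params configfile=/etc/httpd/conf/httpd.conf \
    op monitor interval=1min
crm configure group webgroup vip website    # keep the VIP and httpd on the same node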

Keepalived

Lab environment:
    Server1 and Server2 provide high availability
    Server3 and Server4 are the backend real servers (RS)

[root@server1 ~]# ls
keepalived-1.3.6.tar.gz
[root@server1 ~]# tar zxf keepalived-1.3.6.tar.gz 
[root@server1 ~]# cd keepalived-1.3.6
[root@server1 keepalived-1.3.6]# yum install gcc openssl-devel -y
[root@server1 keepalived-1.3.6]# ./configure --prefix=/usr/local/keepalived --with-init=SYSV
.....
Use IPVS Framework       : Yes
IPVS use libnl           : No
IPVS syncd attributes    : No
.....
[root@server1 keepalived-1.3.6]# make && make install
[root@server1 etc]# chmod +x /usr/local/keepalived/etc/rc.d/init.d/keepalived
[root@server1 etc]# ln -s /usr/local/keepalived/etc/rc.d/init.d/keepalived /etc/init.d/
[root@server1 etc]# ln -s /usr/local/keepalived/etc/sysconfig/keepalived /etc/sysconfig/
[root@server1 etc]# ln -s /usr/local/keepalived/etc/keepalived/ /etc/
[root@server1 etc]# ln -s /usr/local/keepalived/sbin/keepalived /sbin/

Master/backup mode

[root@server1 etc]# vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
    root@localhost
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   #vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_instance VI_1 {
    state MASTER        # the backup node uses BACKUP
    interface eth1
    virtual_router_id 117   # must match on both nodes, range 0-255
    priority 100        # master must be higher than backup (larger number = higher priority)
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {     # define the VIP
    172.25.66.100/24
    }
}

virtual_server 172.25.66.100 80 {   # define the backend real servers
    delay_loop 6
    lb_algo rr          # scheduling algorithm
    lb_kind DR          # forwarding mode
    #persistence_timeout 50     # persistence; left commented so successive requests can hit different real servers
    protocol TCP

    real_server 172.25.66.3 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 172.25.66.4 80 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

[root@server1 etc]# /etc/init.d/keepalived start
** The VIP has now been added automatically

Test:

Install the Apache service on Server3 and Server4, and add the VIP:

ip addr add 172.25.66.100 dev eth1
/etc/init.d/httpd start
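
Because this is DR mode and the VIP sits directly on each real server, the real servers must also be stopped from answering ARP for the VIP, otherwise clients can bypass the director. A sketch using arptables (the arptables_jf package on RHEL 6); on Server4 the source address in the OUT rule would be 172.25.66.4:

yum install arptables_jf -y
arptables -A IN -d 172.25.66.100 -j DROP                                # never answer ARP for the VIP
arptables -A OUT -s 172.25.66.100 -j mangle --mangle-ip-s 172.25.66.3   # rewrite outgoing ARP source to the RS address
/etc/init.d/arptables_jf save                                           # persist the rules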

[root@foundation66 Desktop]# for i in {1..10}; do curl 172.25.66.100; done
Server4
Server3
Server4
Server3
Server4
Server3
Server4
Server3
Server4
Server3
[root@foundation66 Desktop]#



# Backup node (Server2)
[root@server1 etc]# scp -r /usr/local/keepalived/ [email protected]:/usr/local/
[root@server2 keepalived]# chmod +x /usr/local/keepalived/etc/rc.d/init.d/keepalived
[root@server2 keepalived]# ln -s /usr/local/keepalived/etc/rc.d/init.d/keepalived /etc/init.d/
[root@server2 keepalived]# ln -s /usr/local/keepalived/etc/sysconfig/keepalived /etc/sysconfig/
[root@server2 keepalived]# ln -s /usr/local/keepalived/etc/keepalived/ /etc/
[root@server2 keepalived]# ln -s /usr/local/keepalived/sbin/keepalived /sbin/
[root@server2 keepalived]# vim /etc/keepalived/keepalived.conf 
.....
MASTER ---> BACKUP
lower the priority (the backup must be lower than the master)
.....
[root@server2 keepalived]# /etc/init.d/keepalived start
Starting keepalived:                                       [  OK  ]
[root@server2 keepalived]# ip addr
    * The VIP is not here yet, but as soon as the master goes down the backup takes over the VIP immediately

Stop keepalived on Server1 and the VIP migrates to Server2
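
While running this test, the VRRP transition can be watched on Server2 (a sketch, assuming keepalived logs to syslog):

tail -f /var/log/messages | grep -i vrrp     # expect "Entering MASTER STATE" the moment Server1 stops keepalived
ip addr show eth1 | grep 172.25.66.100       # the VIP should now be on Server2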

[root@server2 keepalived]# ipvsadm -l
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  172.25.66.100:http rr
  -> server3:http                 Route   1      0          6         
  -> server4:http                 Route   1      0          5         
[root@server2 keepalived]# 






[root@server3 ~]# sysctl -a | grep ipv4    # inspect the real server's ipv4 kernel parameters (arp_ignore / arp_announce live here)

How FTP works over LVS: relies on the persistence parameter (see the sketch after this list)

VRRP: Virtual Router Redundancy Protocol
keepalived: not well suited for high availability of stateful services
I/O multiplexing
Why Apache is slower than Nginx
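
For the FTP note above: because FTP uses separate control and data connections, LVS normally pins a client to one real server with persistence; a hypothetical keepalived.conf fragment (not part of this lab) would look like:

virtual_server 172.25.66.100 21 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 360     # keep each client on the same real server for 6 minutes
    protocol TCP

    real_server 172.25.66.3 21 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
        }
    }
}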

LVS kernel compilation

FullNAT + keepalived

[root@server4 ~]# ls
kernel-2.6.32-220.23.1.el6.src.rpm  newt-devel-0.52.11-3.el6.x86_64.rpm
Lvs-fullnat-synproxy.tar.gz         slang-devel-2.2.1-1.el6.x86_64.rpm
asciidoc-8.4.5-4.1.el6.noarch.rpm
[root@server4 ~]# rpm -ivh kernel-2.6.32-220.23.1.el6.src.rpm 
    **** prints many warnings; they can be ignored
[root@server4 ~]# cd rpmbuild/
[root@server4 rpmbuild]# ls
SOURCES  SPECS
[root@server4 rpmbuild]# yum install rpm-build -y
[root@server4 rpmbuild]# ls
SOURCES  SPECS
[root@server4 rpmbuild]# cd SPECS/
[root@server4 SPECS]# ls
kernel.spec
[root@server4 SPECS]# rpmbuild -bp kernel.spec 
    *** fails with errors (missing build dependencies)
[root@server4 SPECS]# yum install gcc redhat-rpm-config patchutils xmlto asciidoc elfutils-libelf-devel zlib-devel binutils-devel newt-devel python-devel perl-ExtUtils-Embed
[root@server4 SPECS]# rpmbuild -bp kernel.spec 
[root@server4 ~]# yum install newt-devel-0.52.11-3.el6.x86_64.rpm slang-devel-2.2.1-1.el6.x86_64.rpm  -y
[root@server4 ~]# rpm -ivh asciidoc-8.4.5-4.1.el6.noarch.rpm 
[root@server4 SPECS]# yum install hmaccalc -y
[root@server4 SPECS]# rpmbuild -bp kernel.spec 
*** connect another ssh session to generate entropy (or use rngd as below)
[root@server4 ~]# yum install rng-too*
[root@server4 ~]# rngd -r /dev/urandom
[root@server4 SPECS]# cd
[root@server4 ~]# ls
asciidoc-8.4.5-4.1.el6.noarch.rpm   newt-devel-0.52.11-3.el6.x86_64.rpm
kernel-2.6.32-220.23.1.el6.src.rpm  rpmbuild
Lvs-fullnat-synproxy.tar.gz         slang-devel-2.2.1-1.el6.x86_64.rpm
[root@server4 ~]# tar zxf  Lvs-fullnat-synproxy.tar.gz 
[root@server4 ~]# cd lvs-fullnat-synproxy/
[root@server4 lvs-fullnat-synproxy]# ls
lvs-2.6.32-220.23.1.el6.patch  README
lvs-tools.tar.gz               toa-2.6.32-220.23.1.el6.patch
[root@server4 lvs-fullnat-synproxy]# cd
[root@server4 ~]# cd rpmbuild/BUILD/kernel-2.6.32-220.23.1.el6/linux-2.6.32-220.23.1.el6.x86_64/
[root@server4 linux-2.6.32-220.23.1.el6.x86_64]# pwd
/root/rpmbuild/BUILD/kernel-2.6.32-220.23.1.el6/linux-2.6.32-220.23.1.el6.x86_64
[root@server4 linux-2.6.32-220.23.1.el6.x86_64]# cp ~/lvs-fullnat-synproxy/lvs-2.6.32-220.23.1.el6.patch .
[root@server4 linux-2.6.32-220.23.1.el6.x86_64]# patch -p1 < lvs-2.6.32-220.23.1.el6.patch 
[root@server4 linux-2.6.32-220.23.1.el6.x86_64]# make 

Extend the disk (the kernel build filled up /)

[root@server4 ~]# df
Filesystem                   1K-blocks    Used Available Use% Mounted on
/dev/mapper/VolGroup-lv_root   7853764 7834672         0 100% /
tmpfs                           961188       0    961188   0% /dev/shm
/dev/vda1                       495844   33469    436775   8% /boot

[root@server4 ~]# pvs
  PV         VG       Fmt  Attr PSize PFree
  /dev/vda2  VolGroup lvm2 a--  8.51g    0 
[root@server4 ~]# pvcreate /dev/vdc 
  Physical volume "/dev/vdc" successfully created
[root@server4 ~]# vgextend VolGroup /dev/vdc
  Volume group "VolGroup" successfully extended
[root@server4 ~]# lvextend -L +8G /dev/VolGroup/lv_
lv_root  lv_swap  
[root@server4 ~]# lvextend -L +8G /dev/VolGroup/lv_root 
  Extending logical volume lv_root to 15.61 GiB
  Insufficient free space: 2048 extents needed, but only 2047 available
[root@server4 ~]# lvextend -l +2047 /dev/VolGroup/lv_root 
  Extending logical volume lv_root to 15.61 GiB
  Logical volume lv_root successfully resized
[root@server4 ~]# resize2fs /dev/VolGroup/lv_root 
resize2fs 1.41.12 (17-May-2010)
Filesystem at /dev/VolGroup/lv_root is mounted on /; on-line resizing required
old desc_blocks = 1, new_desc_blocks = 1
Performing an on-line resize of /dev/VolGroup/lv_root to 4090880 (4k) blocks.
The filesystem on /dev/VolGroup/lv_root is now 4090880 blocks long.
[root@server4 ~]# df -h
Filesystem                    Size  Used Avail Use% Mounted on
/dev/mapper/VolGroup-lv_root   16G  7.5G  7.2G  52% /
tmpfs                         939M     0  939M   0% /dev/shm
/dev/vda1                     485M   33M  427M   8% /boot
[root@server4 ~]#


[root@server4 linux-2.6.32-220.23.1.el6.x86_64]# make
[root@server4 linux-2.6.32-220.23.1.el6.x86_64]# make modules_install
[root@server4 linux-2.6.32-220.23.1.el6.x86_64]# make install
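
What usually follows (a sketch; exact steps per the README in the tarball): make the new kernel the default boot entry, reboot into it, and then build the FullNAT-aware user-space tools shipped in lvs-tools.tar.gz:

vim /boot/grub/grub.conf        # set default=0 so the freshly installed kernel boots first
reboot
uname -r                        # should now report the 2.6.32-220.23.1 kernel with the FullNAT patch
cd ~/lvs-fullnat-synproxy
tar zxf lvs-tools.tar.gz        # patched keepalived and ipvsadm sources; build them per the README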

The difference between CGI and FastCGI
