A dual-master IPVS high-availability cluster

Step 1: Preparation

Start by installing five CentOS machines, each with a single NIC.
The cluster uses two virtual router addresses: 192.168.3.151 and 192.168.3.152.

  • Disable SELinux on all nodes
vi /etc/selinux/config
SELINUX=disabled
  • Disable the firewall on all nodes
chkconfig --level 123456 iptables off    # CentOS 6
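
These two settings only take effect at the next boot. To apply them to the running systems as well, and to cover the CentOS 7 node (lsh), which ships firewalld instead of the iptables service, something like the following works:

setenforce 0    # put SELinux into permissive mode immediately
service iptables stop    # CentOS 6: stop the running firewall
systemctl disable firewalld    # CentOS 7: do not start firewalld at boot
systemctl stop firewalld    # CentOS 7: stop the running firewall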

Configure the hosts table below on every server:

]# vim /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.3.10    lsh.com lsh  # time server, CentOS 7
192.168.3.101   n1.com  n1  # keepalived server 1, CentOS 6
192.168.3.102   n2.com  n2  # keepalived server 2, CentOS 6
192.168.3.103   n3.com  n3  # web server 1, CentOS 6
192.168.3.104   n4.com  n4  # web server 2, CentOS 6

Step 2: Configure time synchronization on node lsh

  • Configure the time server
]# yum -y install ntp ntpdate    # install the packages
]# vim /etc/ntp.conf    # edit the configuration file
server time.windows.com
server s2m.time.edu.cn
server 0.asia.pool.ntp.org
server 1.asia.pool.ntp.org
server 2.asia.pool.ntp.org
server 3.asia.pool.ntp.org
server 127.127.1.0    # use the local clock when no external source is reachable
fudge 127.127.1.0 stratum 10    # advertise the local clock at a low-priority stratum
restrict 192.168.3.0 mask 255.255.255.0 nomodify    # network allowed to sync from this server
]# systemctl start ntpd    # start the service
]# systemctl enable ntpd.service    # enable at boot
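
Once ntpd is up, you can confirm it has selected an upstream source (an asterisk in the first column marks the peer currently synced to):

]# ntpq -p    # list upstream peers and their sync status
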
  • Configure the other nodes to sync time
]# yum -y install ntpdate
]# ntpdate 192.168.3.10    # one-time sync against lsh
]# yum -y install chrony    # install the package for ongoing sync
]# vim /etc/chrony.conf    # edit the configuration file
server 192.168.3.10 iburst    # sync with the time server
]# chkconfig --level 35 chronyd on    # enable at boot on CentOS 6
]# systemctl enable chronyd.service    # enable at boot on CentOS 7
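
After starting chronyd (service chronyd start on CentOS 6, systemctl start chronyd.service on CentOS 7), each node can verify that it is tracking lsh:

]# chronyc sources -v    # '^*' marks the source chronyd is synced to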

Step 3: Configure the web servers on nodes n3 and n4

Configure node n3:
]# yum -y install httpd
]# vim /var/www/html/index.html    # give each node a different home page

www.n3.com
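
A quick alternative to editing the files by hand is to echo the page content directly, one command per node:

n3~]# echo "www.n3.com" > /var/www/html/index.html
n4~]# echo "www.n4.com" > /var/www/html/index.html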

Create the two scripts below. The second script, setrs2.sh, is identical except that vip is changed to 192.168.3.152 and iface to lo:1.

]# vim setrs.sh    # and likewise setrs2.sh
#!/bin/bash
#
# Configure the VIP on a loopback alias for an LVS-DR real server and
# suppress ARP for it, so that only the director answers ARP for the VIP.
vip='192.168.3.151'
netmask='255.255.255.255'
iface='lo:0'
case $1 in
start)
        # reply to ARP only when the target IP is on the receiving interface
        echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
        echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
        # never use the VIP as the source address in ARP announcements
        echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
        echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
        ifconfig $iface $vip netmask $netmask broadcast $vip up
        route add -host $vip dev $iface
        ;;
stop)
        ifconfig $iface down
        echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
        echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
        echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
        echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
        ;;
*)
        exit 1
esac
n3~]# bash -n setrs.sh    # check the script syntax
n3~]# bash -x setrs.sh start    # trace-run to apply; repeat for setrs2.sh
+ vip=192.168.3.151
+ netmask=255.255.255.255
+ iface=lo:0
+ case $1 in
+ echo 1
+ echo 1
+ echo 2
+ echo 2
+ ifconfig lo:0 192.168.3.151 netmask 255.255.255.255 broadcast 192.168.3.151 up
+ route add -host 192.168.3.151 dev lo:0
n3~]# service httpd start    # start the web service (CentOS 6)
n3~]# scp setrs* n4:/root    # copy the scripts to node n4
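
Note that the lo:0/lo:1 aliases and ARP sysctls set by these scripts are lost on reboot. A simple way to make them persistent on CentOS 6 is to call the scripts from /etc/rc.local (a sketch, assuming the scripts stay in /root):

n3~]# echo 'bash /root/setrs.sh start' >> /etc/rc.local
n3~]# echo 'bash /root/setrs2.sh start' >> /etc/rc.local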

Node n4 is then configured in exactly the same way.
Now use node lsh to check that both web servers respond:

[root@lsh ~]# curl http://192.168.3.103

www.n3.com

[root@lsh ~]# curl http://192.168.3.104

www.n4.com

Step 4: Configure keepalived on nodes n1 and n2

~]# yum -y install keepalived    # install keepalived
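
keepalived programs the IPVS rules itself, but ipvsadm is handy for inspecting them later (it is used in step 5), so install it on n1 and n2 as well:

~]# yum -y install ipvsadm    # command-line viewer for the IPVS table
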
~]# vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {
   notification_email {
     root@localhost  # recipient of alert mail
   }
   notification_email_from keepalived@localhost
   smtp_server 127.0.0.1  # mail server
   smtp_connect_timeout 30  # connect timeout (seconds)
   router_id n1  # identifier of this physical machine
   vrrp_mcast_group4 224.1.101.33  # multicast group for VRRP advertisements
}

vrrp_instance VI_1 {  # define a virtual router instance
    state MASTER  # initial state of this node
    interface eth0  # physical interface VRRP runs on
    virtual_router_id 31  # virtual router ID
    priority 100  # priority of this node, valid range 1-254
    advert_int 1  # VRRP advertisement interval (seconds)
    authentication {  # simple password authentication
        auth_type PASS
        auth_pass 11112222  # password, at most 8 characters
    }
    virtual_ipaddress {  # virtual IP address
        192.168.3.151/24 dev eth0
    }
    notify_master "/etc/keepalived/notify.sh master"  # script run on becoming master
    notify_backup "/etc/keepalived/notify.sh backup"  # script run on becoming backup
    notify_fault "/etc/keepalived/notify.sh fault"  # script run on entering fault state
}

vrrp_instance VI_2 {
    state BACKUP
    interface eth0
    virtual_router_id 32
    priority 96
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 33334444
    }
    virtual_ipaddress {
        192.168.3.152/24 dev eth0
    }
    notify_master "/etc/keepalived/notify.sh master"
    notify_backup "/etc/keepalived/notify.sh backup"
    notify_fault "/etc/keepalived/notify.sh fault"
}

virtual_server 192.168.3.151 80 {
    delay_loop 1  # health-check polling interval (seconds)
    lb_algo wrr  # scheduling algorithm (weighted round robin)
    lb_kind DR  # LVS forwarding method (direct routing)
    protocol TCP  # service protocol
    sorry_server 127.0.0.1 80  # fallback server used when all real servers fail
    real_server 192.168.3.103 80 {  # real server with health check
        weight 1
        HTTP_GET {  # application-layer (HTTP) health check
            url {
                path /index.html  # URL to probe
                status_code 200  # status code expected when healthy
            }
        nb_get_retry 3  # retries before marking the server down
        delay_before_retry 2  # delay between retries (seconds)
        connect_timeout 3  # connect timeout (seconds)
        }
    }
    real_server 192.168.3.104 80 {
        weight 1
        HTTP_GET {
            url {
                path /index.html
                status_code 200
            }
        nb_get_retry 3
        delay_before_retry 2
        connect_timeout 3
        }
    }
}
virtual_server 192.168.3.152 80 {
    delay_loop 1
    lb_algo wrr
    lb_kind DR
    protocol TCP
    sorry_server 127.0.0.1 80
    real_server 192.168.3.103 80 {
        weight 1
        HTTP_GET {
            url {
                path /index.html
                status_code 200
            }
        nb_get_retry 3
        delay_before_retry 2
        connect_timeout 3
        }
    }
    real_server 192.168.3.104 80 {
        weight 1
        HTTP_GET {
            url {
                path /index.html
                status_code 200
            }
        nb_get_retry 3
        delay_before_retry 2
        connect_timeout 3
        }
    }
}
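
The file above is n1's configuration (router_id n1). On n2 the two instances are mirrored so that each node is master for one VIP: judging by the tcpdump output in step 5, n2 runs VI_1 as BACKUP with priority 96 and VI_2 as MASTER with priority 100, with router_id set to n2.

The notify_* lines reference /etc/keepalived/notify.sh, which is not shown above. A minimal sketch (the exact mail wording is an assumption) that mails root on every state change:

]# vim /etc/keepalived/notify.sh
#!/bin/bash
# Mail a one-line state-change report to root; $1 is master, backup or fault.
contact='root@localhost'
notify() {
    local subject="$(hostname) changed to $1 at $(date +'%F %T')"
    echo "$subject" | mail -s "$subject" $contact
}
case $1 in
master|backup|fault)
    notify $1
    ;;
*)
    echo "Usage: $(basename $0) {master|backup|fault}"
    exit 1
    ;;
esac

Make it executable on both nodes: chmod +x /etc/keepalived/notify.sh.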

Step 5: Final testing

  • Open two virtual terminals on n2 to watch what is happening on its interface
    • tcpdump -i eth0 -nn host 224.1.101.33
      watch the 224.1.101.33 multicast traffic on n2's eth0
    • tail -f /var/log/messages
      follow n2's log in real time
[root@n2 ~]# service keepalived start    # start keepalived on n2
[root@n2 ~]# ip a l    # check the interfaces: both 151 and 152 have floated to this node
1: lo:  mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0:  mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:b8:49:7c brd ff:ff:ff:ff:ff:ff
    inet 192.168.3.102/24 brd 192.168.3.255 scope global eth0
    inet 192.168.3.152/24 scope global secondary eth0
    inet 192.168.3.151/24 scope global secondary eth0
    inet6 fe80::20c:29ff:feb8:497c/64 scope link 
       valid_lft forever preferred_lft forever
[root@n2 ~]# ipvsadm -ln    # inspect the IPVS virtual service table
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.3.151:80 wrr
  -> 192.168.3.103:80             Route   1      0          0         
  -> 192.168.3.104:80             Route   1      0          0         
TCP  192.168.3.152:80 wrr
  -> 192.168.3.103:80             Route   1      0          0         
  -> 192.168.3.104:80             Route   1      0          0   
08:47:59.102497 IP 192.168.3.102 > 224.1.101.33: VRRPv2, Advertisement, vrid 31, prio 96, authtype simple, intvl 1s, length 20
08:47:59.448451 IP 192.168.3.102 > 224.1.101.33: VRRPv2, Advertisement, vrid 32, prio 100, authtype simple, intvl 1s, length 20
08:48:00.104569 IP 192.168.3.102 > 224.1.101.33: VRRPv2, Advertisement, vrid 31, prio 96, authtype simple, intvl 1s, length 20
08:48:00.449542 IP 192.168.3.102 > 224.1.101.33: VRRPv2, Advertisement, vrid 32, prio 100, authtype simple, intvl 1s, length 20

In the live multicast capture only n2's heartbeats appear, so n2 automatically takes over both virtual router IPs, 192.168.3.151 and 192.168.3.152.

Apr 11 08:46:05 n2 Keepalived_vrrp[1269]: VRRP_Instance(VI_2) Transition to MASTER STATE
Apr 11 08:46:06 n2 Keepalived_vrrp[1269]: VRRP_Instance(VI_2) Entering MASTER STATE
Apr 11 08:46:06 n2 Keepalived_vrrp[1269]: VRRP_Instance(VI_2) setting protocol VIPs.
Apr 11 08:46:06 n2 Keepalived_vrrp[1269]: VRRP_Instance(VI_2) Sending gratuitous ARPs on eth0 for 192.168.3.152
Apr 11 08:46:06 n2 Keepalived_healthcheckers[1268]: Netlink reflector reports IP 192.168.3.152 added
Apr 11 08:46:07 n2 Keepalived_vrrp[1269]: VRRP_Instance(VI_1) Transition to MASTER STATE
Apr 11 08:46:08 n2 Keepalived_vrrp[1269]: VRRP_Instance(VI_1) Entering MASTER STATE
Apr 11 08:46:08 n2 Keepalived_vrrp[1269]: VRRP_Instance(VI_1) setting protocol VIPs.
Apr 11 08:46:08 n2 Keepalived_healthcheckers[1268]: Netlink reflector reports IP 192.168.3.151 added

n2's live log confirms this: both VI_1 and VI_2 enter the MASTER state.

[root@n1 ~]# service keepalived start
Starting keepalived:                                       [  OK  ]
[root@n1 ~]# ip a l
1: lo:  mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0:  mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:c4:68:e0 brd ff:ff:ff:ff:ff:ff
    inet 192.168.3.101/24 brd 192.168.3.255 scope global eth0
    inet 192.168.3.151/24 scope global secondary eth0
    inet6 fe80::20c:29ff:fec4:68e0/64 scope link 
       valid_lft forever preferred_lft forever
[root@n1 ~]# ipvsadm -ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.3.151:80 wrr
  -> 192.168.3.103:80             Route   1      0          0         
  -> 192.168.3.104:80             Route   1      0          0         
TCP  192.168.3.152:80 wrr
  -> 192.168.3.103:80             Route   1      0          0         
  -> 192.168.3.104:80             Route   1      0          0     

After keepalived is started on n1, 151 floats over to n1's eth0 interface, and its IPVS table is also in place.

09:19:30.724975 IP 192.168.3.101 > 224.1.101.33: VRRPv2, Advertisement, vrid 31, prio 100, authtype simple, intvl 1s, length 20
09:19:31.726726 IP 192.168.3.102 > 224.1.101.33: VRRPv2, Advertisement, vrid 32, prio 100, authtype simple, intvl 1s, length 20
09:19:31.726861 IP 192.168.3.101 > 224.1.101.33: VRRPv2, Advertisement, vrid 31, prio 100, authtype simple, intvl 1s, length 20
09:19:32.728876 IP 192.168.3.102 > 224.1.101.33: VRRPv2, Advertisement, vrid 32, prio 100, authtype simple, intvl 1s, length 20

Analyzing the multicast capture now, both n1 and n2 are sending heartbeats; since n1 holds the highest priority for 151 (vrid 31) and n2 for 152 (vrid 32), each node successfully takes over one virtual router.

Apr 11 09:12:45 n2 Keepalived_vrrp[1269]: VRRP_Instance(VI_1) Received higher prio advert
Apr 11 09:12:45 n2 Keepalived_vrrp[1269]: VRRP_Instance(VI_1) Entering BACKUP STATE
Apr 11 09:12:45 n2 Keepalived_vrrp[1269]: VRRP_Instance(VI_1) removing protocol VIPs.
Apr 11 09:12:45 n2 Keepalived_healthcheckers[1268]: Netlink reflector reports IP 192.168.3.151 removed

n2's log shows that its VI_1 no longer has the highest priority: it enters the BACKUP state, removes its protocol VIPs, and gives up 151.

[root@lsh ~]# curl http://192.168.3.151

www.n3.com

[root@lsh ~]# curl http://192.168.3.151

www.n4.com

[root@lsh ~]# curl http://192.168.3.152

www.n4.com

[root@lsh ~]# curl http://192.168.3.152

www.n3.com

Testing against both 151 and 152, each VIP round-robins across the two web nodes, so no server capacity sits idle and the load is balanced. And if either director fails, the other automatically takes over all of its services, keeping the site reachable. This completes the dual-master IPVS high-availability cluster.
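
To watch the failover itself, stop keepalived on one director and repeat the curl test (a sketch; the log and tcpdump output will mirror the excerpts above):

[root@n1 ~]# service keepalived stop    # 151 floats back to n2 within a few advert intervals
[root@lsh ~]# curl http://192.168.3.151    # still answered, now through n2
[root@n1 ~]# service keepalived start    # n1, having the higher priority, preempts 151 back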
