LVS/DR + keepalived Configuration

1. Shortcomings of the LVS/DR Setup and the Fix

    Although the earlier LVS setup was configured successfully and did achieve load balancing, testing showed that when a real server stops its httpd process, the director still forwards requests to it, so some requests fail. We therefore need a mechanism that checks the state of the real servers, and that is keepalived. Besides checking RS state, it can also monitor the state of a backup director; in other words, keepalived can provide HA cluster functionality, which of course requires a backup director.

    

2. Pre-Setup Work


Role                Hostname      eth0
Primary director    movies        192.168.1.111
Backup director     xiaomovies    192.168.1.64
Real server 1       bols          192.168.1.119
Real server 2       longls        192.168.1.120

        Install keepalived and ipvsadm on both the primary and the backup director;

        install nginx on both real servers;

        disable SELinux, flush the iptables rules, and clear the LVS table with ipvsadm -C (see the command sketch below).
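
A minimal prep sketch (assuming CentOS 6 with yum; the nginx package is assumed to come from the EPEL repository):

# On both directors:
yum install -y keepalived ipvsadm
# On both real servers:
yum install -y epel-release
yum install -y nginx
# On every machine: disable SELinux for the running system,
# flush firewall rules, and clear any existing LVS rules on the directors.
setenforce 0
iptables -F
ipvsadm -C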

 

3. Setting Up the Environment

[root@movies ~]# vi /etc/keepalived/keepalived.conf    // edit the primary director's config file
vrrp_instance VI_1 {
    state MASTER      # BACKUP on the backup server
    interface eth0
    virtual_router_id 51
    priority 100      # priority; the higher the value, the higher the priority; 90 on the backup server
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.1.100
    }
}
virtual_server 192.168.1.100 80 {
    delay_loop 6             # (poll real server health every 6 seconds)
    lb_algo wlc              # (weighted least-connection scheduling)
    lb_kind DR               # (Direct Routing)
    persistence_timeout 0    # (seconds during which connections from the same IP stick to one real server; 0 disables persistence)
    protocol TCP             # (TCP virtual service; the TCP_CHECK health checks below also use TCP)

    real_server 192.168.1.119 80 {
        weight 100            # (weight)
        TCP_CHECK {
            connect_timeout 10     # (time out after 10 seconds without a response)
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
    real_server 192.168.1.120 80 {
        weight 100
        TCP_CHECK {
            connect_timeout 10
            nb_get_retry 3
            delay_before_retry 3
            connect_port 80
        }
    }
}

 

Test whether the primary director environment was set up successfully

[root@movies ~]# /etc/init.d/keepalived start
Starting keepalived:                                      [  OK  ]
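
To confirm keepalived came up as MASTER, one option (my assumption; the log path is the CentOS 6 default) is to check the syslog:

[root@movies ~]# grep -i keepalived /var/log/messages | tail
# expect a line like: Keepalived_vrrp: VRRP_Instance(VI_1) Entering MASTER STATE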

[root@bols ~]# sh /usr/local/sbin/lvs_dr_rs.sh

[root@bols ~]# /etc/init.d/nginx start

[root@bols ~]# cat /usr/local/sbin/lvs_dr_rs.sh
#!/bin/bash
# Bind the VIP to lo:0 so this RS can serve DR traffic addressed to the VIP,
# and tune ARP so the RS never answers ARP requests for the VIP itself.
vip=192.168.1.100
ifconfig lo:0 $vip broadcast $vip netmask 255.255.255.255 up
route add -host $vip lo:0
echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce
echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore
echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce

[root@bols ~]# ifconfig   // check the VIP on rs1

lo:0      Link encap:Local Loopback  

          inet addr:192.168.1.100  Mask:255.255.255.255

          UP LOOPBACK RUNNING  MTU:65536  Metric:1

[root@longls ~]# sh /usr/local/sbin/lvs_dr_rs.sh   // same script as on rs1

[root@longls ~]# /etc/init.d/nginx start

[root@longls ~]# ifconfig   // check the VIP on rs2

lo:0      Link encap:Local Loopback  

          inet addr:192.168.1.100  Mask:255.255.255.255

          UP LOOPBACK RUNNING  MTU:65536  Metric:1

From the test machine, rs1 and rs2 respond an equal number of times:

[root@localhost ~]# curl 192.168.1.100
longls.avi
[root@localhost ~]# curl 192.168.1.100
bols.avi
[root@localhost ~]# curl 192.168.1.100
longls.avi
[root@localhost ~]# curl 192.168.1.100
bols.avi
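
A quick way to eyeball the distribution (a throwaway helper loop of my own, not from the original):

[root@localhost ~]# for i in $(seq 1 10); do curl -s 192.168.1.100; done | sort | uniq -c
# expect roughly equal counts for bols.avi and longls.avi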

Stop the nginx service on rs2 and all requests go to rs1:

[root@longls ~]# /etc/init.d/nginx stop
Stopping nginx:                                               [  OK  ]

[root@localhost ~]# curl 192.168.1.100
bols.avi
[root@localhost ~]# curl 192.168.1.100
bols.avi
[root@localhost ~]# curl 192.168.1.100
bols.avi
[root@localhost ~]# curl 192.168.1.100
bols.avi
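
This is keepalived's TCP_CHECK at work: to confirm that rs2 was removed from the pool, list the LVS table on the director (output omitted here; the 192.168.1.120 entry should disappear while nginx is down):

[root@movies ~]# ipvsadm -ln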

Restart the nginx service on rs2 and rs2 becomes reachable again:

[root@longls ~]# /etc/init.d/nginx start
Starting nginx:                                           [  OK  ]

[root@localhost ~]# curl 192.168.1.100
longls.avi
[root@localhost ~]# curl 192.168.1.100
bols.avi
[root@localhost ~]# curl 192.168.1.100
longls.avi
[root@localhost ~]# curl 192.168.1.100
bols.avi

The above shows that the primary director environment was set up successfully.

 

[root@xiaomovies ~]# vi /etc/keepalived/keepalived.conf   // copy the config file from the primary to the backup, then change it as follows

state MASTER  ->  state BACKUP

priority 100  ->  priority 90
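
A minimal sketch for producing the backup's config from the primary's (the scp/sed approach is my assumption; the IP and path are from this article):

# On the primary director:
scp /etc/keepalived/keepalived.conf root@192.168.1.64:/etc/keepalived/
# On the backup director:
sed -i 's/state MASTER/state BACKUP/; s/priority 100/priority 90/' /etc/keepalived/keepalived.conf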

After configuring keepalived, enable IP forwarding (required on both the primary and backup directors):

echo 1 > /proc/sys/net/ipv4/ip_forward
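
To make forwarding persist across reboots (my assumption; the original only sets it at runtime), update the existing entry in /etc/sysctl.conf, which CentOS 6 ships with ip_forward set to 0:

sed -i 's/^net.ipv4.ip_forward = 0/net.ipv4.ip_forward = 1/' /etc/sysctl.conf
sysctl -p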

[root@localhost ~]# /etc/init.d/keepalived start
Starting keepalived:                                      [  OK  ]

Note that starting the keepalived service automatically creates the VIP and the ipvsadm rules.
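
Both can be verified on whichever director is currently MASTER (commands only; output omitted):

[root@movies ~]# ip addr show eth0    # the VIP 192.168.1.100/32 should be bound here
[root@movies ~]# ipvsadm -ln          # the virtual server and both real servers should be listed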

 

Testing the primary and backup directors

Stop the keepalived service on the primary; after it stops, run ip addr on the backup and you will see the VIP bound there, which means the backup has taken over. The switchover is very fast.

After the keepalived service is started on the primary again, the primary binds the VIP and takes the service back:
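
During the switchover you can watch service continuity from the test machine (a crude helper loop of my own, not from the original):

[root@localhost ~]# while true; do curl -s --connect-timeout 1 192.168.1.100; sleep 1; done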

[root@xiaomovies ~]# ip add    

2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
    link/ether 00:0c:29:66:70:98 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.64/24 brd 192.168.1.255 scope global eth0

[root@movies ~]# ip add

2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
    link/ether 00:0c:29:e6:9e:df brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.111/24 brd 192.168.1.255 scope global eth0
    inet 192.168.1.100/32 scope global eth0

[root@movies ~]# /etc/init.d/keepalived stop
Stopping keepalived:                                          [  OK  ]
[root@movies ~]# ip add

2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
    link/ether 00:0c:29:e6:9e:df brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.111/24 brd 192.168.1.255 scope global eth0

[root@xiaomovies ~]# ip add

2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
    link/ether 00:0c:29:66:70:98 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.64/24 brd 192.168.1.255 scope global eth0
    inet 192.168.1.100/32 scope global eth0

[root@movies ~]# /etc/init.d/keepalived start
Starting keepalived:                                      [  OK  ]
[root@movies ~]# ip add

2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
    link/ether 00:0c:29:e6:9e:df brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.111/24 brd 192.168.1.255 scope global eth0
    inet 192.168.1.100/32 scope global eth0
[root@xiaomovies ~]# ip add

2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
    link/ether 00:0c:29:66:70:98 brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.64/24 brd 192.168.1.255 scope global eth0

 

