实验目的
构建LVS-DR架构,为了达到LVS的高可用目的,故在LVS-DR的Director端做Keepalived集群:在Director-A上做keepalive-A,在Director-B上做keepalive-B;LVS-RS1和LVS-RS2为后端的两台web服务器。通过在Director上做keepalived集群实现高可用的目的
实验拓扑图
实验环境(keepalive节点同时作为LVS的director节点)
keepalive-A(Director-A) 172.16.253.108
keepalive-B(Director-B) 172.16.253.105
LVS-RS1 172.16.250.127
LVS-RS2 172.16.253.193
VIP 172.16.253.150
client 172.16.253.177
LVS-RS web集群
为了更好的观察实验效果,故在此将RS1和RS2的web页面内容设置内容不一致,以致可以更清晰的区分RS1服务端和RS2服务端
LVS-RS1
[root@LVS-RS1 ~]# systemctl restart chronyd \\多台服务器时间同步
[root@LVS-RS1 ~]# iptables -F
[root@LVS-RS1 ~]# setenforce 0
[root@LVS-RS1 ~]# yum -y install nginx
[root@LVS-RS1 ~]# vim /usr/share/nginx/html/index.html
Web RS1
[root@LVS-RS1 ~]# systemctl start nginx
修改内核参数并添加VIP地址
[root@LVS-RS1 ~]# vim lvs_dr.sh
#!/bin/bash
#
# lvs_dr.sh - configure this host as an LVS-DR real server.
#
#   start: suppress ARP replies/announcements for the VIP, then bind the
#          VIP to lo:0 so packets addressed to the VIP are accepted locally.
#   stop : remove the VIP and restore the default ARP behaviour.
#
vip=172.16.253.150
mask=255.255.255.255   # /32 host mask: the VIP must not create a subnet route on lo
iface="lo:0"

case "${1:-}" in
start)
    # Set the ARP kernel parameters BEFORE bringing the VIP up; otherwise
    # there is a window during which this real server answers ARP requests
    # for the VIP and can steal traffic from the director.
    echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
    ifconfig "$iface" "$vip" netmask "$mask" broadcast "$vip" up
    route add -host "$vip" dev "$iface"
    ;;
stop)
    # Tear the VIP down first, then restore default ARP behaviour.
    ifconfig "$iface" down
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
    ;;
*)
    echo "Usage:$(basename "$0") start|stop"
    exit 1
    ;;
esac
[root@LVS-RS1 ~]# bash lvs_dr.sh start
[root@LVS-RS1 ~]# ifconfig
lo:0: flags=73 mtu 65536
inet 172.16.253.150 netmask 255.255.255.255
loop txqueuelen 1 (Local Loopback)
LVS-RS2
[root@LVS-RS2 ~]# systemctl restart chronyd \\多台服务器时间同步
[root@LVS-RS2 ~]# iptables -F
[root@LVS-RS2 ~]# setenforce 0
[root@LVS-RS2 ~]# yum -y install nginx
[root@LVS-RS2 ~]# vim /usr/share/nginx/html/index.html
Web RS2
[root@LVS-RS2 ~]# systemctl start nginx
修改内核参数并添加VIP地址
[root@LVS-RS2 ~]# vim lvs_dr.sh
#!/bin/bash
#
# lvs_dr.sh - configure this host as an LVS-DR real server.
#
#   start: suppress ARP replies/announcements for the VIP, then bind the
#          VIP to lo:0 so packets addressed to the VIP are accepted locally.
#   stop : remove the VIP and restore the default ARP behaviour.
#
vip=172.16.253.150
mask=255.255.255.255   # /32 host mask: the VIP must not create a subnet route on lo
iface="lo:0"

case "${1:-}" in
start)
    # Set the ARP kernel parameters BEFORE bringing the VIP up; otherwise
    # there is a window during which this real server answers ARP requests
    # for the VIP and can steal traffic from the director.
    echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 1 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 2 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 2 > /proc/sys/net/ipv4/conf/lo/arp_announce
    ifconfig "$iface" "$vip" netmask "$mask" broadcast "$vip" up
    route add -host "$vip" dev "$iface"
    ;;
stop)
    # Tear the VIP down first, then restore default ARP behaviour.
    ifconfig "$iface" down
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_ignore
    echo 0 > /proc/sys/net/ipv4/conf/all/arp_announce
    echo 0 > /proc/sys/net/ipv4/conf/lo/arp_announce
    ;;
*)
    echo "Usage:$(basename "$0") start|stop"
    exit 1
    ;;
esac
[root@LVS-RS2 ~]# bash lvs_dr.sh start
[root@LVS-RS2 ~]# ifconfig
lo:0: flags=73 mtu 65536
inet 172.16.253.150 netmask 255.255.255.255
loop txqueuelen 1 (Local Loopback)
keepalive集群
Director节点搭建
keepalive-A
[root@keepaliveA ~]# systemctl restart chronyd \\多台服务器时间同步
[root@keepaliveA ~]# yum -y install ipvsadm
keepalive-B
[root@keepaliveB ~]# systemctl restart chronyd \\多台服务器时间同步
[root@keepaliveB ~]# yum -y install ipvsadm
keepalive上配置web的sorry server
keepalive-A
[root@keepaliveA ~]# yum -y install nginx
[root@keepaliveA ~]# vim /usr/share/nginx/html/index.html
sorry from Director-A(keepalive-A)
[root@keepaliveA ~]# systemctl start nginx
keepalive-B
[root@keepalive-B ~]# yum -y install nginx
[root@keepalive-B ~]# vim /usr/share/nginx/html/index.html
sorry from Director-B(keepalive-B)
[root@keepaliveB ~]# systemctl start nginx
keepalive-A配置keepalive
keepalive-A
[root@keepalive-A ~]# iptables -F
[root@keepalive-A ~]# yum -y install keepalived
[root@keepaliveA ~]# vim /etc/keepalived/keepalived.conf
global_defs {
   notification_email {                      # mail alert recipients
      [email protected]
   }
   notification_email_from [email protected]  # alert sender address
   smtp_server 127.0.0.1                     # SMTP server used for alerts
   smtp_connect_timeout 30                   # SMTP connect timeout (seconds)
   router_id keepaliveA                      # identifier for this node, user-defined
   vrrp_mcast_group4 224.103.5.5             # VRRP multicast group (default 224.0.0.18)
}

vrrp_instance VI_A {
    state MASTER                # initial role; A is the preferred master
    interface ens33
    virtual_router_id 51        # must be identical on both keepalived nodes
    priority 100                # higher than backup (95) so A wins the election
    advert_int 1                # VRRP advertisement interval (seconds)
    authentication {
        auth_type PASS
        auth_pass qr8hQHuL      # must match on both nodes
    }
    virtual_ipaddress {
        172.16.253.150/32 dev ens33
    }
}   # NOTE: this closing brace was missing; without it keepalived
    # cannot parse the configuration and the VRRP instance never starts

virtual_server 172.16.253.150 80 {
    delay_loop 6                # health-check polling interval (seconds)
    lb_algo rr                  # scheduling algorithm: round-robin
    lb_kind DR                  # LVS forwarding mode: direct routing
    protocol TCP                # service protocol; only TCP is supported here
    sorry_server 127.0.0.1 80   # local nginx page served when all real servers are down

    real_server 172.16.250.127 80 {
        weight 1                # scheduling weight
        HTTP_GET {              # plain-HTTP application-layer check; SSL_GET is for
                                # HTTPS and would fail against these HTTP-only backends
            url {
                path /          # URL to probe
                #digest ff20ad2481f97b1754ef3e12ecd3a9cc   # optional content checksum for a healthy response
                status_code 200 # response code considered healthy
            }
            connect_timeout 3   # connection timeout (seconds)
            nb_get_retry 3      # number of retries
            delay_before_retry 1    # delay before each retry (seconds)
        }
    }

    real_server 172.16.253.193 80 {
        weight 1
        HTTP_GET {
            url {
                path /
                #digest ff20ad2481f97b1754ef3e12ecd3a9cc
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 1
        }
    }
}
[root@keepaliveA ~]# systemctl start keepalived
[root@keepaliveA ~]# ip a l
2: ens33: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:75:dc:3c brd ff:ff:ff:ff:ff:ff
inet 172.16.253.150/32 scope global ens33
valid_lft forever preferred_lft forever
[root@keepaliveA ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 172.16.253.150:80 rr
-> 172.16.250.127:80 Route 1 0 0
-> 172.16.253.193:80 Route 1 0 0
keepalive-B配置keepalive
keepalive-B
[root@keepalive-B ~]# iptables -F
[root@keepalive-B ~]# yum -y install keepalived
[root@keepalive-B ~]# vim /etc/keepalived/keepalived.conf
global_defs {
   notification_email {                      # mail alert recipients
      [email protected]
   }
   notification_email_from [email protected]  # alert sender address
   smtp_server 127.0.0.1                     # SMTP server used for alerts
   smtp_connect_timeout 30                   # SMTP connect timeout (seconds)
   router_id keepaliveB                      # identifier for THIS node; the original
                                             # kept "keepaliveA" from a copy-paste
   vrrp_mcast_group4 224.103.5.5             # VRRP multicast group (default 224.0.0.18)
}

vrrp_instance VI_A {
    state BACKUP                # initial role; B takes over only when A fails
    interface ens33
    virtual_router_id 51        # must be identical on both keepalived nodes
    priority 95                 # lower than master (100) so A is preferred
    advert_int 1                # VRRP advertisement interval (seconds)
    authentication {
        auth_type PASS
        auth_pass qr8hQHuL      # must match on both nodes
    }
    virtual_ipaddress {
        172.16.253.150/32 dev ens33
    }
}   # NOTE: this closing brace was missing; without it keepalived
    # cannot parse the configuration and the VRRP instance never starts

virtual_server 172.16.253.150 80 {
    delay_loop 6                # health-check polling interval (seconds)
    lb_algo rr                  # scheduling algorithm: round-robin
    lb_kind DR                  # LVS forwarding mode: direct routing
    protocol TCP                # service protocol; only TCP is supported here
    sorry_server 127.0.0.1 80   # local nginx page served when all real servers are down

    real_server 172.16.250.127 80 {
        weight 1                # scheduling weight
        HTTP_GET {              # plain-HTTP application-layer check; SSL_GET is for
                                # HTTPS and would fail against these HTTP-only backends
            url {
                path /          # URL to probe
                #digest ff20ad2481f97b1754ef3e12ecd3a9cc   # optional content checksum for a healthy response
                status_code 200 # response code considered healthy
            }
            connect_timeout 3   # connection timeout (seconds)
            nb_get_retry 3      # number of retries
            delay_before_retry 1    # delay before each retry (seconds)
        }
    }

    real_server 172.16.253.193 80 {
        weight 1
        HTTP_GET {
            url {
                path /
                #digest ff20ad2481f97b1754ef3e12ecd3a9cc
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 1
        }
    }
}
[root@keepaliveB ~]# systemctl start keepalived
[root@keepalive-B ~]# ipvsadm -L
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 172.16.253.150:http rr
-> 172.16.250.127:http Route 1 0 0
-> 172.16.253.193:http Route 1 0 0
访问测试
client访问测试
[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done
Web RS1
Web RS2
Web RS1
Web RS2
Web RS1
当keepalive-A故障时
[root@keepaliveA ~]# systemctl stop keepalived
keepalive-B自动成为MASTER主节点,则LVS的director调度器切换至keepalive-B上,LVS-RS1和LVS-RS2的web服务正常使用
client访问测试
[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done
Web RS2
Web RS1
Web RS2
Web RS1
Web RS2
当LVS-RS1和LVS-RS2的web服务全部故障时
[root@LVS-RS1 ~]# iptables -A INPUT -p tcp --dport 80 -j REJECT
[root@LVS-RS2 ~]# iptables -A INPUT -p tcp --dport 80 -j REJECT
(此前需先在keepalive-A上重新启动keepalived服务,使其恢复为MASTER)client访问到sorry server服务器,且sorry server服务器为keepalive-A
[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done
sorry from Director-A(keepalive-A)
sorry from Director-A(keepalive-A)
sorry from Director-A(keepalive-A)
sorry from Director-A(keepalive-A)
sorry from Director-A(keepalive-A)
当keepaliveA故障时
[root@keepaliveA ~]# systemctl stop keepalived.service
client访问sorry server服务页面,且sorry server服务器为keepalive-B
[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done
sorry from Director-B(keepalive-B)
sorry from Director-B(keepalive-B)
sorry from Director-B(keepalive-B)
sorry from Director-B(keepalive-B)
sorry from Director-B(keepalive-B)
LVS-RS1的web服务恢复正常后
[root@LVS-RS1 ~]# iptables -F
client访问测试
[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done
Web RS1
Web RS1
Web RS1
Web RS1
Web RS1
LVS-RS1和LVS-RS2的web服务全部恢复正常后
[root@LVS-RS1 ~]# iptables -F
[root@LVS-RS2 ~]# iptables -F
client访问测试
[root@client ~]# for i in {1..10};do curl http://172.16.253.150;done
Web RS2
Web RS1
Web RS2
Web RS1
Web RS2
保存及重载规则
保存:建议保存至/etc/sysconfig/ipvsadm
ipvsadm-save > /PATH/TO/IPVSADM_FILE
ipvsadm -S > /PATH/TO/IPVSADM_FILE
systemctl stop ipvsadm.service
重载
ipvsadm-restore < /PATH/FROM/IPVSADM_FILE
ipvsadm -R < /PATH/FROM/IPVSADM_FILE
systemctl restart ipvsadm.service
keepalive节点通过DNS域名解析指向的实现
获取web页面内容的校验码
[root@keepaliveA ~]# genhash -s 172.16.250.127 -p 80 -u /