Environment: iptables and SELinux are disabled; the system is Red Hat Enterprise Linux 6.5.
server2, server3: cluster nodes (server4 is configured the same way as server1)
server4, server1: backend servers
HAProxy provides high availability, load balancing, and proxying for TCP (layer 4) and HTTP (layer 7) applications, and supports virtual hosts. It is a free, fast, and reliable solution. HAProxy is particularly well suited to heavily loaded web sites, which usually require session persistence or layer-7 processing. Running on current hardware, it can easily handle tens of thousands of concurrent connections, and its operating mode makes it simple and safe to integrate into an existing architecture while keeping the web servers off the public network.
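To make the layer-4 vs. layer-7 distinction concrete, a TCP-mode proxy section looks like the sketch below. This block is illustrative only (the listen name, port 3306, and backend addresses are assumptions); the configuration built in the rest of this article uses HTTP (layer-7) mode.
listen mysql_tcp
    bind *:3306
    mode tcp                                        # layer 4: plain TCP forwarding, no HTTP parsing
    balance roundrobin
    server db1 172.25.254.1:3306 check inter 1000
    server db2 172.25.254.4:3306 check inter 1000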
//a prebuilt RPM package can also be downloaded instead of building one
yum install rpm-build
rpmbuild -tb haproxy-1.6.11.tar.gz       //build the RPM from the source tarball
rpm -ivh /root/rpmbuild/RPMS/x86_64/haproxy-1.6.11-1.x86_64.rpm
rpm -qpl rpmbuild/RPMS/x86_64/haproxy-1.6.11-1.x86_64.rpm
tar zxf haproxy-1.6.11.tar.gz            //unpack the tarball to get the sample configuration files
cd haproxy-1.6.11
find . -name "*.spec"                    //the spec file that rpmbuild uses for packaging
cp /root/haproxy-1.6.11/examples/content-sw-sample.cfg /etc/haproxy/haproxy.cfg
cd /etc/haproxy/
vim haproxy.cfg                          //the init script expects the configuration file to be named haproxy.cfg
global
    log 127.0.0.1 local0        # syslog server address and facility to log to
    maxconn 4096                # maximum number of concurrent connections
    chroot /usr/share/haproxy   # chroot directory used at runtime
    daemon                      # run in the background
    #debug
    #quiet
The chroot directory configured above does not exist yet, so create it first: mkdir /usr/share/haproxy
listen admin_stats
    stats enable
    bind *:8080                 # address and port the stats page listens on
    mode http                   # HTTP (layer-7) mode
    option httplog
    log global
    stats refresh 30s           # auto-refresh interval of the stats page
    stats uri /haproxy          # stats URI, reachable at ip:8080/haproxy
    stats realm haproxy
    stats auth admin:admin      # username and password for authentication
    stats hide-version          # hide the HAProxy version
    stats admin if TRUE         # enable the admin interface; after authenticating, backend nodes can be managed from the web UI
defaults
    log global
    mode http                   # layer-7 HTTP mode by default (tcp would be layer 4)
    option httplog              # HTTP log format
    option dontlognull          # do not log null (empty) connections
    retries 3                   # consider a server unavailable after 3 failed retries
    option redispatch           # if a client's server goes down, redispatch it to a healthy one
    maxconn 65535
    timeout connect 5000        # connection timeout
    timeout client 50000        # client-side timeout
    timeout server 50000        # server-side timeout

frontend public
    bind *:80 name clear        # listen on all addresses, port 80
    default_backend static      # the static backend, for 'Host: img', /img and /css

backend static
    balance roundrobin          # load-balancing algorithm
    server statsrv1 172.25.254.1:80 check inter 1000    # backend server address, health check every 1000 ms
    server statsrv2 172.25.254.2:80 check inter 1000
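Before starting the service, the file can be validated and HAProxy started through the init script installed by the RPM; once running, the stats page defined above can be fetched with curl. A sketch, assuming HAProxy runs on server2 at 172.25.254.2 and the admin:admin credentials above:
haproxy -c -f /etc/haproxy/haproxy.cfg                   # -c only checks the configuration for errors
/etc/init.d/haproxy start
curl -u admin:admin http://172.25.254.2:8080/haproxy     # stats page, requires the configured login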
server1:
[root@server2 ~]# cat /var/www/html/index.html
www.westos.org-server1
[root@server2 ~]# /etc/init.d/httpd start
server4:
[root@server4 ~]# cat /var/www/html/index.html
www.westos.org
[root@server4 ~]# /etc/init.d/httpd start
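With both backends serving their index pages, repeated requests against the frontend should alternate between the two, confirming round-robin scheduling. A sketch, with the frontend address assumed to be 172.25.254.2:
for i in 1 2 3 4; do curl -s http://172.25.254.2/; done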
To collect HAProxy's logs (sent to the local0 facility as configured above), enable UDP syslog reception and give local0 its own log file in /etc/rsyslog.conf:
13 $ModLoad imudp
14 $UDPServerRun 514
42 *.info;mail.none;authpriv.none;cron.none;local0.none /var/log/messages
62 local0.* /var/log/haproxy.log
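For the new rules to take effect rsyslog has to be restarted; HAProxy entries should then show up in the dedicated file. A sketch, assuming the stock rsyslog init script on RHEL 6:
/etc/init.d/rsyslog restart
tail -f /var/log/haproxy.log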
Dynamic/static separation: send requests ending in .php to a separate backend. Edit /etc/haproxy/haproxy.cfg:
52 use_backend static2 if { path_end -i .php }
53 default_backend static1
56 backend static1
57 balance roundrobin
58 #balance source
59 #option httpchk HEAD /favicon.ico
60 server statsrv1 172.25.254.1:80 check inter 1000
61 backend static2
62 balance roundrobin
63 server statsrv2 172.25.254.4:80 check inter 1000
[root@server1 haproxy]# /etc/init.d/haproxy reload
[root@server4 ~]# yum install php -y
[root@server4 ~]# cat /var/www/html/index.php
<?php
phpinfo();
?>
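The separation can be spot-checked from a client: a plain page should be answered by static1 (server1), while a .php request should be routed to static2 (server4) and return the phpinfo page. Frontend address assumed as before:
curl -s http://172.25.254.2/index.html     # expected to come from backend static1 (172.25.254.1)
curl -s http://172.25.254.2/index.php      # expected to come from backend static2 (172.25.254.4)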
Access control: deny requests coming from a blacklisted source address. Edit /etc/haproxy/haproxy.cfg:
46 acl blacklist src 172.25.254.71
47 http-request deny if blacklist
To show a maintenance page instead of a bare 403, redirect the error to a web server on server2 (port 8080):
46 acl blacklist src 172.25.254.71
47 http-request deny if blacklist
48 errorloc 403 http://172.25.254.2:8080
[root@server2 haproxy]# /etc/init.d/haproxy reload
[root@server2 haproxy]# yum install httpd
[root@server2 haproxy]# cd /var/www/html
[root@server2 html]# vim index.html
The system is under maintenance...
[root@server2 haproxy]# vim /etc/httpd/conf/httpd.conf
Listen 8080
Alternatively, redirect all client access to server1:
[root@server2 haproxy]# vim haproxy.cfg
50 redirect location http://172.25.254.1
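Both behaviours can be checked with curl -I against the frontend (addresses assumed as before): with the blacklist and errorloc rules, the blacklisted client 172.25.254.71 is sent to the maintenance page on port 8080; with the redirect location rule, every client is sent to server1.
curl -I http://172.25.254.2/     # from 172.25.254.71: expect a redirect to http://172.25.254.2:8080
curl -I http://172.25.254.2/     # with the redirect rule active: expect Location: http://172.25.254.1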
Read/write separation: send write requests (POST and PUT) to backend static2. On server2:
[root@server2 haproxy]# vim haproxy.cfg
47 acl write method POST
48 acl write method PUT
54 #use_backend static2 if { path_end -i .php }
55 use_backend static2 if write
56 default_backend static1
57 backend static1
58 balance roundrobin
59 #balance source
60 #option httpchk HEAD /favicon.ico
61 server statsrv1 172.25.254.1:80 check inter 1000
62 backend static2
63 balance roundrobin
64 server statsrv2 172.25.254.4:80 check inter 1000
[root@server2 haproxy]# /etc/init.d/haproxy reload
server1:
[root@server1 html]# yum install php -y
[root@server1 html]# scp -r upload/ root@server4:/var/www/html/
[root@server1 html]# cd upload/
[root@server1 upload]# ls
index.php upload_file.php
[root@server1 upload]# mv * ..
[root@server1 upload]# ls
[root@server1 upload]# cd ..
[root@server1 html]# ls
index.html index.php upload upload_file.php
[root@server1 html]# vim upload_file.php      //raise the allowed upload size on line 5
5 && ($_FILES["file"]["size"] < 2000000))
[root@server1 html]# chmod 777 upload
[root@server1 html]# /etc/init.d/httpd restart
server4:
[root@server4 ~]# cd /var/www/html/
[root@server4 html]# ls
index.html index.php upload
[root@server4 html]# cd upload
[root@server4 upload]# ls
index.php upload_file.php
[root@server4 upload]# mv * ..
mv: overwrite `../index.php'? y
[root@server4 upload]# ls
[root@server4 upload]# cd ..
[root@server4 html]# ls
index.html index.php upload upload_file.php
[root@server4 html]# chmod 777 upload
[root@server4 html]# vim upload_file.php
5 && ($_FILES["file"]["size"] < 2000000))
[root@server4 html]# /etc/init.d/httpd restart
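The read/write separation can now be verified from a client: a GET is answered by static1 (server1), while a POST, here a file upload through upload_file.php, is routed to static2 (server4). The uploaded file name is only an example:
curl -s http://172.25.254.2/index.php                                # GET: handled by static1 (server1)
curl -s -F "file=@test.png" http://172.25.254.2/upload_file.php      # POST: routed to static2 (server4)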
1. Pacemaker
Pacemaker is an open-source high-availability cluster resource manager (CRM). It sits at the resource-management / resource-agent (RA) layer of the HA stack and does not itself provide the underlying heartbeat messaging; to communicate with the other nodes it relies on a lower-level messaging service to deliver its information. (It provides resource and relationship management on top of a heartbeat engine that detects heartbeat messages.)
2. Corosync
Corosync is part of the cluster management suite; it handles message delivery between nodes, and a simple configuration file defines how and over which protocol messages are passed.
On server2 and server3:
[root@server2 haproxy]# yum install pacemaker corosync -y
[root@server2 haproxy]# cp /etc/corosync/corosync.conf.example /etc/corosync/corosync.conf
[root@server2 haproxy]# cd
[root@server2 ~]# yum install crmsh-1.2.6-0.rc2.2.1.x86_64.rpm pssh-2.3.1-2.1.x86_64.rpm -y
//install the crm shell management tool
[root@server2 ~]# vim /etc/corosync/corosync.conf
compatibility: whitetank

totem {
        version: 2
        secauth: off
        threads: 0
        interface {
                ringnumber: 0
                bindnetaddr: 172.25.254.0   //network of the two pacemaker nodes
                mcastaddr: 226.94.1.1       //multicast address
                mcastport: 5405
                ttl: 1
        }
}

logging {
        fileline: off
        to_stderr: no
        to_logfile: yes
        to_syslog: yes
        logfile: /var/log/cluster/corosync.log
        debug: off
        timestamp: on
        logger_subsys {
                subsys: AMF
                debug: off
        }
}

amf {
        mode: disabled
}

service {                   //not present by default, added manually
        name: pacemaker
        ver: 0              //ver 0: pacemaker is started by corosync itself, so the two start together
}
[root@server2 ~]# /etc/init.d/pacemaker start
[root@server2 ~]# /etc/init.d/corosync start
[root@server2 ~]# chkconfig corosync on
[root@server2 x86_64]# crm_verify -VL    //check the configuration for errors
From here on, the configuration only needs to be done on server2 (the cluster configuration is replicated to the other node):
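Before adding resources it is worth confirming that the totem ring is up and that both nodes are members. A sketch using the tools shipped with corosync 1.x:
corosync-cfgtool -s                                    # ring status of the local node
corosync-objctl runtime.totem.pf.mrp.srp.members       # list current cluster members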
[root@server2 ~]# crm
crm(live)# status
Last updated: Fri Aug 3 08:30:36 2018
Last change: Fri Aug 3 08:08:24 2018 via crm_attribute on server1
Stack: cman
Current DC: server1 - partition with quorum
Version: 1.1.10-14.el6-368c726
2 Nodes configured
0 Resources configured
Online: [ server2 server3 ]
crm(live)# configure
crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.254.100 cidr_netmask=24 op monitor interval=20s    //monitor the resource every 20s
crm(live)configure# commit
error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined
error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option
error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity
Errors found during check: config not valid
Do you still want to commit? yes
crm(live)configure# property stonith-enabled=false    //disable STONITH for now: this tells the cluster to assume that failed nodes have been safely powered off
crm(live)configure# commit
crm(live)configure# cd
crm(live)# resource
crm(live)resource# status vip
resource vip is running on: server2
crm(live)resource# exit
bye
[root@server2 ~]# ip addr    //the resource is running on server2, so the VIP has been assigned to server2
inet 172.25.254.100/24 scope global secondary eth0
[root@server2 ~]# crm
crm(live)# configure
crm(live)configure# primitive haproxy lsb:haproxy op monitor interval=30s
//resource name haproxy, lsb resource agent (the init script), monitored every 30s
crm(live)configure# commit
crm(live)configure# group web vip haproxy    //group the VIP and haproxy so they always run on the same node
crm(live)configure# commit
crm(live)configure# property no-quorum-policy=ignore
//quorum requires more than half of the nodes; in this two-node lab, losing one node would lose quorum, so the quorum policy is ignored to keep resources running during the tests
crm(live)configure# commit
crm(live)configure# bye
Bye
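A simple failover check (a sketch using crmsh, assuming haproxy is installed and configured identically on server3): put server2 in standby, watch the web group (VIP + haproxy) move to server3, then bring server2 back online.
crm node standby server2      # resources in group web should migrate to server3
crm_mon -1                    # one-shot view of resource placement
crm node online server2       # rejoin server2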
Configure fencing on the physical host and start the service:
[root@foundation71 ~]# yum install fence-virtd-multicast fence-virtd fence-virtd-libvirt -y
[root@foundation71 ~]# fence_virtd -c    //interactively write a new fence configuration
Module search path [/usr/lib64/fence-virt]: /usr/lib64/fence-virt    //default module location
Listener module [multicast]: multicast
Multicast IP Address [225.0.0.12]: 225.0.0.12
Using ipv4 as family.    //the default
Multicast IP Port [1229]: 1229
Interface [virbr0]: br0
//br0 is used here because the virtual machines are attached to the host through the br0 bridge
Key File [/etc/cluster/fence_xvm.key]: /etc/cluster/fence_xvm.key
Backend module [libvirt]: libvirt
[root@foundation71 ~]# cat /etc/fence_virt.conf    //inspect the resulting configuration file
backends {
        libvirt {
                uri = "qemu:///system";
        }
}

listeners {
        multicast {
                port = "1229";
                family = "ipv4";
                interface = "br0";
                address = "225.0.0.12";
                key_file = "/etc/cluster/fence_xvm.key";
        }
}

fence_virtd {
        module_path = "/usr/lib64/fence-virt";
        backend = "libvirt";
        listener = "multicast";
}

[root@foundation71 ~]# mkdir /etc/cluster
[root@foundation71 ~]# dd if=/dev/urandom of=/etc/cluster/fence_xvm.key bs=128 count=1
[root@foundation71 ~]# file /etc/cluster/fence_xvm.key    //check the file type
/etc/cluster/fence_xvm.key: data
[root@foundation71 ~]# systemctl restart fence_virtd
[root@foundation71 ~]# systemctl enable fence_virtd
[root@foundation71 ~]# netstat -anulp | grep fence_virtd
udp 0 0 0.0.0.0:1229 0.0.0.0:* 8657/fence_virtd
[root@foundation71 Desktop]# scp /etc/cluster/fence_xvm.key server2:/etc/cluster/
[root@foundation71 Desktop]# scp /etc/cluster/fence_xvm.key server3:/etc/cluster/
server2:
[root@server2 ~]# ls /etc/cluster/
fence_xvm.key
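With the key distributed, multicast fencing can be tested from a cluster node before wiring it into Pacemaker; fence_xvm -o list asks fence_virtd on the physical host to list the domains it can fence:
fence_xvm -o list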
[root@server2 ~]# crm
crm(live)# configure
crm(live)configure# primitive vmfence stonith:fence_xvm params pcmk_host_map="server2:server2;server3:server3" op monitor interval="1min"
crm(live)configure# commit
crm(live)configure# property stonith-enabled=true
crm(live)configure# commit
crm(live)configure# show
node server2 \
attributes standby="off"
node server3
primitive haproxy lsb:haproxy \
op monitor interval="30s"
primitive vip ocf:heartbeat:IPaddr2 \
params ip="172.25.254.100" cidr_netmask="24" \
op monitor interval="20s"
primitive vmfence stonith:fence_xvm \
params pcmk_host_map="server2:server2;server3:server3" \
op monitor interval="1min"
group web vip haproxy
property $id="cib-bootstrap-options" \
dc-version="1.1.10-14.el6-368c726" \
cluster-infrastructure="classic openais (with plugin)" \
stonith-enabled="true" \
expected-quorum-votes="2" \
no-quorum-policy="ignore"
[root@server2 ~]# fence_xvm -H server3    //manually fence server3 to verify fencing works (the argument is server3's domain name as mapped in pcmk_host_map)
[root@server3 ~]# echo c >/proc/sysrq-trigger    //simulate a kernel crash on server3; the cluster fences it and server3 reboots automatically
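After either test server3 is power-cycled by the fence device; once it has rebooted and corosync has started again it should rejoin the cluster. A sketch of how to confirm from server2:
crm status      # server3 appears Online again after it rejoins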