Environment:

Hostname | IP | Services
---|---|---
server1 | 172.25.1.1 | salt-master
server2 | 172.25.1.2 | salt-minion, haproxy
server3 | 172.25.1.3 | salt-minion, apache
server4 | 172.25.1.4 | salt-minion, nginx

The nginx and apache deployment states used in this environment are covered in the article below; this post only implements the haproxy part:
https://blog.csdn.net/u010489158/article/details/81744757
The extended yum repository configuration used on the nodes (all repos are served from the 172.25.1.250 install server):

[rhel-source]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.1.250/rhel6.5
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
[salt]
name=salt
baseurl=http://172.25.1.250/rhel6
enabled=1
gpgcheck=0
[HighAvailability]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.1.250/rhel6.5/HighAvailability
enabled=1
gpgcheck=1
[LoadBalancer]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.1.250/rhel6.5/LoadBalancer
enabled=1
gpgcheck=1
[ResilientStorage]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.1.250/rhel6.5/ResilientStorage
enabled=1
gpgcheck=1
[ScalableFileSystem]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.1.250/rhel6.5/ScalableFileSystem
enabled=1
gpgcheck=1
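
As a quick sanity check (my own addition, not from the original article), the repository list can be queried on every minion through Salt's cmd.run execution module, assuming the minion keys are already accepted:

[root@server1 ~]# salt '*' cmd.run 'yum repolist'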
[root@server1 ~]# cd /srv/salt/
[root@server1 salt]# mkdir haproxy              # holds the haproxy install/configuration states
[root@server1 salt]# cd haproxy/
[root@server1 haproxy]# vim yum.sls             # state that pushes the extended yum repo file
yum-install:
  file.managed:                                  # manage a file on the minion
    - name: /etc/yum.repos.d/rhel-source.repo    # destination: where the source file is placed
    - source: salt://haproxy/file/rhel-source.repo
  cmd.run:                                       # run a shell command
    - name: yum clean all
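
Note that cmd.run in this form reruns yum clean all on every state run. A possible refinement, sketched here and not part of the original article, is the onchanges requisite, so the cache is cleaned only when the repo file actually changes:

yum-clean:
  cmd.run:
    - name: yum clean all
    - onchanges:
      - file: yum-install                        # fire only when the file.managed above reports changes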
[root@server1 haproxy]# vim make.sls            # haproxy installation state
include:
  - haproxy.yum                                  # include haproxy/yum.sls
haproxy-install:                                 # install haproxy
  pkg.installed:                                 # install via yum
    - pkgs:                                      # packages to install
      - rpm-build
  file.managed:
    - name: /root/haproxy-1.6.11.tar.gz
    - source: salt://haproxy/file/haproxy-1.6.11.tar.gz
  cmd.run:                                       # build the rpm from the tarball and install it
    - name: cd /root && rpmbuild -tb haproxy-1.6.11.tar.gz && rpm -ivh /root/rpmbuild/RPMS/x86_64/haproxy-1.6.11-1.x86_64.rpm
    - creates: /etc/haproxy                      # skip the command if this directory already exists (haproxy already installed)
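
Before wiring this into the top file, the state can be dry-run against a single minion with test=True (a verification step I am adding here, not from the original article); Salt then reports what would change without building or installing anything:

[root@server1 haproxy]# salt 'server2' state.sls haproxy.make test=True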
[root@server1 ~]# mkdir -p /srv/salt/users
[root@server1 ~]# cd /srv/salt/users
[root@server1 users]# vim haproxy.sls           # create the haproxy user
haproxy-group:                                   # create the group
  group.present:
    - name: haproxy
    - gid: 200
haproxy-user:                                    # create the user
  user.present:
    - name: haproxy
    - uid: 200
    - gid: 200
    - shell: /sbin/nologin
    - createhome: False
    - home: /usr/local/nginx
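
To confirm that the uid/gid match what haproxy.cfg expects (uid 200 / gid 200), a quick check, again my own addition:

[root@server1 ~]# salt 'server2' cmd.run 'id haproxy'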
[root@server1 haproxy]# vim service.sls         # state that starts the service
include:                                         # include haproxy/make.sls and users/haproxy.sls
  - haproxy.make
  - users.haproxy
/etc/haproxy/haproxy.cfg:                        # the state ID is the destination path on the minion; the source file is copied there
  file.managed:
    - source: salt://haproxy/file/haproxy.cfg
haproxy-service:                                 # start the haproxy service
  service.running:
    - enable: True                               # enable at boot
    - name: haproxy                              # the haproxy service
    - reload: True                               # reload instead of restart when a watched file changes
    - watch:                                     # watch the file state below
      - file: /etc/haproxy/haproxy.cfg
    - require:
      - user: haproxy                            # the user state must run first
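
The service state can also be applied and checked on its own, a sketch assuming the minion is reachable:

[root@server1 ~]# salt 'server2' state.sls haproxy.service
[root@server1 ~]# salt 'server2' service.status haproxy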
[root@server1 file]# ls                         # the file directory holds the haproxy tarball, its config file, and the extended yum repo file
haproxy-1.6.11.tar.gz haproxy.cfg rhel-source.repo
[root@server1 file]# cat haproxy.cfg
# This is a sample configuration. It illustrates how to separate static object
# traffic from dynamic traffic, and how to dynamically regulate the server load.
#
# It listens on 192.168.1.10:80, and directs all requests for Host 'img' or
# URIs starting with /img or /css to a dedicated group of servers. URIs
# starting with /admin/stats deliver the stats page.
#
global
    maxconn         10000
    stats socket    /var/run/haproxy.stat mode 600 level admin
    log             127.0.0.1 local0
    uid             200
    gid             200
    chroot          /var/empty
    daemon

# The public 'www' address in the DMZ
frontend public
    bind            *:80 name clear
    #bind           192.168.1.10:443 ssl crt /etc/haproxy/haproxy.pem
    mode            http
    log             global
    option          httplog
    option          dontlognull
    monitor-uri     /monitoruri
    maxconn         8000
    timeout client  30s
    stats uri       /admin/stats
    #use_backend    static if { hdr_beg(host) -i img }
    #use_backend    static if { path_beg /img /css }
    default_backend static                      # send all traffic to the 'static' backend pool by default

# The static backend, for 'Host: img', /img and /css.
backend static
    mode            http
    balance         roundrobin
    option          prefer-last-server
    retries         2
    option          redispatch
    timeout connect 5s
    timeout server  5s
    #option httpchk HEAD /favicon.ico
    server          statsrv1 172.25.1.3:80 check inter 1000    # the real back-end servers
    server          statsrv2 172.25.1.4:80 check inter 1000

# the application servers go here
backend dynamic
    mode            http
    balance         roundrobin
    retries         2
    option          redispatch
    timeout connect 5s
    timeout server  30s
    timeout queue   30s
    option httpchk  HEAD /login.php
    cookie          DYNSRV insert indirect nocache
    fullconn        4000   # the servers will be used at full load above this number of connections
    server          dynsrv1 192.168.1.1:80 minconn 50 maxconn 500 cookie s1 check inter 1000
    server          dynsrv2 192.168.1.2:80 minconn 50 maxconn 500 cookie s2 check inter 1000
    server          dynsrv3 192.168.1.3:80 minconn 50 maxconn 500 cookie s3 check inter 1000
    server          dynsrv4 192.168.1.4:80 minconn 50 maxconn 500 cookie s4 check inter 1000
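
Since this config is pushed by the file.managed state above, it is worth validating before a reload; haproxy's -c flag checks the configuration without starting the daemon (my own verification step, not in the original article):

[root@server2 ~]# haproxy -c -f /etc/haproxy/haproxy.cfg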
[root@server1 file]# cat rhel-source.repo
[rhel-source]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.1.250/rhel6.5
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
[salt-source]
name=Red Hat Enterprise Linux $releasever - $basearch - Source
baseurl=http://172.25.1.250/rhel6
enabled=1
gpgcheck=0
[root@server1 ~]# cd /srv/salt/
[root@server1 salt]# vim top.sls
base:
  'server2':
    - haproxy.service
  'server3':
    - apache.install
  'server4':
    - nginx.service
[root@server1 salt]# salt '*' state.highstate   # apply the highstate to all targeted minions
Requests to the haproxy node (server2) now alternate between the apache and nginx back ends:
[root@server1 ~]# curl 172.25.1.2
this is apache
[root@server1 ~]# curl 172.25.1.2
this is nginx!!!!
[root@server1 ~]# curl 172.25.1.2
this is apache
[root@server1 ~]# curl 172.25.1.2
this is nginx!!!!
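
Because the pushed haproxy.cfg defines monitor-uri /monitoruri and stats uri /admin/stats in the frontend, the balancer itself can also be probed directly (these commands are my own addition):

[root@server1 ~]# curl 172.25.1.2/monitoruri
[root@server1 ~]# curl 172.25.1.2/admin/stats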
[root@server2 ~]# netstat -nutlp |grep 80
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 7535/haproxy
[root@server3 ~]# netstat -nutlp |grep 80
tcp 0 0 :::80 :::* LISTEN 2650/httpd
[root@server4 mnt]# netstat -nutlp |grep 80
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 10438/nginx