本文的搭建环境是在centos5.9和Centos6.4下面进行搭建配置的。
IP分配:
centos6.4:192.168.1.199
centos5.9:192.168.1.198
两台服务器都是新安装的系统,纯净的,没安装任何服务。
1、安装nginx
wget http://www.nginx.com.cn/download/nginx-1.3.9.tar.gz
wget ftp://ftp.csx.cam.ac.uk/pub/software/programming/pcre/pcre-8.30.tar.gz
wget http://zlib.net/zlib-1.2.8.tar.gz
wget http://www.openssl.org/source/openssl-1.0.0j.tar.gz
wget http://labs.frickle.com/files/ngx_cache_purge-2.1.tar.gz 图片缓存模块
解压,配置。编译,安装
./configure --user=www --group=www --prefix=/usr/local/nginx --with-openssl=../openssl-1.0.0j --with-zlib=../zlib-1.2.8 --with-pcre=../pcre-8.30 --with-http_ssl_module --add-module=../ngx_cache_purge-2.1 --with-http_sub_module --with-http_stub_status_module
make && make install
注意:--with-pcre、--with-zlib、--with-openssl 均指向对应的源码包解压目录,而不是安装后的路径
安装完毕。接下来就是nginx服务器的配置。
nginx.conf内容
user www www;
worker_processes 10;
error_log /usr/local/nginx/logs/error.log crit;
pid /usr/local/nginx/logs/nginx.pid;
worker_rlimit_nofile 65535;
events
{
use epoll;
worker_connections 65535;
}
http
{
include mime.types;
default_type application/octet-stream;
charset utf-8;
server_names_hash_bucket_size 128;
client_header_buffer_size 32k;
large_client_header_buffers 4 32k;
sendfile on;
tcp_nopush on;
keepalive_timeout 60;
tcp_nodelay on;
gzip on;
gzip_min_length 1k;
gzip_buffers 4 16k;
gzip_http_version 1.1;
gzip_comp_level 2;
gzip_types text/plain application/x-javascript text/css application/xml;
gzip_vary on;
limit_zone crawler $binary_remote_addr 10m;
client_max_body_size 300m;
client_body_buffer_size 128k;
proxy_connect_timeout 10;
proxy_read_timeout 60;
proxy_send_timeout 10;
proxy_buffer_size 16k;
proxy_buffers 4 64k;
proxy_busy_buffers_size 128k;
proxy_temp_file_write_size 128k;
proxy_temp_path /var/proxy_temp_dir;
proxy_cache_path /var/proxy_cache_dir levels=1:2 keys_zone=cache_one:256m inactive=1d
max_size=30g;
include upstream/*.conf;
include vhost/*.conf;
include 301/*.conf;
}
upstream文件内容(反向代理)
upstream 10_10_1_1_pool {
server 10.10.1.1:80 weight=1;
}
upstream 192_168_10_1_pool {
server 192.168.1.1:80 weight=1;
}
vhost文件内容
server
{
listen 80 ;
server_name .ouyanglinux.com;
location /
{
#proxy_next_upstream http_502 http_504 error timeout invalid_header;
proxy_cache cache_one;
proxy_cache_valid 200 304 12h;
proxy_cache_key $host$uri$is_args$args;
proxy_pass http://192_168_10_1_pool;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
expires 1d;
}
location ~/purge(/.*)
{
allow 127.0.0.1;
#allow X.X.X.X/X;
deny all;
proxy_cache_purge cache_one $host$1$is_args$args;
}
location ~.*\.(php|jsp|cgi)?$
{
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_pass http://192_168_10_1_pool;
}
location ~.*\.(html|htm|css)?$
{
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_pass http://192_168_10_1_pool;
}
access_log /usr/local/nginx/logs/ouyanglinux.com_access.log;
}
服务器搭建并配置完毕。
接下开始部署lvs了
uname -a
ln -s /usr/src/kernels/2.6.32-358.11.1.el6.x86_64/ /usr/src/linux ### 创建软链接,将当前的kernels连接到/usr/src/linux 否则无法支持IPVS
下载软件
wget http://www.linuxvirtualserver.org/software/kernel-2.6/ipvsadm-1.24.tar.gz
wget http://www.keepalived.org/software/keepalived-1.2.7.tar.gz
安装ipvsadm前需要安装 kernel-devel否则会报错
yum -y install kernel-devel
安装完后,进入到ipvsadm的源码目录直接make && make install
接下来安装keepalived
./configure --prefix=/usr/local/keepalived
编译的时候出现了
configure: error:
!!! OpenSSL is not properly installed on your system. !!!
!!! Can not include OpenSSL headers files. !!!
原因是缺少部分依赖包
yum -y install e2fsprogs-devel keyutils-libs-devel libsepol-devel libselinux-devel krb5-devel zlib-devel openssl-devel popt-devel
./configure --prefix=/usr/local/keepalived && make && make install
把keepalived作为系统服务
cp /usr/local/keepalived/etc/rc.d/init.d/keepalived /etc/rc.d/init.d/
cp /usr/local/keepalived/etc/sysconfig/keepalived /etc/sysconfig/
mkdir /etc/keepalived
cp /usr/local/keepalived/etc/keepalived/keepalived.conf /etc/keepalived/
cp /usr/local/keepalived/sbin/keepalived /usr/sbin/
做完这些,我们开始修改keepalived的配置文件。首先在lvs_master上修改
LVS_MASTER的配置:
! Configuration File for keepalived
global_defs {
notification_email {
[email protected]
}
notification_email_from [email protected]
smtp_server smtp.163.com
smtp_connect_timeout 30
! LVS 负载均衡标识,在一个网络内,它是唯一标识
router_id LVS_DEVEL
}
vrrp_script chk_http {
script "/usr/local/keepalived/nginx_pid.sh"
interval 9
weight 1
}
vrrp_instance VI_1 {
state MASTER
interface eth0
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script {
chk_http
}
virtual_ipaddress {
192.168.1.188
}
}
vrrp_instance VI_2{
state BACKUP
interface eth0
virtual_router_id 49
priority 99
advert_int 1
authentication{
auth_type PASS
auth_pass 1111
}
# virtual_ipaddress{
# 192.168.1.189
# }
}
LVS_BACKUP的 配置文件为
! Configuration File for keepalived
global_defs {
notification_email {
[email protected]
}
notification_email_from [email protected]
smtp_server smtp.163.com
smtp_connect_timeout 30
router_id LVS_DEVEL
}
vrrp_script chk_http {
script "/usr/local/keepalived/nginx_pid.sh"
interval 9
weight 1
}
vrrp_instance VI_1 {
state BACKUP
interface eth0
virtual_router_id 51
priority 99
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script {
chk_http
}
virtual_ipaddress {
192.168.1.188
}
}
vrrp_instance VI_2{
state MASTER
interface eth0
virtual_router_id 49
priority 100
advert_int 1
authentication{
auth_type PASS
auth_pass 1111
}
# virtual_ipaddress{
# 192.168.1.189
# }
}
配置好了,可以启动keepalived了。
/usr/local/keepalived/sbin/keepalived -D -f /etc/keepalived/keepalived.conf
然后可以用命令查看ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast qlen 1000
link/ether 00:15:5d:01:64:01 brd ff:ff:ff:ff:ff:ff
inet 192.168.1.198/24 brd 192.168.1.255 scope global eth0
inet 192.168.1.188/32 scope global eth0
在从服务器上也可以用此命令查看
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:15:5d:01:64:00 brd ff:ff:ff:ff:ff:ff
inet 192.168.1.199/24 brd 192.168.1.255 scope global eth0
inet 192.168.1.188/32 scope global eth0
inet6 fe80::215:5dff:fe01:6400/64 scope link
valid_lft forever preferred_lft forever
也可以用命令查看日志文件 tail -100 /var/log/messages
可以看到现在主从的主要工作还是在 VI_1 上
然后编辑 nginx_pid.sh 脚本
#!/bin/bash
A=`ps -C nginx --no-header |wc -l`
if [ $A -eq 0 ];then
/usr/local/nginx/sbin/nginx
sleep 5
if [ `ps -C nginx --no-header |wc -l` -eq 0 ];then
killall keepalived
fi
fi
~
编辑权限
chown www:www nginx_pid.sh
chmod 744 nginx_pid.sh
下面就是测试了。我们输入 http://192.168.1.188 和 http://192.168.1.198,结果都一样。http://192.168.1.199 显示的是从 nginx 的信息。
然后关闭主nginx服务器
发现lvs会自动切换到从库上.