nginx + keepalived + tomcat + redis + mysql master-slave + session sharing




IP information list:

Name      IP               Software
-------------------------------------------------
VIP1      192.168.222.254
VIP2      192.168.222.253
nginx-1   192.168.222.2    nginx, keepalived
nginx-2   192.168.222.3    nginx, keepalived
node-1    192.168.222.4    tomcat, redis
node-2    192.168.222.5    tomcat, redis
mysql-1   192.168.222.6    mysql-master
mysql-2   192.168.222.7    mysql-backup
-------------------------------------------------

Disable the firewall and SELinux on all machines:
[root@localhost ~]# service iptables stop
[root@localhost ~]# setenforce 0
Set up the hosts file and hostnames (/etc/hosts entries list the IP first, then the hostname):
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.222.2 nginx-1
192.168.222.3 nginx-2
192.168.222.4 node-1
192.168.222.5 node-2
192.168.222.6 mysql-1
192.168.222.7 mysql-2



Install nginx + keepalived and set up VIP failover
===============================

[root@nginx-1 ~]# yum -y install pcre-devel zlib-devel openssl-devel
[root@nginx-1 ~]# useradd -M -s /sbin/nologin nginx
[root@nginx-1 ~]# tar xf nginx-1.6.2.tar.gz 
[root@nginx-1 ~]# cd nginx-1.6.2
[root@nginx-1 nginx-1.6.2]# ./configure --prefix=/usr/local/nginx --user=nginx --group=nginx --with-file-aio --with-http_stub_status_module --with-http_ssl_module --with-http_flv_module --with-http_gzip_static_module && make && make install

--prefix=/usr/local/nginx            # installation directory
--user=nginx --group=nginx           # user and group the worker processes run as
--with-file-aio                      # enable asynchronous file I/O
--with-http_stub_status_module       # enable the status/statistics page
--with-http_ssl_module               # enable the SSL module
--with-http_flv_module               # enable the FLV module (time-based seeking in FLV files)
--with-http_gzip_static_module       # serve pre-compressed .gz static files
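
To double-check which modules actually made it into the binary, you can print the build arguments afterwards (a quick sanity check; the path assumes the --prefix used above):

[root@nginx-1 ~]# /usr/local/nginx/sbin/nginx -V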


Configure nginx.conf
[root@nginx-1 nginx-1.6.2]# cp /usr/local/nginx/conf/nginx.conf{,.bak}
[root@nginx-1 nginx-1.6.2]# vim /usr/local/nginx/conf/nginx.conf
=================================================================================================================
user nginx;
worker_processes 1;
error_log logs/error.log;
pid logs/nginx.pid;

events {
    use epoll;
    worker_connections 10240;
}

http {
    include mime.types;
    default_type application/octet-stream;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    access_log logs/access.log main;

    sendfile on;
    keepalive_timeout 65;

    upstream tomcat_server {
        server 192.168.222.4:8080 weight=1;
        server 192.168.222.5:8080 weight=1;
    }

    server {
        listen 80;
        server_name localhost;

        location / {
            root html;
            index index.html index.htm;
            proxy_pass http://tomcat_server;
        }

        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root html;
        }
    }
}
=================================================================================================================
[root@nginx-1 nginx-1.6.2]# /usr/local/nginx/sbin/nginx -t
nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful

[root@nginx-1 nginx-1.6.2]# /usr/local/nginx/sbin/nginx -c /usr/local/nginx/conf/nginx.conf
[root@nginx-1 nginx-1.6.2]# netstat -anpt |grep :80
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 7184/nginx 

[root@nginx-1 nginx-1.6.2]# ps aux |grep nginx
root 7184 0.0 0.2 45000 1052 ? Ss 01:18 0:00 nginx: master process 






Compile and install the keepalived service on nginx-1 and nginx-2:
[root@nginx-1 ~]# yum -y install kernel-devel openssl-devel

[root@nginx-1 ~]# tar xf keepalived-1.2.13.tar.gz 
[root@nginx-1 ~]# cd keepalived-1.2.13
[root@nginx-1 keepalived-1.2.13]# ./configure --prefix=/ --with-kernel-dir=/usr/src/kernels/2.6.32-504.el6.x86_64/ && make && make install
[root@nginx-1 ~]# chkconfig --add keepalived
[root@nginx-1 ~]# chkconfig keepalived on
[root@nginx-1 ~]# chkconfig --list keepalived 

Edit the keepalived configuration file on nginx-1:
[root@nginx-1 ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived

global_defs {
    notification_email {
        [email protected]
    }
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123
    }
    virtual_ipaddress {
        192.168.222.254
    }
}

vrrp_instance VI_2 {
    state MASTER
    interface eth0
    virtual_router_id 52
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123
    }
    virtual_ipaddress {
        192.168.222.253
    }
}
=================================================================================================================
[root@nginx-2 ~]# vim /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
    notification_email {
        [email protected]
    }
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id LVS_DEVEL
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123
    }
    virtual_ipaddress {
        192.168.222.254
    }
}

vrrp_instance VI_2 {
    state BACKUP
    interface eth0
    virtual_router_id 52
    priority 50
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123
    }
    virtual_ipaddress {
        192.168.222.253
    }
}

[root@nginx-1 ~]# service keepalived start
[root@nginx-1 ~]# ip addr show dev eth0
2: eth0: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:2d:3d:97 brd ff:ff:ff:ff:ff:ff
inet 192.168.222.2/24 brd 192.168.200.255 scope global eth0
inet 192.168.222.254/32 scope global eth0
inet6 fe80::20c:29ff:fe2d:3d97/64 scope link 
valid_lft forever preferred_lft forever


[root@nginx-2 ~]# service keepalived start
[root@nginx-2 ~]# ip addr show dev eth0
2: eth0: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:6f:7d:87 brd ff:ff:ff:ff:ff:ff
inet 192.168.222.3/24 brd 192.168.200.255 scope global eth0
inet 192.168.222.253/32 scope global eth0
inet6 fe80::20c:29ff:fe6f:7d87/64 scope link 
valid_lft forever preferred_lft forever
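
To confirm the two keepalived instances are actually exchanging VRRP advertisements, you can watch the multicast traffic on either node (a quick check, assuming tcpdump is installed; VRRP is IP protocol 112):

[root@nginx-1 ~]# tcpdump -i eth0 -nn vrrp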

Client test:
Open a browser and visit http://192.168.222.253   # the two backends have equal weight, so repeated refreshes alternate between their pages
Client test:
Open a browser and visit http://192.168.222.254   # same behaviour through the other VIP
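
The same round-robin behaviour can be verified from a shell instead of a browser (a small sketch, assuming curl is installed and both backend Tomcats on 192.168.222.4/5 are already answering on port 8080):

[root@nginx-1 ~]# for i in 1 2 3 4; do curl -s http://192.168.222.254/ | head -1; done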


Run a script on both nginx-1 and nginx-2 that monitors the Nginx process:
[root@nginx-1 ~]# cat nginx_pidcheck
#!/bin/bash
# Restart nginx if it dies; if nginx cannot be restarted, stop keepalived
# so the VIPs fail over to the other node.
while :
do
    nginxpid=$(ps -C nginx --no-header | wc -l)
    if [ $nginxpid -eq 0 ]; then
        # nginx is down: try to start it (and keepalived, if that is down too)
        /usr/local/nginx/sbin/nginx
        keeppid=$(ps -C keepalived --no-header | wc -l)
        if [ $keeppid -eq 0 ]; then
            /etc/init.d/keepalived start
        fi
        sleep 5
        nginxpid=$(ps -C nginx --no-header | wc -l)
        if [ $nginxpid -eq 0 ]; then
            # nginx still not running: give up the VIPs
            /etc/init.d/keepalived stop
        fi
    fi
    sleep 5
done

[root@nginx-1 ~]# sh nginx_pidcheck &
[root@nginx-1 ~]# vim /etc/rc.local
sh /root/nginx_pidcheck &     # use an absolute path so the script also starts at boot

This script runs an infinite loop and is executed on both Nginx machines. Every 5 seconds it uses ps -C to check whether the number of nginx processes is 0. If it is 0 (the nginx process has died), the script tries to start nginx; if 5 seconds later the count is still 0 (nginx failed to start), it stops the local keepalived service, the VIP is taken over by the standby machine, and the whole site is then served by the standby's Nginx. This keeps the Nginx service highly available.


Script test:
[root@nginx-1 ~]# netstat -anpt |grep nginx
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 4321/nginx 
[root@nginx-1 ~]# killall -s QUIT nginx
[root@nginx-1 ~]# netstat -anpt |grep nginx
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 59418/nginx 

VIP failover test:
[root@nginx-1 ~]# ip addr show dev eth0 
2: eth0: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:2d:3d:97 brd ff:ff:ff:ff:ff:ff
inet 192.168.222.2/24 brd 192.168.200.255 scope global eth0
inet 192.168.222.254/32 scope global eth0
inet6 fe80::20c:29ff:fe2d:3d97/64 scope link 
valid_lft forever preferred_lft forever

[root@nginx-2 ~]# service keepalived stop
Stopping keepalived:                                       [  OK  ]


[root@nginx-1 ~]# ip addr show dev eth0 
2: eth0: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:2d:3d:97 brd ff:ff:ff:ff:ff:ff
inet 192.168.222.2/24 brd 192.168.200.255 scope global eth0
inet 192.168.222.254/32 scope global eth0
inet 192.168.222.253/32 scope global eth0
inet6 fe80::20c:29ff:fe2d:3d97/64 scope link 
valid_lft forever preferred_lft forever



=======================================================================
nginx-1/2 configuration complete





Install and configure the JDK and Tomcat servers:
=================================================================================================================


Install the JDK on node-1 and node-2:

Available installation formats: .tar.gz  .bin  .rpm

0. Remove the JDKs that ship with the system
[root@node1 ~]# rpm -qa | grep jdk
java-1.6.0-openjdk-devel-1.6.0.0-11.1.13.4.el6.x86_64
java-1.6.0-openjdk-1.6.0.0-11.1.13.4.el6.x86_64
java-1.7.0-openjdk-1.7.0.65-2.5.1.2.el6_5.x86_64
java-1.7.0-openjdk-devel-1.7.0.65-2.5.1.2.el6_5.x86_64

[root@node1 ~]# rpm -e java-1.6.0-openjdk-devel
[root@node1 ~]# rpm -e java-1.6.0-openjdk
[root@node1 ~]# rpm -e java-1.7.0-openjdk-devel
[root@node1 ~]# rpm -e java-1.7.0-openjdk --nodeps
[root@node1 ~]# java -version
-bash: java: command not found

1. Unpack the jdk-7u65-linux-x64.tar.gz package
[root@node1 ~]# tar xf jdk-7u65-linux-x64.tar.gz

2. Move the jdk1.7.0_65 directory to /usr/local and rename it java
[root@node1 ~]# mv jdk1.7.0_65/ /usr/local/java

3. Add the environment variables to /etc/profile
[root@node1 ~]# vim /etc/profile
export JAVA_HOME=/usr/local/java          # Java root directory
export PATH=$PATH:$JAVA_HOME/bin          # add the bin subdirectory of the Java root to PATH

4. Source the file so the new variables take effect
[root@node1 ~]# source /etc/profile

5. Run java -version or javac -version to check the Java version
[root@node1 ~]# java -version
java version "1.7.0_45"
OpenJDK Runtime Environment (rhel-2.4.3.3.el6-x86_64 u45-b15)
OpenJDK 64-Bit Server VM (build 24.45-b08, mixed mode)
[root@node1 ~]# javac -version
javac 1.7.0_45


Install and configure Tomcat on node-1 and node-2
Unpack the apache-tomcat-7.0.54.tar.gz package:
[root@node1 ~]# tar xf apache-tomcat-7.0.54.tar.gz

Unpacking creates the apache-tomcat-7.0.54 directory; move it to /usr/local and rename it tomcat:
[root@node1 ~]# mv apache-tomcat-7.0.54 /usr/local/tomcat

Start Tomcat
[root@node1 ~]# /usr/local/tomcat/bin/startup.sh 
Using CATALINA_BASE: /usr/local/tomcat
Using CATALINA_HOME: /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME: /usr/local/java
Using CLASSPATH: /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
Tomcat started.

Test by visiting http://192.168.222.254
Test by visiting http://192.168.222.253


Session replication:
Tomcat supports session clustering: all session data is replicated among the Tomcat servers, so when one backend Tomcat goes down and Nginx reschedules the user's requests, the user's existing session is still present on the remaining healthy Tomcats.

Session clustering is only suitable while the Tomcat farm is small (generally fewer than about 10 servers); beyond that the replication overhead becomes too high.

[root@node1 ~]# vim /usr/local/tomcat/conf/server.xml
Change the Engine line so it carries a jvmRoute (on node-2 use jvmRoute="node2"):
<Engine name="Catalina" defaultHost="localhost" jvmRoute="node1">

Uncomment the cluster element to enable session replication:
<Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>

[root@node1 ~]# tail -2 /usr/local/tomcat/webapps/ROOT/WEB-INF/web.xml
Add <distributable/> just before the closing tag:
    <distributable/>
</web-app>

If the VMs run in host-only mode, add a multicast route so the cluster membership traffic can be delivered:
route add -net 224.0.0.0 netmask 240.0.0.0 dev eth0
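
You can confirm the multicast route is in place afterwards (a quick check):

[root@node1 ~]# route -n | grep 224.0.0.0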

Create a session.jsp test page under the ROOT web application directory
[root@node1 ~]# vim /usr/local/tomcat/webapps/ROOT/session.jsp
Session ID:<%= session.getId() %>

SessionPort:<%= request.getServerPort() %>
<% out.println("This tomcat server 192.168.222.4");%>

[root@node2 ~]# vim /usr/local/tomcat/webapps/ROOT/session.jsp
Session ID:<%= session.getId() %>

SessionPort:<%= request.getServerPort() %>
<% out.println("This tomcat server 192.168.222.5");%>


Stop Tomcat, then start it again
[root@node1 ~]# /usr/local/tomcat/bin/shutdown.sh && /usr/local/tomcat/bin/startup.sh
Using CATALINA_BASE: /usr/local/tomcat
Using CATALINA_HOME: /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME: /usr/local/java
Using CLASSPATH: /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar

Using CATALINA_BASE: /usr/local/tomcat
Using CATALINA_HOME: /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME: /usr/local/java
Using CLASSPATH: /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
Tomcat started.



Test by visiting http://192.168.222.2/session.jsp
Test by visiting http://192.168.222.3/session.jsp
The reported server IP and name alternate between the two nodes, but the session ID stays the same.
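
The same check can be scripted by keeping the JSESSIONID in a cookie jar and requesting the page through both VIPs; the reported server IP should change while the session ID stays constant (a sketch, assuming curl is available and /tmp/cj is writable):

[root@nginx-1 ~]# curl -s -c /tmp/cj -b /tmp/cj http://192.168.222.254/session.jsp
[root@nginx-1 ~]# curl -s -c /tmp/cj -b /tmp/cj http://192.168.222.253/session.jsp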


Install Redis (node-1 and node-2):
[root@node1 ~]# tar zxf redis-3.2.0.tar.gz -C /usr/src/
[root@node1 ~]# cd /usr/src/redis-3.2.0/
[root@node-1 redis]# make && make install
Copy the configuration file:
[root@node-1 redis]# cp redis.conf /etc/


save <seconds> <changes>: how often to save an RDB snapshot; the first value is a time window in seconds, the second is a number of write operations. When that many writes happen within the window, a snapshot is written automatically. Several such conditions can be listed.
rdbcompression: whether to compress the snapshot file
dbfilename: name of the snapshot file (file name only, no directory)
dir: directory where snapshots are stored (this one is a directory)
appendonly: whether to enable the append-only log; when enabled, every write operation is logged, which improves durability at some cost in performance.
appendfsync: how the append-only log is synced to disk (three options: fsync on every write, fsync once per second, or never fsync and let the OS flush on its own)
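
For reference, the stock redis.conf ships with three such snapshot rules, which read as: save after 900 s if at least 1 key changed, after 300 s if at least 10 changed, after 60 s if at least 10000 changed:

save 900 1
save 300 10
save 60 10000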

====================================================================

Edit the configuration file on node-1:
[root@node-1 redis]# vim /etc/redis.conf
61  bind 192.168.222.4              # listen address       // changed
84  port 6379                       # listen port
105 timeout 0                       # client timeout
127 daemonize yes                   # run as a daemon      // changed
149 pidfile /var/run/redis.pid      # pid file location
157 loglevel notice                 # log level
162 logfile "/var/logs/redis.log"   # log file location
177 databases 16                    # number of databases



Start the Redis service:
[root@node-1 redis]# src/redis-server /etc/redis.conf     # start with the given config file
[root@node-1 redis]# src/redis-cli -h 192.168.222.4       # run the client
192.168.222.4:6379> set name k     # insert data
OK
192.168.222.4:6379> get name       # fetch data
"k"

==== Redis tools =====================================================
redis-server:     the Redis server daemon
redis-cli:        Redis command-line client (the plain-text protocol can also be driven via telnet)
redis-benchmark:  performance test tool; measures read/write performance on the current system
redis-check-aof:  checks and repairs append-only files
redis-check-dump: checks RDB dump files
======================================================================

On node-2, edit the configuration file: change the listen address and add slaveof 192.168.222.4 6379 below the "# slaveof" line
[root@node-2 redis]# vim /etc/redis.conf
61  bind 192.168.222.5
127 daemonize yes
264 # slaveof <masterip> <masterport>
265 slaveof 192.168.222.4 6379

Start Redis:
[root@node-2 redis]# src/redis-server /etc/redis.conf
Connect with the client:
[root@node-2 redis]# src/redis-cli -h 192.168.222.5
192.168.222.5:6379> get name       # the value inserted on node-1 can be read on node-2
"k"                                # which shows master-slave replication is working
192.168.222.5:6379> quit
Connect to node-1 and delete the test key:
[root@node-2 redis]# src/redis-cli -h 192.168.222.4
192.168.222.4:6379> get name
"k"
192.168.222.4:6379> del name
(integer) 1
192.168.222.4:6379> get name 
(nil)
192.168.222.4:6379> quit
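
The replication state can also be read directly with the standard INFO command; the master should report role:master and connected_slaves:1, the slave role:slave (a quick check):

[root@node-1 redis]# src/redis-cli -h 192.168.222.4 info replication
[root@node-2 redis]# src/redis-cli -h 192.168.222.5 info replication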

Connect Tomcat to Redis (store sessions in Redis):
[root@node-2 redis]# vim /usr/local/tomcat/conf/context.xml
Insert the following inside the <Context> element (the Valve and Manager shipped with tomcat-redis-session-manager):
<Valve className="com.orangefunction.tomcat.redissessions.RedisSessionHandlerValve" />
<Manager className="com.orangefunction.tomcat.redissessions.RedisSessionManager"
    host="192.168.222.4"
    port="6379"
    database="0"
    maxInactiveInterval="60" />
Copy the required jar files into Tomcat's lib directory:
[root@node-2 ~]# mv jedis-2.7.2.jar tomcat-redis-session-manager1.2.jar commons-pool2-2.4.1.jar /usr/local/tomcat/lib
[root@node-2 redis]# /usr/local/tomcat/bin/shutdown.sh
[root@node-2 redis]# /usr/local/tomcat/bin/startup.sh
Repeat the same configuration on node-1, then test access and verify the result.
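
After a test visit through the VIP you can look for the session entries that the Redis session manager writes, keyed by session ID (a rough check; KEYS walks the whole keyspace, so only use it on a test instance like this one):

[root@node-1 redis]# src/redis-cli -h 192.168.222.4 keys '*'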



Install and configure MySQL master-slave replication
==============================================================
Install MySQL on both mysql-1 and mysql-2:
[root@mysql-1 ~]# yum -y install mysql mysql-devel mysql-server
[root@mysql-1 ~]# service mysqld start


Install the NTP time-synchronization service
MASTER:
[root@mysql-1 ~]# yum -y install ntp
[root@mysql-1 ~]# vi /etc/ntp.conf
server 127.127.1.0
fudge 127.127.1.0 stratum 8
[root@mysql-1 ~]# service ntpd restart
[root@mysql-1 ~]# chkconfig ntpd on
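
Once ntpd has been up for a minute or two you can confirm it is serving the local clock (a quick check; ntpq ships with the ntp package):

[root@mysql-1 ~]# ntpq -p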

Synchronize time with the master server:
[root@mysql-2 ~]# yum -y install ntpdate
[root@mysql-2 ~]# ntpdate 192.168.222.6
13 May 12:29:14 ntpdate[20577]: step time server 192.168.222.6 offset -0.751071 sec

Edit the master server's configuration file /etc/my.cnf:
[root@mysql-1 ~]# vi /etc/my.cnf
server-id=1                 # add the following under [mysqld]
log-bin=binlog
binlog-do-db=slsaledb

Restart the service:
[root@mysql-1 ~]# service mysqld restart
Stopping mysqld:                                           [  OK  ]
Starting mysqld:                                           [  OK  ]
[root@mysql-1 ~]# 

[root@mysql-1 ~]# mysql -u root
mysql> grant replication slave on *.* to 'root'@'192.168.222.%' identified by '';
Query OK, 0 rows affected (0.00 sec)
mysql> flush privileges;
Query OK, 0 rows affected (0.00 sec)
mysql> show master status;
+---------------+----------+--------------+------------------+
| File | Position | Binlog_Do_DB | Binlog_Ignore_DB |
+---------------+----------+--------------+------------------+
| binlog.000001 | 106 | slsaledb | |
+---------------+----------+--------------+------------------+
1 row in set (0.00 sec)

=======================
Slave configuration
[root@mysql-2 ~]# vi /etc/my.cnf 
server-id=2                 # add under [mysqld]
[root@mysql-2 ~]# service mysqld restart
[root@mysql-2 ~]# mysql -u root
mysql> change master to master_host='192.168.222.6',master_user='root',master_password='',master_log_file='binlog.000001',master_log_pos=106;
Query OK, 0 rows affected (0.20 sec)

mysql> start slave;
Query OK, 0 rows affected (0.00 sec)

mysql> show slave status\G
*************************** 1. row ***************************
Slave_IO_State: Waiting for master to send event
Master_Host: 192.168.222.6
Master_User: root
Master_Port: 3306
Connect_Retry: 60
Master_Log_File: binlog.000001
Read_Master_Log_Pos: 106
Relay_Log_File: mysqld-relay-bin.000002
Relay_Log_Pos: 248
Relay_Master_Log_File: binlog.000001
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
Replicate_Do_DB: 
Replicate_Ignore_DB: 
Replicate_Do_Table: 
Replicate_Ignore_Table: 
Replicate_Wild_Do_Table: 
Replicate_Wild_Ignore_Table: 
Last_Errno: 0
Last_Error: 
Skip_Counter: 0
Exec_Master_Log_Pos: 106
Relay_Log_Space: 404
Until_Condition: None
Until_Log_File: 
Until_Log_Pos: 0
Master_SSL_Allowed: No
Master_SSL_CA_File: 
Master_SSL_CA_Path: 
Master_SSL_Cert: 
Master_SSL_Cipher: 
Master_SSL_Key: 
Seconds_Behind_Master: 0
Master_SSL_Verify_Server_Cert: No
Last_IO_Errno: 0
Last_IO_Error: 
Last_SQL_Errno: 0
Last_SQL_Error: 
1 row in set (0.00 sec)

Verify that replication works:
create database slsaledb;     # create the database on the master
show databases;               # on the slave, confirm the database appears
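
For example, create the database on the master and immediately look for it on the slave from the shell (a small sketch using the passwordless root account configured above):

[root@mysql-1 ~]# mysql -u root -e "create database slsaledb;"
[root@mysql-2 ~]# mysql -u root -e "show databases;" | grep slsaledb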
=================================================
Deploy the web application
Upload the SLSaleSystem web application to /usr/local/tomcat/webapps:

[root@node-1 tomcat]# cp -rpf SLSaleSystem/ /usr/local/tomcat/webapps/
[root@node-1 tomcat]# vim /usr/local/tomcat/conf/server.xml
Inside the <Host> element (around lines 124-125) add a Context for the application:
<Context docBase="SLSaleSystem" path="" reloadable="false" debug="0" />

docBase="SLSaleSystem"    # document base directory of the web application
path=""                   # context path; an empty string makes this the default (root) application
reloadable="false"        # whether Tomcat watches classes for changes and reloads them
debug="0"                 # debug level


Link the application to the database:
[root@node-1 tomcat]# cat /usr/local/tomcat/webapps/SLSaleSystem/WEB-INF/classes/jdbc.properties
driverClassName=com.mysql.jdbc.Driver
url=jdbc\:mysql\://192.168.222.6\:3306/slsaledb?useUnicode\=true&characterEncoding\=UTF-8
uname=root
password=
minIdle=10
maxIdle=50
initialSize=5
maxActive=100
maxWait=100
removeAbandonedTimeout=180
removeAbandoned=true

[root@node1 ~]# /usr/local/tomcat/bin/shutdown.sh
[root@node1 ~]# /usr/local/tomcat/bin/startup.sh
Grant privileges to the application account on mysql-1:
grant all on slsaledb.* to 'root'@'192.168.222.%' identified by '';
flush privileges;
Import the SQL file into the database:
[root@mysql-1 ~]# mysql -u root slsaledb < slsaledb-2014-4-10.sql 
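
Before testing in the browser it is worth confirming that the database account from jdbc.properties actually works from a Tomcat node (a quick check, assuming the mysql client is installed on node-1; the account uses the empty password granted above):

[root@node-1 ~]# mysql -h 192.168.222.6 -u root slsaledb -e "show tables;"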
Test access in the browser.





If you repost or download this article, please include a link to this blog.

Reposted from: https://www.cnblogs.com/pengFei666888/p/11226747.html
