Linux Ops: Pacemaker + Nginx

I. Configure the RS hosts (server2, server3)

  • Note: both RS hosts get the same configuration; they are told apart only by their default index pages (see below)
[root@server2 ~]# yum install -y httpd
[root@server2 ~]# vim /var/www/html/index.html
[root@server2 ~]# /etc/init.d/httpd start
Starting httpd: httpd: Could not reliably determine the server's fully qualified domain name, using 172.25.12.2 for ServerName
                                                           [  OK  ]
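## The FQDN warning is harmless for this lab; setting "ServerName 172.25.12.2:80" in /etc/httpd/conf/httpd.conf silences it.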
[root@server2 ~]# curl localhost

server-2

[root@server3 ~]# curl localhost

server-3
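The two index pages are the only difference between the RS hosts; an equivalent one-liner per host (a sketch matching the page bodies above):

[root@server2 ~]# echo server-2 > /var/www/html/index.html
[root@server3 ~]# echo server-3 > /var/www/html/index.html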

II. Configure nginx on server1

1. Install nginx from source
[root@server1 ~]# ls
nginx-1.14.0.tar.gz
[root@server1 ~]# tar zxf nginx-1.14.0.tar.gz 
[root@server1 ~]# ls
nginx-1.14.0  nginx-1.14.0.tar.gz
[root@server1 ~]# cd nginx-1.14.0
[root@server1 nginx-1.14.0]# vim auto/cc/gcc 
[root@server1 nginx-1.14.0]# vim src/core/nginx.h
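## Both edits above are cosmetic polish (a sketch, assuming stock 1.14.0 sources):
##   auto/cc/gcc      : comment out CFLAGS="$CFLAGS -g" so the binary is built without debug symbols
##   src/core/nginx.h : trim the NGINX_VER define so responses carry only "Server: nginx"
##                      (visible later in the curl -I output)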
[root@server1 nginx-1.14.0]# ./configure --prefix=/usr/local/nginx --with-http_ssl_module --with-http_stub_status_module --with-threads --with-file-aio

########## Resolve build dependencies (install these first if ./configure complains) ##########
[root@server1 nginx-1.14.0]# yum install -y gcc
[root@server1 nginx-1.14.0]# yum install -y pcre-devel
[root@server1 nginx-1.14.0]# yum install -y openssl-devel
###########################

[root@server1 nginx-1.14.0]# make && make install
[root@server1 nginx-1.14.0]# ln -s /usr/local/nginx/sbin/nginx /sbin/
2. Verify the build
[root@server1 nginx-1.14.0]# nginx -t
nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful
[root@server1 nginx-1.14.0]# nginx 
[root@server1 nginx-1.14.0]# curl -I localhost
HTTP/1.1 200 OK
Server: nginx
Date: Wed, 27 Jun 2018 15:22:35 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Wed, 27 Jun 2018 15:21:00 GMT
Connection: keep-alive
ETag: "5b33ab5c-264"
Accept-Ranges: bytes

III. Set up nginx load balancing

1. Configure nginx
[root@server1 nginx-1.14.0]# useradd nginx
[root@server1 nginx-1.14.0]# vim /usr/local/nginx/conf/nginx.conf
  2 user  nginx nginx;
  3 worker_processes  1;
 18         upstream test{
 19         server 172.25.12.2:80;
 20         server 172.25.12.3:80;
 21         }
123 server {
124         listen 80;
125         server_name     www.test.org;
126          location / {
127                 proxy_pass http://test;
128          }
129 
130         }

[root@server1 nginx-1.14.0]# nginx -t
nginx: [emerg] unexpected "}" in /usr/local/nginx/conf/nginx.conf:128
nginx: configuration file /usr/local/nginx/conf/nginx.conf test failed
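## nginx -t names the offending line (128); here the new server block was likely mis-nested, leaving a stray closing brace. Re-edit until the check passes: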
[root@server1 nginx-1.14.0]# vim /usr/local/nginx/conf/nginx.conf
[root@server1 nginx-1.14.0]# nginx -t
nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful
[root@server1 nginx-1.14.0]# nginx -s reload
2. Test from the physical host (round-robin works)
[root@foundation12 ~]# vim /etc/hosts
  172.25.12.1   www.test.org
[root@foundation12 ~]# curl www.test.org

server-2

[root@foundation12 ~]# curl www.test.org

server-3

[root@foundation12 ~]# curl www.test.org

server-2

[root@foundation12 ~]# curl www.test.org

server-3
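The alternating replies confirm nginx's default round-robin scheduling across the upstream. To skew traffic toward one backend, per-server weights can be added (illustrative values, not part of the original setup):

upstream test {
        server 172.25.12.2:80 weight=2;
        server 172.25.12.3:80;
}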

IV. Add high availability

1. Configure nginx on server4
[root@server1 nginx-1.14.0]# scp -r /usr/local/nginx/ server4:/usr/local/
[root@server4 local]# ln -s /usr/local/nginx/sbin/nginx /sbin/
[root@server4 local]# useradd nginx
[root@server4 local]# nginx -t
nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful
2. Configure pacemaker on server1 and server4
  • Note: stop nginx first; from here on the cluster manages it as a service
  • Note: configure the yum repository first (the HighAvailability add-on provides corosync/pacemaker)
  • Note: server1 and server4 must be configured identically
yum install -y corosync pacemaker
cd /etc/corosync/
cp corosync.conf.example corosync.conf
vim corosync.conf
#################
 10                 bindnetaddr: 172.25.12.0
 11                 mcastaddr: 226.94.1.12
 12                 mcastport: 5405
 34 service {
 35         name: pacemaker
 36         ver: 0
 37 }
#################
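## bindnetaddr is the cluster network address; mcastaddr/mcastport must be unique per cluster on a shared LAN.
## ver: 0 runs pacemaker as a corosync plugin, so starting corosync below brings up both daemons.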
scp corosync.conf server4:/etc/corosync/
/etc/init.d/corosync start  (run on both server1 and server4)
yum install -y crmsh-1.2.6-0.rc2.2.1.x86_64.rpm pssh-2.3.1-2.1.x86_64.rpm
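With corosync running on both nodes, membership can be confirmed before going on (output abridged to the relevant line):

[root@server1 ~]# crm status
Online: [ server1 server4 ]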
3. Configure crm
## If the error below appears, enter crm and disable the STONITH check for now (fencing is configured and re-enabled in steps 4 and 5):
[root@server1 cluster]# crm_verify -LV
   error: unpack_resources:     Resource start-up disabled since no STONITH resources have been defined
   error: unpack_resources:     Either configure some or disable STONITH with the stonith-enabled option
   error: unpack_resources:     NOTE: Clusters with shared data need STONITH to ensure data integrity
Errors found during check: config not valid

[root@server1 cluster]# crm
crm(live)# configure 
crm(live)configure# property stonith-enabled=false
crm(live)configure# commit

[root@server1 cluster]# crm_verify -LV
4. Build the HA resources
[root@server1 ~]# crm 
crm(live)# configure 
crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.12.100 cidr_netmask=32 op monitor interval=1min
crm(live)configure# commit
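Once committed, the VIP comes up on one node. A quick check (assuming the cluster NIC is eth0, an assumption for this lab):

[root@server1 ~]# ip addr show eth0 | grep 172.25.12.100
    inet 172.25.12.100/32 scope global eth0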

crm(live)configure# property no-quorum-policy=ignore
crm(live)configure# commit
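## A two-node cluster loses quorum as soon as one node dies; "ignore" lets the survivor keep running its resources.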

## Note: lsb:nginx requires an nginx init script in /etc/init.d/ on both nodes; a minimal sketch follows
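A minimal wrapper sketch (hypothetical, written for the /usr/local/nginx prefix used above; pacemaker's LSB probe expects status to exit 0 when running and 3 when stopped):

#!/bin/bash
# /etc/init.d/nginx : minimal LSB wrapper for the source-built nginx
# chkconfig: - 85 15
# description: nginx reverse proxy built under /usr/local/nginx

NGINX=/usr/local/nginx/sbin/nginx
PIDFILE=/usr/local/nginx/logs/nginx.pid

case "$1" in
    start)   $NGINX ;;
    stop)    $NGINX -s stop ;;
    reload)  $NGINX -s reload ;;
    restart) $NGINX -s stop; sleep 1; $NGINX ;;
    status)
        # LSB exit codes: 0 = running, 3 = not running (pacemaker depends on these)
        if [ -f "$PIDFILE" ] && kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
            echo "nginx is running"; exit 0
        else
            echo "nginx is stopped"; exit 3
        fi ;;
    *)       echo "Usage: $0 {start|stop|reload|restart|status}"; exit 2 ;;
esac
exit $?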

crm(live)configure# primitive nginx lsb:nginx op monitor interval=30s
crm(live)configure# commit

crm(live)configure# group nginxgroup vip nginx 
crm(live)configure# commit

## When first adding the fence agent, fence_xvm is missing; tab completion offers only:
crm(live)configure# property stonith-enabled=true
crm(live)configure# primitive vmfence stonith:fence_
fence_legacy   fence_pcmk 
5. Troubleshooting
  • Physical host: fence_virtd must be running
[root@foundation12 rhel6.5]# systemctl status fence_virtd.service 
● fence_virtd.service - Fence-Virt system host daemon
   Loaded: loaded (/usr/lib/systemd/system/fence_virtd.service; enabled; vendor preset: disabled)
   Active: active (running) since Wed 2018-06-27 20:03:49 CST; 3h 59min ago
  Process: 1640 ExecStart=/usr/sbin/fence_virtd $FENCE_VIRTD_ARGS (code=exited, status=0/SUCCESS)
 Main PID: 2297 (fence_virtd)
   CGroup: /system.slice/fence_virtd.service
           └─2297 /usr/sbin/fence_virtd -w
  • server1 (needs the fence_xvm.key file)
[root@server1 cluster]# pwd
/etc/cluster
[root@server1 cluster]# ls
fence_xvm.key
  • server4 (needs the fence_xvm.key file)
[root@server4 cluster]# pwd
/etc/cluster
[root@server4 cluster]# ls
fence_xvm.key
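For reference, the key is generated once on the physical host and pushed to every node, the standard fence_virt procedure (create /etc/cluster on the nodes first):

[root@foundation12 ~]# dd if=/dev/urandom of=/etc/cluster/fence_xvm.key bs=128 count=1
[root@foundation12 ~]# scp /etc/cluster/fence_xvm.key server1:/etc/cluster/
[root@foundation12 ~]# scp /etc/cluster/fence_xvm.key server4:/etc/cluster/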
Note: install fence-virt on both server1 and server4
## fence-virt can be downloaded from www.pcks.org
yum install -y fence-virt-0.2.3-24.el6.x86_64.rpm
  • Enter crm again; the fence_xvm agent is now available and configures cleanly
[root@server1 ~]# crm 
crm(live)# configure
crm(live)configure# primitive vmfence stonith:fence_xvm params pcmk_host_map="server1:vm1;server4:vm4" op monitor interval=30s
crm(live)configure# commit
crm(live)configure# property stonith-enabled=true
crm(live)configure# commit
6. Cluster status
Online: [ server1 server4 ]

 Resource Group: nginxgroup
     vip        (ocf::heartbeat:IPaddr2):   Started server1
     nginx  (lsb:nginx):    Started server1
vmfence (stonith:fence_xvm):    Started server1
  • vmfence running on server1 suggests the fence agent on server4 hit a transient problem; clean up the resource state:
[root@server1 cluster]# crm
crm(live)# resource 
crm(live)resource# cleanup vmfence
Cleaning up vmfence on server1
Cleaning up vmfence on server4
Waiting for 1 replies from the CRMd. OK
  • Check the cluster status again: vmfence has moved to server4
Online: [ server1 server4 ]

 Resource Group: nginxgroup
     vip        (ocf::heartbeat:IPaddr2):   Started server1
     nginx  (lsb:nginx):    Started server1
vmfence (stonith:fence_xvm):    Started server4
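
To prove failover end to end, crash the active node and watch pacemaker react (destructive, lab use only):

[root@server1 ~]# echo c > /proc/sysrq-trigger

server1 should be fenced (power-cycled by fence_xvm) and nginxgroup should restart on server4.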
