Keepalived+drbd+nfs 实现nfs高可用

前言

  CentOS 7.5下Keepalived+drbd+nfs练习笔记

主机准备

drbd服务主机需要可用的分区或者磁盘

主机名 IP
test(部署及nfs客户端) 192.168.0.35
drbd-0001 192.168.0.81
drbd-0002 192.168.0.82

部署test主机ansible

ansible 环境部署

[root@test ~]# yum -y install ansible
[root@test ~]# vim /etc/hosts
192.168.0.81 drbd-0001
192.168.0.82 drbd-0002
[root@test ~]# vim /etc/ansible/ansible.cfg 
[defaults]
inventory      = /etc/ansible/hosts
host_key_checking = False
[root@test ~]# vim /etc/ansible/hosts 
[drbd]
drbd-000[1:2]
[root@test ~]# ssh-copy-id drbd-0001
[root@test ~]# ssh-copy-id drbd-0002

ansible 环境检测

[root@test ~]# ansible drbd -m ping
drbd-0001 | SUCCESS => {
    "changed": false, 
    "ping": "pong"
}
drbd-0002 | SUCCESS => {
    "changed": false, 
    "ping": "pong"
}

drbd部署

检查可用磁盘

[root@test ~]# ansible drbd -m shell -a 'ls /dev/vdb'
drbd-0002 | SUCCESS | rc=0 >>
/dev/vdb

drbd-0001 | SUCCESS | rc=0 >>
/dev/vdb

磁盘分区 vdb1作为数据盘 vdb2作为元数据存储

[root@test ~]# ansible drbd -m shell -a 'parted /dev/vdb mklabel GPT ;parted /dev/vdb mkpart primary 1M 90%;parted /dev/vdb mkpart primary 90% 100%'
[root@test ~]# ansible drbd -m shell -a 'lsblk'
drbd-0001 | SUCCESS | rc=0 >>
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
vda    253:0    0   40G  0 disk 
└─vda1 253:1    0   40G  0 part /
vdb    253:16   0   10G  0 disk 
├─vdb1 253:17   0    9G  0 part 
└─vdb2 253:18   0 1023M  0 part 

drbd-0002 | SUCCESS | rc=0 >>
NAME   MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
vda    253:0    0   40G  0 disk 
└─vda1 253:1    0   40G  0 part /
vdb    253:16   0   10G  0 disk 
├─vdb1 253:17   0    9G  0 part 
└─vdb2 253:18   0 1023M  0 part 

配置/etc/hosts 此处主机名必须和drbd主机名一致

[root@test ~]# vim hosts
::1	localhost	localhost.localdomain	localhost6	localhost6.localdomain6
127.0.0.1	localhost	localhost.localdomain	localhost4	localhost4.localdomain4
192.168.0.81 drbd-0001
192.168.0.82 drbd-0002
[root@test ~]# ansible drbd -m copy -a 'src=hosts dest=/etc/hosts'

安装drbd84和kmod-drbd84

[root@test ~]# ansible drbd -m yum -a 'name=drbd84-utils,kmod-drbd84 state=installed'

加载模块

[root@test ~]# ansible drbd -m shell -a 'modprobe drbd'
drbd-0002 | FAILED | rc=1 >>
modprobe: FATAL: Module drbd not found.non-zero return code

drbd-0001 | FAILED | rc=1 >>
modprobe: FATAL: Module drbd not found.non-zero return code

若有以上报错,需要再安装内核升级包,再加载模块

[root@test ~]# ansible drbd -m yum -a 'name=kernel-devel,kernel,kernel-headers state=installed'
[root@test ~]# ansible drbd -m shell -a 'reboot'
[root@test ~]# ansible drbd -m shell -a 'modprobe drbd'
[root@test ~]# ansible drbd -m shell -a 'lsmod | grep drbd'
drbd-0001 | SUCCESS | rc=0 >>
drbd                  397041  0 
libcrc32c              12644  1 drbd

drbd-0002 | SUCCESS | rc=0 >>
drbd                  397041  0 
libcrc32c              12644  1 drbd

给/dev/vdb1创建文件系统 元数据盘不能创建文件系统

[root@test ~]# ansible drbd -m shell -a 'mkfs.ext4 /dev/vdb1'

编辑drbd配置文件

[root@test ~]# vim global_common.conf
global {
    usage-count no;                 # do not report usage statistics to LINBIT
}
common {
    protocol C;                     # protocol C: fully synchronous replication (most reliable)
    disk {
 	    on-io-error detach;         # detach the backing device on lower-level I/O errors
    }
    syncer {
	    rate 100M;                  # cap the resync transfer rate
    }
}
resource data {                     # DRBD resource named "data"
    on drbd-0001 {                  # node name must match this host's hostname
  	    device /dev/drbd1;          # name of the DRBD virtual block device
	    disk /dev/vdb1;             # backing data partition
	    address 192.168.0.81:7899;  # replication endpoint for this node
	    meta-disk /dev/vdb2[0];     # external metadata partition (index 0)
    }
    on drbd-0002 {              
	    device /dev/drbd1;      
	    disk /dev/vdb1;
	    address 192.168.0.82:7899;
	    meta-disk /dev/vdb2[0];
    }
}
[root@test ~]# ansible drbd -m copy -a 'src=global_common.conf dest=/etc/drbd.d/global_common.conf'

创建 资源名为data的元数据

[root@test ~]# ansible drbd -m shell -a 'drbdadm create-md data'

启用 名为data的资源

[root@test ~]# ansible drbd -m shell -a 'drbdadm up data'

在drbd-0001节点上查看信息 此时2者均为从状态

[root@test ~]# ansible drbd-0001 -m shell -a 'cat /proc/drbd'
drbd-0001 | SUCCESS | rc=0 >>
version: 8.4.11-1 (api:1/proto:86-101)
GIT-hash: 66145a308421e9c124ec391a7848ac20203bb03c build by mockbuild@, 2018-11-03 01:26:55

 1: cs:Connected ro:Secondary/Secondary ds:Inconsistent/Inconsistent C r-----
    ns:0 nr:0 dw:0 dr:0 al:8 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:9435904

设定drbd-0001为主并同步数据

[root@test ~]# ansible drbd-0001 -m shell -a 'drbdadm -- --overwrite-data-of-peer primary data'

此时查看,正在同步数据

[root@test ~]# ansible drbd-0001 -m shell -a 'cat /proc/drbd'
drbd-0001 | SUCCESS | rc=0 >>
version: 8.4.11-1 (api:1/proto:86-101)
GIT-hash: 66145a308421e9c124ec391a7848ac20203bb03c build by mockbuild@, 2018-11-03 01:26:55

 1: cs:SyncSource ro:Primary/Secondary ds:UpToDate/Inconsistent C r-----
    ns:1028096 nr:0 dw:0 dr:1030168 al:8 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:8408064
	[=>..................] sync'ed: 11.0% (8208/9212)M
	finish: 0:03:31 speed: 39,680 (39,540) K/sec

数据同步完成后,drbd-0001为主,且数据同步完成则配置完成

[root@test ~]# ansible drbd-0001 -m shell -a 'cat /proc/drbd'
drbd-0001 | SUCCESS | rc=0 >>
version: 8.4.11-1 (api:1/proto:86-101)
GIT-hash: 66145a308421e9c124ec391a7848ac20203bb03c build by mockbuild@, 2018-11-03 01:26:55

 1: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r-----
    ns:9436160 nr:0 dw:0 dr:9438232 al:8 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:0

测试drbd配置 将虚拟磁盘/dev/drbd1 挂载/data 创建测试文件

[root@test ~]# ansible drbd-0001 -m shell -a 'mkdir /data;mount /dev/drbd1 /data;touch /data/a.txt'

在drbd-0002节点上关闭drbd服务,挂载/dev/vdb1 查看测试文件是否存在

[root@test ~]# ansible drbd-0002 -m shell -a 'drbdadm down data;mkdir /data ;mount /dev/vdb1 /data;ls /data'
drbd-0002 | SUCCESS | rc=0 >>
a.txt
lost+found

恢复drbd节点的drbd服务

[root@test ~]# ansible drbd-0002 -m shell -a 'umount /data;drbdadm up data'

keepalived+nfs部署

安装keepalived 和nfs-utils,192.168.0.210为vip

[root@test ~]# ansible drbd -m yum -a 'name=keepalived,nfs-utils state=installed'

编辑keepalived配置文件
drbd-0001配置为

[root@drbd-0001 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
   notification_email {
     [email protected]
     [email protected]
     [email protected]
   }
   notification_email_from [email protected]
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id drbd1                  # unique identifier for this node
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}
# Health check: succeed while the NFS port (2049) is listening.
vrrp_script check_service {
   script "ss -nualpt | grep 2049"
   interval 2                       # run the check every 2 seconds
   weight -50                       # on failure drop priority by 50 (105 -> 55, below the peer's 95)
}

vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 51            # must match on both nodes
    priority 105                    # higher than the BACKUP node's 95
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111              # must match on both nodes
    }
    track_script {
        check_service               # tie VRRP priority to the NFS health check
    }
    # State-transition hooks: promote/demote DRBD and start/stop NFS.
    notify_master /etc/keepalived/notify_master.sh
    notify_backup /etc/keepalived/notify_backup.sh
    notify_stop /etc/keepalived/notify_stop.sh
    virtual_ipaddress {
        192.168.0.210 dev eth0 label eth0:1    # the VIP that NFS clients mount
    }
}

drbd-0002的配置为

[root@drbd-0002 ~]# vim /etc/keepalived/keepalived.conf
global_defs {
   notification_email {
     [email protected]
     [email protected]
     [email protected]
   }
   notification_email_from [email protected]
   smtp_server 192.168.200.1
   smtp_connect_timeout 30
   router_id drbd2                  # unique identifier for this node
   vrrp_skip_check_adv_addr
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}
# Health check: succeed while the NFS port (2049) is listening.
# NOTE(review): the pattern has a trailing space ("2049 ") unlike the
# master's config — presumably harmless, but confirm it still matches ss output.
vrrp_script check_service {
   script "ss -nualpt | grep 2049 "
   interval 2                       # run the check every 2 seconds
   weight -50                       # on failure drop priority by 50
}

vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 51            # must match on both nodes
    priority 95                     # lower than the MASTER node's 105
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111              # must match on both nodes
    }
    track_script {
        check_service               # tie VRRP priority to the NFS health check
    }
    # State-transition hooks: promote/demote DRBD and start/stop NFS.
    notify_master /etc/keepalived/notify_master.sh
    notify_backup /etc/keepalived/notify_backup.sh
    notify_stop /etc/keepalived/notify_stop.sh
    virtual_ipaddress {
        192.168.0.210 dev eth0 label eth0:1    # the VIP that NFS clients mount
    }
}

配置nfs文件

[root@test ~]# vim exports
/data 192.168.0.0/24(ro)
[root@test ~]# ansible drbd -m copy -a 'src=exports dest=/etc/'

配置脚本文件

[root@test ~]# vim notify_master.sh 
#!/bin/bash
# Runs when keepalived promotes this node to MASTER:
# promote the DRBD resource, mount the replicated volume, start NFS.
drbdadm primary data      # take over as DRBD primary for resource "data"
mount /dev/drbd1 /data    # mount the DRBD device at the export point
systemctl start nfs       # start serving /data over NFS
[root@test ~]# vim notify_backup.sh 
#!/bin/bash
# Runs when keepalived demotes this node to BACKUP:
# stop NFS, release the mount, then demote the DRBD resource.
systemctl stop nfs        # stop exporting before unmounting
umount /data              # release /dev/drbd1 so it can be demoted
drbdadm secondary data    # step down to DRBD secondary
[root@test ~]# vim notify_stop.sh 
#!/bin/bash
# Runs when keepalived itself stops on this node:
# hand the resource back cleanly so the peer can take over.
systemctl stop nfs        # stop exporting before unmounting
umount /data              # release the DRBD device
drbdadm secondary data    # demote so the peer can become primary
# NOTE(review): restarting nfs right after demoting and unmounting looks
# unintended (there is nothing left to export here) — confirm this line.
systemctl restart nfs
[root@test ~]# ansible drbd -m copy -a 'src=notify_master.sh dest=/etc/keepalived/'
[root@test ~]# ansible drbd -m copy -a 'src=notify_backup.sh dest=/etc/keepalived/'
[root@test ~]# ansible drbd -m copy -a 'src=notify_stop.sh dest=/etc/keepalived/'
[root@test ~]# ansible drbd -m shell -a 'chmod +x /etc/keepalived/noti*' 

启动服务调试

drbd主从检查

[root@test ~]# ansible drbd -m shell -a 'cat /proc/drbd'

启动nfs服务

[root@test ~]# ansible drbd -m shell -a 'systemctl start nfs'

启动keepalived

[root@test ~]# ansible drbd -m shell -a 'systemctl start keepalived'

nfs客户端挂载

[root@test ~]# mkdir /data
[root@test ~]# mount 192.168.0.210:/data /data/

在主节点停止keepalived服务或者nfs服务测试

你可能感兴趣的:(drbd)