官网地址:https://linbit.com/linbit-software-download-page-for-linstor-and-drbd-linux-driver/#drbd-9
本文用到的是:drbd-9.1.5.tar.gz drbd-utils-9.19.1.tar.gz
两台机器增加hosts记录并确定已生效
192.168.133.142 node3
192.168.133.139 node4
每一台服务器都新增一块硬盘 /dev/sdb
安装kernel相关组件,yum install kernel-*,包括kernel-devel,kernel-headers,kernel-tools等,注意安装的kernel-devel的版本要与uname -r的版本一致,关系到后面的安装,否则会出现报错
安装drbd内核模块,直接make安装
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
# 配置拓展源
yum install kernel kernel-devel kernel-tools kernel-headers -y
tar xf drbd-9.1.5.tar.gz -C /usr/local/src/
cd /usr/local/src/drbd-9.1.5/
yum install gcc gcc-c++ make -y
yum install patch
make KDIR=/usr/src/kernels/3.10.0-1160.49.1.el7.x86_64
make install
完成安装后新增如下文件
ll /lib/modules/3.10.0-1160.49.1.el7.x86_64/updates
total 13852
-rw-r--r-- 1 root root 13516648 Jan 12 10:19 drbd.ko
-rw-r--r-- 1 root root 664760 Jan 12 10:19 drbd_transport_tcp.ko
加载drbd模块并确认生效
cd /lib/modules/3.10.0-1160.49.1.el7.x86_64
cp updates/drbd*ko kernel/lib/
depmod
reboot
modprobe drbd
lsmod | grep drbd
drbd 554407 0
libcrc32c 12644 4 xfs,drbd,nf_nat,nf_conntrack
yum install -y flex po4a libxslt docbook*
yum install rubygem-asciidoctor
tar xf drbd-utils-9.19.1.tar.gz -C /usr/local/src/
cd /usr/local/src/drbd-utils-9.19.1/
./configure --prefix=/usr/local/drbd-utils
make && make install
为drbd-overview.pl这个脚本创建软链接
ln -sv /usr/local/src/drbd-utils-9.19.1/scripts/drbd-overview.pl /usr/bin/drbd-overview.pl
mv /usr/local/drbd-utils/etc/drbd.d/global_common.conf /usr/local/drbd-utils/etc/drbd.d/global_common.conf.orig
cat << EOF > /usr/local/drbd-utils/etc/drbd.d/global_common.conf
global {
usage-count no;
}
common {
net {
protocol C;
}
}
EOF
cat << EOF > /usr/local/drbd-utils/etc/drbd.d/nfs.res
resource nfs {
disk /dev/sdb;
device /dev/drbd0;
meta-disk internal;
on node3 {
address 192.168.133.142:7789;
}
on node4 {
address 192.168.133.139:7789;
}
}
EOF
新建资源(两台机器都操作)
drbdadm create-md nfs
drbdadm up nfs # 启用资源
查看节点的角色,当前两台均为备机
drbdadm role nfs
将主节点设置primary
drbdadm primary --force nfs
查看drbd状态执行drbd-overview
drbd-overview.pl
mkdir /data
# 主
mkfs.xfs /dev/drbd0
mount /dev/drbd0 /data
touch /data/file{1..3}
卸载文件系统并切换为备节点
umount /data
drbdadm secondary nfs
在从上执行以下命令确认文件
drbdadm primary nfs
mount /dev/drbd0 /data
ls -l /data
total 0
-rw-r--r--. 1 root root 0 Jun 10 09:43 file1
-rw-r--r--. 1 root root 0 Jun 10 09:43 file2
-rw-r--r--. 1 root root 0 Jun 10 09:43 file3
yum -y install rpcbind nfs-utils
cat /etc/exports
/data 192.168.133.0/24(rw,sync)
systemctl restart rpcbind
systemctl restart nfs
systemctl enable nfs-server rpcbind
为了展现效果这边就直接yum安装了
两台机器都安装
yum install keepalived -y
主节点配置:
两台机器都创建logs目录:mkdir /etc/keepalived/logs
[root@node7 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id DRBD_HA_MASTER
}
vrrp_script chk_nfs {
script "/etc/keepalived/chk_nfs.sh"
interval 2
weight -40
}
vrrp_instance VI_1 {
state MASTER
interface ens33
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script {
chk_nfs
}
notify_stop /etc/keepalived/notify_stop.sh
notify_master /etc/keepalived/notify_master.sh
notify_backup /etc/keepalived/notify_backup.sh
virtual_ipaddress {
192.168.133.200
}
}
脚本
[root@node2 keepalived]# cat chk_nfs.sh
#!/bin/bash
#
# NFS health check, invoked every 2s by keepalived (vrrp_script chk_nfs).
# If NFS is down, try to restart it once; if the restart also fails,
# release the DRBD resource and stop keepalived so the VIP fails over.

# Check whether the nfs service is running (exit 0 = active, non-zero = stopped).
# Output is discarded: 'systemctl status' prints a full status page, which
# would otherwise be emitted on every keepalived check interval.
/usr/bin/systemctl status nfs >/dev/null 2>&1
if [ $? -ne 0 ]
then
    # Service is not healthy: first attempt to bring it back up.
    /usr/bin/systemctl start nfs
    if [ $? -ne 0 ]
    then
        /usr/bin/umount /dev/drbd0          # unmount the DRBD-backed data volume
        /usr/sbin/drbdadm secondary nfs     # demote this node to DRBD secondary
        /usr/bin/systemctl stop keepalived  # stop keepalived so the VIP moves to the peer
    fi
fi
[root@node7 ~]# cat /etc/keepalived/notify_master.sh
#!/bin/bash
#
# keepalived notify_master hook: runs when this node takes over the VIP.
# Promote this node to DRBD primary, mount the data volume, start NFS,
# and append each step's output to the notify_master log.
time=$(date "+%F %T")
echo -e "$time ------notify_master------\n" >> /etc/keepalived/logs/notify_master.log
drbdadm primary nfs &>> /etc/keepalived/logs/notify_master.log
mount /dev/drbd0 /data &>> /etc/keepalived/logs/notify_master.log
systemctl start nfs &>> /etc/keepalived/logs/notify_master.log
echo -e "\n" >> /etc/keepalived/logs/notify_master.log
[root@node7 ~]# cat /etc/keepalived/notify_backup.sh
#!/bin/bash
#
# keepalived notify_backup hook: runs when this node becomes BACKUP.
# Stop NFS, unmount the data volume, demote to DRBD secondary, and
# append each step's output to the notify_backup log.
time=$(date "+%F %H:%M:%S")
echo -e "$time ------notify_backup------\n" >> /etc/keepalived/logs/notify_backup.log
systemctl stop nfs &>> /etc/keepalived/logs/notify_backup.log
umount /data &>> /etc/keepalived/logs/notify_backup.log
drbdadm secondary nfs &>> /etc/keepalived/logs/notify_backup.log
echo -e "\n" >> /etc/keepalived/logs/notify_backup.log
[root@node7 ~]# cat /etc/keepalived/notify_stop.sh
#!/bin/bash
#
# keepalived notify_stop hook: runs when keepalived itself is stopped.
# Release everything cleanly (stop NFS, unmount, demote DRBD) so the
# peer node can take over, logging each step to the notify_stop log.
time=$(date "+%F %H:%M:%S")
echo -e "$time ------notify_stop------\n" >> /etc/keepalived/logs/notify_stop.log
systemctl stop nfs &>> /etc/keepalived/logs/notify_stop.log
umount /data &>> /etc/keepalived/logs/notify_stop.log
drbdadm secondary nfs &>> /etc/keepalived/logs/notify_stop.log
echo -e "\n" >> /etc/keepalived/logs/notify_stop.log
脚本增加执行权限:chmod +x *.sh
将配置文件和脚本拷贝到备节点:
[root@node7 keepalived]# scp *.sh keepalived.conf node8:/etc/keepalived/
从节点配置文件
[root@node8 ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id DRBD_HA_BACKUP
}
vrrp_script chk_nfs {
script "/etc/keepalived/chk_nfs.sh"
interval 2
weight -40
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
virtual_router_id 51
priority 80
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
track_script {
chk_nfs
}
notify_stop /etc/keepalived/notify_stop.sh
notify_master /etc/keepalived/notify_master.sh
notify_backup /etc/keepalived/notify_backup.sh
virtual_ipaddress {
192.168.133.200
}
}
#关闭nfs查看是否自启
#关闭主服务器,测试备服务器是否可用
主服务器的/data是否自动umount,从服务器是否自动挂载