所有节点执行
yum install centos-release-gluster epel-release -y
yum install glusterfs-server -y
手动配置yum源
[root@kub1 yum.repos.d]# cat gluster.repo
[gluster]
name=gluster
baseurl=https://buildlogs.centos.org/centos/7/storage/x86_64/gluster-3.8/
gpgcheck=0
enabled=1
所有gluster集群节点执行
mkfs.xfs /dev/sdb
mount /dev/sdb /mnt
mkdir /mnt/vg0 (作为brick)
systemctl start glusterd
在任意一个节点执行即可，将其它节点加入集群（peer probe 无需在每台机器上重复执行）
gluster peer probe kub2
gluster peer probe kub3
查看集群状态
[root@kub3 vg0]# gluster peer status
Number of Peers: 2
Hostname: kub2
Uuid: 91b6fd97-ee12-4c67-b9c1-4ac594040f6e
State: Peer in Cluster (Connected)
Hostname: kub1
Uuid: 78d5be2c-0b42-4840-9001-65aa82a677ef
State: Peer in Cluster (Connected)
创建3副本复制卷（replica卷，非分布式卷），创建后需要启动卷才能挂载
gluster volume create vg0 replica 3 kub1:/mnt/vg0/ kub2:/mnt/vg0/ kub3:/mnt/vg0/
gluster volume start vg0
测试卷-本地挂载
mount -t glusterfs kub1:/vg0 /ceshi/
kub1:/vg0 fuse.glusterfs 40G 442M 40G 2% /ceshi
在NFS服务端节点安装(需要通过nfs挂载gluster的集群节点)
yum install nfs-ganesha nfs-ganesha-gluster
启动服务（启动前先关闭gluster自带的NFS服务，避免与ganesha端口冲突：gluster volume set vg0 nfs.disable on）
systemctl start nfs-ganesha
日志文件
[root@kub1 ~]# tail -f /var/log/ganesha/ganesha.log
编辑配置文件ganesha.conf
[root@kub1 ~]# cat /etc/ganesha/ganesha.conf |grep -v ^$ | grep -v "#"
EXPORT
{
Export_Id = 1 ;
Path = "/vg0";
Pseudo = "/root/vg0_pseudo";
Disable_ACL = TRUE;
Protocols = 3,4;
Access_Type = RW;
Squash = No_root_squash;
Sectype = sys;
Transports = "UDP","TCP";
FSAL {
Name = GLUSTER;
hostname = "kub1";
volume = "vg0";
}
}
重启服务
systemctl restart nfs-ganesha
客户端挂载
非gluster节点需要安装nfs-utils
[root@wss ~]# yum install nfs-utils
[root@wss ~]# mount -t nfs 192.168.5.10:/vg0 /ceshi/
gluster节点挂载
[root@kub1 ~]# mount -t nfs kub1:/vg0 /ceshi/
[root@kub1 ~]# echo 123 > /ceshi/aaa
[root@kub1 ~]# cat /ceshi/aaa
123