Reposted from http://heylinux.com/archives/3034.html
Reference: http://navyaijm.blog.51cto.com/4647068/1258250
I. Environment
OS: CentOS 6.4 x86_64, minimal install
gluster-1 192.168.3.21
gluster-2 192.168.3.22
gluster-3 192.168.3.23
gluster-4 192.168.3.24
gluster-client 192.168.3.25
II. Configure hosts resolution and time synchronization
The hosts entries are identical on every machine:
[root@gluster-1 ~]# tail -5 /etc/hosts
192.168.3.21 gluster-1
192.168.3.22 gluster-2
192.168.3.23 gluster-3
192.168.3.24 gluster-4
192.168.3.25 gluster-client
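One way to keep the file identical everywhere (assuming root SSH access between the machines, which this article does not set up explicitly) is to edit it once on gluster-1 and push it to the rest:
# Sketch: push the finished hosts file from gluster-1 to the other nodes
for h in gluster-2 gluster-3 gluster-4 gluster-client; do
    scp /etc/hosts ${h}:/etc/hosts
done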
Set up NTP time synchronization; the same cron entry goes on every server:
[root@gluster-1 ~]# echo "*/10 * * * * /usr/sbin/ntpdate asia.pool.ntp.org &>/dev/null" >/var/spool/cron/root
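Before the cron job first fires, it can help to sync the clock once by hand so the nodes do not start out skewed; a quick one-off, for example:
/usr/sbin/ntpdate asia.pool.ntp.org   # one-off sync against the same pool
hwclock -w                            # optionally persist to the hardware clock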
III. Deploy the GlusterFS software on gluster-1 through gluster-4
1. Installation
# Install the Gluster yum repository
[root@gluster-1 ~]# wget -P /etc/yum.repos.d http://download.gluster.org/pub/gluster/glusterfs/LATEST/CentOS/glusterfs-epel.repo
# Install the EPEL repository
[root@gluster-1 ~]# rpm -ivh http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
# Use the baseurl lines directly instead of the mirror list
[root@gluster-1 ~]# sed -i 's@#b@b@g' /etc/yum.repos.d/epel.repo
[root@gluster-1 ~]# sed -i 's@mirrorlist@#mirrorlist@g' /etc/yum.repos.d/epel.repo
[root@gluster-1 ~]# yum install glusterfs glusterfs-server glusterfs-fuse -y
[root@gluster-1 ~]# chkconfig glusterd on
[root@gluster-1 ~]# /etc/init.d/glusterd start
Starting glusterd:                                         [  OK  ]
[root@gluster-1 ~]# netstat -tunlp | grep glus
tcp        0      0 0.0.0.0:24007      0.0.0.0:*      LISTEN      22351/glusterd
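A minimal CentOS 6 install usually ships with iptables enabled, which will silently block the peers from reaching each other. As a sketch (the brick port range below assumes GlusterFS 3.4+, which allocates one port per brick starting at 49152; older releases use 24009 and up), open the management and brick ports on every storage node:
# glusterd management ports plus a block of brick ports; widen the
# 49152 range if you run more bricks per node
iptables -I INPUT -p tcp --dport 24007:24008 -j ACCEPT
iptables -I INPUT -p tcp --dport 49152:49160 -j ACCEPT
service iptables save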
2. Server-side configuration
Add gluster-1 through gluster-4 to the trusted pool:
[root@gluster-1 ~]# gluster peer probe gluster-1
peer probe: success. Probe on localhost not needed
[root@gluster-1 ~]# gluster peer probe gluster-2
peer probe: success.
[root@gluster-1 ~]# gluster peer probe gluster-3
peer probe: success.
[root@gluster-1 ~]# gluster peer probe gluster-4
peer probe: success.
# Check the result on gluster-1
[root@gluster-1 ~]# gluster peer status
Number of Peers: 3

Hostname: gluster-2
Uuid: f41f5d44-a50c-4978-a2ba-9ba0081cc6f7
State: Peer in Cluster (Connected)

Hostname: gluster-3
Uuid: 4bc822b0-f879-4ccb-adef-f3050efe21c5
State: Peer in Cluster (Connected)

Hostname: gluster-4
Uuid: f9cd76bd-45d5-4424-a3d5-f4f399eefaa7
State: Peer in Cluster (Connected)
# Check the result on gluster-2
[root@gluster-2 ~]# gluster peer status
Number of Peers: 3

Hostname: gluster-1
Uuid: 1ff3ce07-87df-465f-a613-583d887f2915
State: Peer in Cluster (Connected)

Hostname: gluster-3
Uuid: 4bc822b0-f879-4ccb-adef-f3050efe21c5
State: Peer in Cluster (Connected)

Hostname: gluster-4
Uuid: f9cd76bd-45d5-4424-a3d5-f4f399eefaa7
State: Peer in Cluster (Connected)
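One quirk worth knowing: because gluster-1 probed the others, some releases record gluster-1 in the pool by its IP address rather than its hostname. If `gluster peer status` on the other nodes ever shows an IP for it, probing back once from any peer re-registers the name:
# Run on any node other than gluster-1 to record it by hostname
[root@gluster-2 ~]# gluster peer probe gluster-1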
3. On gluster-1 through gluster-4, mount /dev/sdb1 at /data
[root@gluster-1 ~]# mkdir -p /data
[root@gluster-1 ~]# tail -1 /etc/fstab
/dev/sdb1    /data    ext4    defaults    0 0
[root@gluster-1 ~]# mount -a
[root@gluster-1 ~]# df -h
Filesystem      Size  Used Avail Use% Mounted on
/dev/sda3        18G  1.3G   16G   8% /
tmpfs           116M     0  116M   0% /dev/shm
/dev/sda1       194M   28M  156M  16% /boot
/dev/sdb1        99G  188M   94G   1% /data
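The fstab entry above assumes /dev/sdb has already been partitioned and formatted. If the disk is still blank, a minimal sketch to prepare it (this destroys anything on /dev/sdb):
# Create one partition spanning the disk and format it as ext4
parted -s /dev/sdb mklabel msdos mkpart primary 1 100%
mkfs.ext4 /dev/sdb1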
4. Create three GlusterFS volumes from gluster-1
# Create a volume named glusterdata with a replica count of 2, backed by
# the /data/gluster directory on each node
[root@gluster-1 ~]# gluster volume create glusterdata replica 2 transport tcp gluster-1:/data/gluster gluster-2:/data/gluster gluster-3:/data/gluster gluster-4:/data/gluster
volume create: glusterdata: success: please start the volume to access data
# Likewise, create a second volume named glusterdata1 backed by /data/gluster1
[root@gluster-1 ~]# gluster volume create glusterdata1 replica 2 transport tcp gluster-1:/data/gluster1 gluster-2:/data/gluster1 gluster-3:/data/gluster1 gluster-4:/data/gluster1
volume create: glusterdata1: success: please start the volume to access data
# And a third volume named glusterdata2 backed by /data/gluster2
[root@gluster-1 ~]# gluster volume create glusterdata2 replica 2 transport tcp gluster-1:/data/gluster2 gluster-2:/data/gluster2 gluster-3:/data/gluster2 gluster-4:/data/gluster2
volume create: glusterdata2: success: please start the volume to access data
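Note that with replica 2, GlusterFS pairs the bricks in the order they are listed, so each of the commands above yields a 2 x 2 distributed-replicate layout:
# Replica pair 1: gluster-1:/data/gluster  <-->  gluster-2:/data/gluster
# Replica pair 2: gluster-3:/data/gluster  <-->  gluster-4:/data/gluster
# Files are distributed across the two pairs and mirrored within each pair.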
5. Start the volumes
[root@gluster-1 ~]# gluster volume start glusterdata
volume start: glusterdata: success
[root@gluster-1 ~]# gluster volume start glusterdata1
volume start: glusterdata1: success
[root@gluster-1 ~]# gluster volume start glusterdata2
volume start: glusterdata2: success
# Check the status of all three volumes
[root@gluster-1 ~]# gluster volume info

Volume Name: glusterdata
Type: Distributed-Replicate
Volume ID: 2055dbf2-eb8e-4bc5-bb64-76e4f1cab2ec
Status: Started
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: gluster-1:/data/gluster
Brick2: gluster-2:/data/gluster
Brick3: gluster-3:/data/gluster
Brick4: gluster-4:/data/gluster
Options Reconfigured:
performance.readdir-ahead: on

Volume Name: glusterdata1
Type: Distributed-Replicate
Volume ID: 755fe958-76b7-4b57-abe2-f912af8b74ea
Status: Started
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: gluster-1:/data/gluster1
Brick2: gluster-2:/data/gluster1
Brick3: gluster-3:/data/gluster1
Brick4: gluster-4:/data/gluster1
Options Reconfigured:
performance.readdir-ahead: on

Volume Name: glusterdata2
Type: Distributed-Replicate
Volume ID: 3d5ba912-c150-425b-ba28-d450ea99a4a9
Status: Started
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: gluster-1:/data/gluster2
Brick2: gluster-2:/data/gluster2
Brick3: gluster-3:/data/gluster2
Brick4: gluster-4:/data/gluster2
Options Reconfigured:
performance.readdir-ahead: on
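`gluster volume info` only shows the configured layout; to confirm that every brick process is actually online, and to see which TCP port and PID each brick runs under, use:
# Per-brick process, port and PID information for one volume
[root@gluster-1 ~]# gluster volume status glusterdata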
IV. Client installation and configuration
Install the packages:
[root@gluster-client ~]# yum install glusterfs glusterfs-fuse -y
Mount the GlusterFS file systems:
# Mount the file systems; any GlusterFS node can be given as the mount source
[root@gluster-client ~]# mkdir -p /data1 /data2 /data3
[root@gluster-client ~]# mount -t glusterfs 192.168.3.21:/glusterdata /data1
[root@gluster-client ~]# mount -t glusterfs 192.168.3.22:/glusterdata1 /data2
[root@gluster-client ~]# mount -t glusterfs 192.168.3.22:/glusterdata2 /data3
# Check the result
[root@gluster-client ~]# df -h
Filesystem                  Size  Used Avail Use% Mounted on
/dev/sda3                    18G  1.3G   16G   8% /
tmpfs                       116M     0  116M   0% /dev/shm
/dev/sda1                   194M   28M  156M  16% /boot
192.168.3.21:/glusterdata   197G  376M  187G   1% /data1
192.168.3.22:/glusterdata1  197G  376M  187G   1% /data2
192.168.3.22:/glusterdata2  197G  376M  187G   1% /data3
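These mounts will not survive a reboot. A sketch of a persistent /etc/fstab entry on the client: _netdev delays mounting until the network is up, and backupvolfile-server (spelled backup-volfile-servers in newer releases) gives the client a fallback node for fetching the volume layout at mount time; after mounting, the client talks to all bricks directly anyway.
# Example /etc/fstab line on gluster-client
192.168.3.21:/glusterdata  /data1  glusterfs  defaults,_netdev,backupvolfile-server=192.168.3.22  0 0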
V. Testing
1. Verify file integrity
# Generate a test file on the client
[root@gluster-client ~]# dd if=/dev/urandom of=/tmp/navy bs=1M count=100
100+0 records in
100+0 records out
104857600 bytes (105 MB) copied, 11.6333 s, 9.0 MB/s
# Copy the test file onto the storage
[root@gluster-client ~]# cp /tmp/navy /data1
# Compare the file checksums
[root@gluster-client ~]# md5sum /tmp/navy /data1/navy
59c1ae105edda2edcbdcfe0b8d835ac7  /tmp/navy
59c1ae105edda2edcbdcfe0b8d835ac7  /data1/navy
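Since glusterdata is a 2 x 2 distributed-replicate volume, the copied file should land in the brick directory of exactly one replica pair. Assuming SSH access to the nodes, a quick way to see which pair holds it (look but do not touch; brick directories must only ever be modified through the mount):
# The file should be present on exactly two of the four nodes
for h in gluster-1 gluster-2 gluster-3 gluster-4; do
    echo "== $h =="; ssh $h "ls -l /data/gluster/navy 2>/dev/null"
done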
2. Failure test. With a glusterfs-fuse mount, the client keeps working even if the server named at mount time goes down. With an NFS mount, pay attention to the mount options; otherwise a server failure can leave the file system hung and take the service down with it.
# Stop the storage service on one of the nodes
[root@gluster-3 ~]# service glusterfsd stop
Stopping glusterfsd:                                       [  OK  ]
# Delete the file navy on the client
[root@gluster-client ~]# rm -rf /data1/navy
[root@gluster-client ~]# ll /data1
total 0
# On gluster-3, look inside the /data/gluster brick directory; the file
# navy we created is still there
[root@gluster-3 ~]# ll /data/gluster
total 102404
-rw-r--r-- 2 root root 104857600 Aug  4 14:26 navy
# Start the glusterfsd service again
[root@gluster-3 ~]# service glusterfsd start
[root@gluster-3 ~]# ll /data/gluster
# The file has now been deleted here as well
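If a replica was down while files changed and the copies do not converge right after it comes back, the self-heal machinery can be inspected and kicked from any server node:
# List entries still pending heal, then trigger a full self-heal sweep
[root@gluster-1 ~]# gluster volume heal glusterdata info
[root@gluster-1 ~]# gluster volume heal glusterdata full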
VI. Common commands for day-to-day operations
# Delete a volume
gluster volume stop img
gluster volume delete img

# Remove a machine from the trusted pool
gluster peer detach 172.28.26.102

# Only allow the 172.28.26.0 network to access the volume
gluster volume set img auth.allow 172.28.26.*

# Add new machines to the pool and extend the volume with their bricks
# (with a replica count of 2, bricks must be added 2 (or 4, 6, 8, ...) at a time)
gluster peer probe 172.28.26.105
gluster peer probe 172.28.26.106
gluster volume add-brick img 172.28.26.105:/data/gluster 172.28.26.106:/data/gluster

# Shrink a volume
# GlusterFS first has to migrate the data off the departing bricks
gluster volume remove-brick img 172.28.26.101:/data/gluster/img 172.28.26.102:/data/gluster/img start
# Check the migration status
gluster volume remove-brick img 172.28.26.101:/data/gluster/img 172.28.26.102:/data/gluster/img status
# Commit once the migration has finished
gluster volume remove-brick img 172.28.26.101:/data/gluster/img 172.28.26.102:/data/gluster/img commit

# Migrate a brick
# To migrate the data from 172.28.26.101 to 172.28.26.107, first add 172.28.26.107 to the pool
gluster peer probe 172.28.26.107
gluster volume replace-brick img 172.28.26.101:/data/gluster/img 172.28.26.107:/data/gluster/img start
# Check the migration status
gluster volume replace-brick img 172.28.26.101:/data/gluster/img 172.28.26.107:/data/gluster/img status
# Commit once the data migration has finished
gluster volume replace-brick img 172.28.26.101:/data/gluster/img 172.28.26.107:/data/gluster/img commit
# If 172.28.26.101 has failed and can no longer run, force the commit and
# then ask GlusterFS to run a full self-heal immediately
gluster volume replace-brick img 172.28.26.101:/data/gluster/img 172.28.26.102:/data/gluster/img commit force
gluster volume heal img full
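One addition to the list above: after growing a volume with add-brick, existing files are not spread onto the new bricks automatically; a rebalance takes care of that:
# Redistribute existing data across all bricks after an add-brick
gluster volume rebalance img start
gluster volume rebalance img status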