目录
1 准备工作
1.1 每台机器添加一块磁盘
1.2 分区
1.3 格式化磁盘
1.4 挂载磁盘
2 安装GlusterFS
2.1 安装依赖
2.2 安装glusterfs
2.3 启动服务
2.4 存储主机加入信任存储池
2.5 创建分布式复制卷
前面我们讲到过分布式卷的创建,这里准备工作都是一样的,直接复制过来
机器 | 规划 |
---|---|
wyl01 | wyl01,wyl02 为复制 1,2和3,4作为分布式 |
wyl02 | wyl01,wyl02 为复制 1,2和3,4作为分布式 |
wyl03 | wyl03,wyl04 为复制 1,2和3,4作为分布式 |
wyl04 | wyl03,wyl04 为复制 1,2和3,4作为分布式 |
wyl05 | 客户端 |
# 分区(4台都要执行wyl01-04)
[root@wyl01 opt]# fdisk /dev/sdb
#这里按照提示操作
# 初始化磁盘(4台都要执行,wyl01-04)
[root@wyl01 ~]# mkfs.xfs -f /dev/sdb1
meta-data=/dev/sdb1 isize=512 agcount=4, agsize=655296 blks
= sectsz=512 attr=2, projid32bit=1
= crc=1 finobt=0, sparse=0
data = bsize=4096 blocks=2621184, imaxpct=25
= sunit=0 swidth=0 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=1
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=0 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
# 挂载,四台都要执行(wyl01-04)
[root@wyl01 opt]# mount /dev/sdb1 /data/
# 查看挂载情况
[root@wyl01 opt]# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/mapper/cl-root 17G 9.2G 7.9G 55% /
devtmpfs 473M 0 473M 0% /dev
tmpfs 489M 84K 489M 1% /dev/shm
tmpfs 489M 14M 476M 3% /run
tmpfs 489M 0 489M 0% /sys/fs/cgroup
/dev/sda1 1014M 173M 842M 18% /boot
tmpfs 98M 16K 98M 1% /run/user/42
tmpfs 98M 0 98M 0% /run/user/0
/dev/sdb1 9.8G 37M 9.2G 1% /data # 已经成功挂载
设置开机自动挂载
vim /etc/fstab
/dev/sdb1 /data xfs defaults 0 0
# 所有机器都要安装
yum install -y flex bison openssl openssl-devel acl libacl libacl-devel sqlite-devel libxml2-devel python-devel make cmake gcc gcc-c++ autoconf automake libtool unzip zip
编译安装 glusterfs,所有机器都要安装
[root@wyl01 opt]# tar -zxvf /usr/local/src/glusterfs-3.6.9.tar.gz -C /usr/local/
[root@wyl01 glusterfs-3.6.9]# cd /usr/local/glusterfs-3.6.9/
[root@wyl01 glusterfs-3.6.9]# ./configure --prefix=/usr/local/glusterfs
[root@wyl01 glusterfs-3.6.9]# make && make install
添加环境变量
[root@wyl01 glusterfs-3.6.9]# vim /etc/profile #在文件最底部添加如下内容
export GLUSTERFS_HOME=/usr/local/glusterfs
export PATH=$PATH:$GLUSTERFS_HOME/sbin
#环境变量生效
[root@wyl01 glusterfs-3.6.9]# source /etc/profile
#启动服务,四台服务端(wyl01-04)均执行
[root@wyl01 opt]# /usr/local/glusterfs/sbin/glusterd
在wyl01机器上执行(不需要对自己执行probe;在其它机器上执行时同理,只需probe其余节点)
[root@wyl01 init.d]# gluster peer probe wyl02
peer probe: success.
[root@wyl01 init.d]# gluster peer probe wyl03
peer probe: success.
[root@wyl01 init.d]# gluster peer probe wyl04
peer probe: success.
查看状态,我们换到wyl02机器上执行查看
[root@wyl02 ~]# gluster peer status
Number of Peers: 3
Hostname: wyl01
Uuid: 2a0c7c38-8d0d-41b6-ac74-6066d0174e3f
State: Peer in Cluster (Connected)
Hostname: wyl03
Uuid: 017fdca6-93f2-4275-b150-901936cc2ddb
State: Peer in Cluster (Connected)
Hostname: wyl04
Uuid: 711bd618-1585-41ac-8dc0-66002a855b43
State: Peer in Cluster (Connected)
分布式复制卷:volume中brick所包含的存储服务器数必须是replica的倍数(>= 2倍),兼顾分布式和复制式的功能。
创建卷
[root@wyl01 data]# gluster volume create gv1 replica 2 wyl01:/data wyl02:/data wyl03:/data wyl04:/data force
volume create: gv1: success: please start the volume to access data
启动卷
[root@wyl01 data]# gluster volume start gv1
volume start: gv1: success
任意一台查看
[root@wyl01 data]# gluster volume info
Volume Name: gv1
Type: Distributed-Replicate
Volume ID: a108e1ef-1d05-4907-bd3b-d63c9ba57a91
Status: Started
Snapshot Count: 0
Number of Bricks: 2 x 2 = 4
Transport-type: tcp
Bricks:
Brick1: wyl01:/data
Brick2: wyl02:/data
Brick3: wyl03:/data
Brick4: wyl04:/data
Options Reconfigured:
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
[root@wyl01 ~]# gluster volume status
Status of volume: gv1
Gluster process TCP Port RDMA Port Online Pid
------------------------------------------------------------------------------
Brick wyl01:/data 49152 0 Y 3681
Brick wyl02:/data 49152 0 Y 3082
Brick wyl03:/data 49152 0 Y 11408
Brick wyl04:/data 49152 0 Y 3699
Self-heal Daemon on localhost N/A N/A Y 3704
Self-heal Daemon on wyl03 N/A N/A Y 11431
Self-heal Daemon on wyl02 N/A N/A Y 3105
Self-heal Daemon on wyl04 N/A N/A Y 3722
Task Status of Volume gv1
------------------------------------------------------------------------------
There are no active volume tasks
在客户端挂载卷到目录
[root@wyl05 data]# mount -t glusterfs 192.168.190.130:gv1 /data
4个节点,现在卷的大小为20G,现在我们创建一些文件验证
[root@wyl05 data]# touch {1..9}.txt