Environment: CentOS release 6.6 (Final), VMware virtual machine
1. Create the RAID 10 array
[root@kevin ~]# ll /dev/sd*
brw-rw---- 1 root disk 8, 0 Dec 12 21:16 /dev/sda
brw-rw---- 1 root disk 8, 1 Dec 12 21:16 /dev/sda1
brw-rw---- 1 root disk 8, 2 Dec 12 21:16 /dev/sda2
brw-rw---- 1 root disk 8, 3 Dec 12 21:16 /dev/sda3
brw-rw---- 1 root disk 8, 16 Dec 12 21:16 /dev/sdb
brw-rw---- 1 root disk 8, 32 Dec 12 21:16 /dev/sdc
brw-rw---- 1 root disk 8, 48 Dec 12 21:16 /dev/sdd
brw-rw---- 1 root disk 8, 64 Dec 12 21:16 /dev/sde
brw-rw---- 1 root disk 8, 80 Dec 12 21:16 /dev/sdf    -- five clean disks prepared: b, c, d, e, f
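Not part of the original run: before creating arrays it can help to confirm the disks carry no leftover RAID metadata from earlier experiments. A quick check, assuming the same device names:
mdadm --examine /dev/sd{b,c,d,e,f}    # "mdadm: No md superblock detected" means the disk is clean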
[root@kevin ~]# mdadm -C mda -l1 -n2 /dev/sd{b,c}    -- build b and c into a RAID 1
mdadm: Note: this array has metadata at the start and
may not be suitable as a boot device. If you plan to
store '/boot' on this device please ensure that
your boot-loader understands md/v1.x metadata, or use
--metadata=0.90
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md/mda started.
[root@kevin ~]# mdadm -C mdb -l1 -n2 /dev/sd{d,e}    -- build d and e into a RAID 1
mdadm: Note: this array has metadata at the start and
may not be suitable as a boot device. If you plan to
store '/boot' on this device please ensure that
your boot-loader understands md/v1.x metadata, or use
--metadata=0.90
Continue creating array? y
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md/mdb started.
[root@kevin ~]# ll /dev/md
total 4
lrwxrwxrwx 1 root root 8 Dec 12 21:19 mda -> ../md127
lrwxrwxrwx 1 root root 8 Dec 12 21:19 mdb -> ../md126
-rw------- 1 root root 116 Dec 12 21:19 md-device-map
[root@kevin ~]# mdadm -C md10 -l0 -n 2 /dev/md12{6,7}    -- stripe the two RAID 1 arrays created above into a RAID 10
mdadm: Defaulting to version 1.2 metadata
mdadm: array /dev/md/md10 started.
[root@kevin ~]#
Option reference:
-l, --level           RAID level (1 = mirror, 0 = stripe)
-n, --raid-devices    number of member devices
-C, --create          create a new array
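Not shown in the transcript, but a quick way to verify the finished stack (device names taken from the run above):
cat /proc/mdstat         # should list md125 (raid0) on top of md126 and md127 (raid1)
mdadm -D /dev/md/md10    # detailed view: level, state, and member devices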
2. Partition the array
If the system does not have the parted command, install it with yum.
[root@kevin ~]# parted /dev/md/md10
-bash: parted: command not found
[root@kevin ~]# yum provides parted
......
[root@kevin ~]# yum -y install parted    -- install the partitioning tool
......
Partitioning:
[root@kevin ~]# parted /dev/md/md10
GNU Parted 2.1
Using /dev/md125
Welcome to GNU Parted! Type 'help' to view a list of commands.
(parted) mklabel gpt
(parted) print
Model: Unknown (unknown)
Disk /dev/md125: 42.9GB
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Number Start End Size File system Name Flags
(parted) mkpart primary 1M 1023M
(parted) mkpart primary 1024M 2047M
(parted) print
Model: Unknown (unknown)
Disk /dev/md125: 42.9GB
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Number Start End Size File system Name Flags
1 1049kB 1023MB 1022MB primary
2 1024MB 2047MB 1022MB primary
(parted) quit
Information: You may need to update /etc/fstab.
[root@kevin ~]#
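For reference, the same layout can be produced non-interactively with parted's script mode (-s); a minimal sketch equivalent to the dialog above:
parted -s /dev/md/md10 mklabel gpt
parted -s /dev/md/md10 mkpart primary 1M 1023M
parted -s /dev/md/md10 mkpart primary 1024M 2047M
parted -s /dev/md/md10 print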
3. Create the filesystems (format the partitions)
[root@kevin ~]# partx -a /dev/md/md10
BLKPG: Device or resource busy
error adding partition 1
BLKPG: Device or resource busy
error adding partition 2
The BLKPG "busy" errors are harmless here: the kernel had already registered both partitions when parted wrote the table, so partx could not add them a second time. The device nodes are indeed present:
[root@kevin ~]# ls /dev/md/md1*
/dev/md/md10 /dev/md/md10p1 /dev/md/md10p2
[root@kevin ~]# mkfs.ext4 /dev/md/md10p1
mke2fs 1.41.12 (17-May-2010)
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=128 blocks, Stripe width=256 blocks
62464 inodes, 249600 blocks
12480 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=255852544
8 block groups
32768 blocks per group, 32768 fragments per group
7808 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376
Writing inode tables: done
Creating journal (4096 blocks): done
Writing superblocks and filesystem accounting information: done
This filesystem will be automatically checked every 31 mounts or
180 days, whichever comes first. Use tune2fs -c or -i to override.
[root@kevin ~]# mkfs.ext4 /dev/md/md10p2
......
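Note the "Stride=128 blocks, Stripe width=256 blocks" line in the mkfs output above: mkfs.ext4 detected the RAID geometry (a 512 KB chunk is 128 4-KB blocks, and two data legs give a 256-block stripe width) and aligned the filesystem to it. This can be confirmed later with:
tune2fs -l /dev/md/md10p1 | grep -i 'stride\|stripe'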
[root@kevin ~]# mkdir /mnt/p{1,2}
4. Mount the partitions by UUID
[root@kevin ~]# blkid |grep md125p|awk -F" " '{print $2}' |xargs -I {} echo {} >> /etc/fstab
[root@kevin ~]# vim /etc/fstab
[root@kevin ~]# mount -a
[root@kevin ~]# tail -2 /etc/fstab
UUID=015e68a9-4f77-4ba4-bfd0-74a3fa05f940 /mnt/p1 ext4 defaults,sync 0 0
UUID=aa6c0aee-557d-470d-80ad-942c43b5704e /mnt/p2 ext4 defaults,sync 0 0
[root@kevin ~]# df -ThP
Filesystem Type Size Used Avail Use% Mounted on
/dev/sda3 ext4 87G 3.5G 79G 5% /
tmpfs tmpfs 1.9G 72K 1.9G 1% /dev/shm
/dev/sda1 ext4 488M 31M 432M 7% /boot
/dev/md125p1 ext4 944M 1.2M 894M 1% /mnt/p1
/dev/md125p2 ext4 944M 1.2M 894M 1% /mnt/p2
[root@kevin ~]#
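A detail the transcript relies on implicitly: md device numbers such as md125 are assigned at assembly time and may change across reboots, which is exactly why /etc/fstab references the filesystems by UUID. If stable array names are also wanted, a common extra step (not shown in the original) is to record the arrays:
mdadm --detail --scan >> /etc/mdadm.conf    # pin ARRAY definitions for assembly at boot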
5. Test the result
Format sdf and mount it:
[root@kevin ~]# mkfs.ext4 /dev/sdf
mke2fs 1.41.12 (17-May-2010)
/dev/sdf is entire device, not just one partition!
Proceed anyway? (y,n) y
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
1310720 inodes, 5242880 blocks
262144 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=4294967296
160 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
4096000
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done
This filesystem will be automatically checked every 27 mounts or
180 days, whichever comes first. Use tune2fs -c or -i to override.
[root@kevin ~]# mkdir /mnt/sdf
[root@kevin ~]# blkid |grep sdf|awk -F" " '{print $2}' |xargs -I {} echo {} >> /etc/fstab
[root@kevin ~]# vim /etc/fstab
[root@kevin ~]# mount -a
[root@kevin ~]# tail -3 /etc/fstab
UUID=015e68a9-4f77-4ba4-bfd0-74a3fa05f940 /mnt/p1 ext4 defaults,sync 0 0
UUID=aa6c0aee-557d-470d-80ad-942c43b5704e /mnt/p2 ext4 defaults,sync 0 0
UUID=a4a8a86b-e366-4245-9497-cd9058e6a98d /mnt/sdf ext4 defaults,sync 0 0
[root@kevin ~]#
Simulate copying data into the different partitions and compare the timings (the test file created below is 500 MB).
[root@kevin dev]# dd if=/dev/zero of=kevin.test bs=1M count=500
500+0 records in
500+0 records out
524288000 bytes (524 MB) copied, 0.384516 s, 1.4 GB/s
[root@kevin dev]# time cp -rf kevin.test /mnt/p1/
real    0m24.015s
user 0m0.021s
sys 0m1.766s
[root@kevin dev]# time cp -rf kevin.test /mnt/p2/
real    0m26.055s
user 0m0.002s
sys 0m2.741s
[root@kevin dev]# time cp -rf kevin.test /mnt/sdf/
real    0m13.752s
user 0m0.004s
sys 0m5.044s
[root@kevin dev]# time cp -rf kevin.test /tmp/
real    0m2.444s
user 0m0.002s
sys 0m0.944s
[root@kevin dev]# time sync
real    0m9.809s
user 0m0.000s
sys 0m0.001s
[root@kevin dev]#
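A side note on method: cp can return before the data actually reaches the disk, so an alternative is to let dd force the flush into the measurement. A minimal sketch, assuming the same mount points:
dd if=/dev/zero of=/mnt/p1/kevin.test bs=1M count=500 conv=fdatasync    # reported time includes flushing to disk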
At first glance these four timings contradict expectations. In this setup that is inevitable; the point here is mainly to demonstrate the testing method.
Analysis:
1. Copying the test file to /mnt/p1 and /mnt/p2 takes roughly the same time; write speed onto the same RAID 10 array is fairly constant.
2. Copying the test file to /mnt/sdf takes about half as long as writing to the RAID 10 array. That contradicts the usual expectation, yet the result is correct. My test environment is my own laptop, so performance is always bounded by the single physical disk underneath the VM. Inside a virtual machine the RAID 0 striping layer adds little speed, while the RAID 1 mirroring layer actively hurts, since every write is duplicated. Physically, the single disk is bound to finish sooner than the RAID 10 array, and "about half the time" matches reality: at the physical layer the single disk wrote 500 MB, while the RAID 10 array (two mirrors) wrote 1000 MB.
3. The last pair of timings is the copy to /tmp. Because Linux buffers disk writes by default (writes are asynchronous), I immediately timed a sync afterwards. The two times add up to about 12.2 s, which is again very close to the single-disk time for writing 500 MB.
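The manual addition in point 3 can be folded into a single measurement by timing the copy and the flush together; a sketch using the same file and target:
time ( cp -f kevin.test /tmp/ && sync )    # one number instead of cp time + sync time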