Since my environment is deployed in Docker, the containers need to be restarted and their IPs configured manually.
docker start deploy
docker start mon1
docker start mon2
docker start mon3
docker start mon4
docker start mon5
docker start osd1
docker start osd2
docker start osd3
docker start osd4
Assign the IPs (for the eth0 addresses, replace <gateway> below with the docker0 gateway address):
./pipework.sh docker0 -i eth0 deploy 172.17.0.1/16@<gateway>
./pipework.sh docker0 -i eth0 mon1 172.17.0.2/16@<gateway>
./pipework.sh docker0 -i eth0 mon2 172.17.0.3/16@<gateway>
./pipework.sh docker0 -i eth0 mon3 172.17.0.4/16@<gateway>
./pipework.sh docker0 -i eth0 osd1 172.17.0.5/16@<gateway>
./pipework.sh docker0 -i eth0 osd2 172.17.0.6/16@<gateway>
./pipework.sh docker0 -i eth0 osd3 172.17.0.7/16@<gateway>
./pipework.sh docker0 -i eth0 osd4 172.17.0.8/16@<gateway>
./pipework.sh docker0 -i eth0 mon4 172.17.0.9/16@<gateway>
./pipework.sh docker0 -i eth0 mon5 172.17.0.10/16@<gateway>
./pipework.sh docker0 -i eth1 deploy 172.18.0.1/16
./pipework.sh docker0 -i eth1 mon1 172.18.0.2/16
./pipework.sh docker0 -i eth1 mon2 172.18.0.3/16
./pipework.sh docker0 -i eth1 mon3 172.18.0.4/16
./pipework.sh docker0 -i eth1 osd1 172.18.0.5/16
./pipework.sh docker0 -i eth1 osd2 172.18.0.6/16
./pipework.sh docker0 -i eth1 osd3 172.18.0.7/16
./pipework.sh docker0 -i eth1 osd4 172.18.0.8/16
./pipework.sh docker0 -i eth1 mon4 172.18.0.9/16
./pipework.sh docker0 -i eth1 mon5 172.18.0.10/16
./pipework.sh docker0 -i eth2 deploy 172.19.0.1/16
./pipework.sh docker0 -i eth2 mon1 172.19.0.2/16
./pipework.sh docker0 -i eth2 mon2 172.19.0.3/16
./pipework.sh docker0 -i eth2 mon3 172.19.0.4/16
./pipework.sh docker0 -i eth2 osd1 172.19.0.5/16
./pipework.sh docker0 -i eth2 osd2 172.19.0.6/16
./pipework.sh docker0 -i eth2 osd3 172.19.0.7/16
./pipework.sh docker0 -i eth2 osd4 172.19.0.8/16
./pipework.sh docker0 -i eth2 mon4 172.19.0.9/16
./pipework.sh docker0 -i eth2 mon5 172.19.0.10/16
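The ten docker start commands and thirty pipework calls above are repetitive; a minimal bash sketch that does the same restart-and-address assignment in one loop could look like this (it assumes the same pipework.sh and the address plan shown above; the array order reproduces the last-octet numbering):
#!/bin/bash
# Sketch: restart every container and re-attach its three interfaces.
hosts=(deploy mon1 mon2 mon3 osd1 osd2 osd3 osd4 mon4 mon5)
for idx in "${!hosts[@]}"; do
    h=${hosts[$idx]}
    n=$((idx + 1))                                   # deploy=.1, mon1=.2, ... mon5=.10
    docker start "$h"
    ./pipework.sh docker0 -i eth0 "$h" 172.17.0.$n/16@<gateway>
    ./pipework.sh docker0 -i eth1 "$h" 172.18.0.$n/16
    ./pipework.sh docker0 -i eth2 "$h" 172.19.0.$n/16
done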
Start the mons and osds:
service ceph start mon.mon1
...
ceph-mon -i mon4 --pid-file /var/run/ceph/mon.mon4.pid -c /etc/ceph/ceph.conf --cluster ceph --mon-data /data01/ceph/mon/ceph-4
service ceph start osd1
... (omitted)
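If the daemons are driven from the host rather than from a shell inside each container, the same thing can be done through docker exec; a rough sketch, assuming a Docker version that has docker exec and the sysvinit ceph script inside the containers as used above (mon4 and mon5 were added by hand and are started with ceph-mon directly, as shown):
for i in 1 2 3; do
    docker exec mon$i service ceph start mon.mon$i
done
for i in 1 2 3 4; do
    docker exec osd$i service ceph start osd.$i    # use the osd name/id that matches ceph.conf
done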
From here the ceph cluster can be managed from the host. Note that if authentication is enabled, the configuration file and keyring have to be copied to the host, and if the file names are not the defaults they must be specified explicitly on the command line.
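A sketch of that copy step, assuming the admin keyring sits at the default path inside the deploy container (the non-default file names in the last line are only illustrative):
docker cp deploy:/etc/ceph/ceph.conf /etc/ceph/ceph.conf
docker cp deploy:/etc/ceph/ceph.client.admin.keyring /etc/ceph/ceph.client.admin.keyring
# With non-default names, point the tools at them explicitly:
ceph -c /etc/ceph/my-cluster.conf -k /etc/ceph/my-admin.keyring -s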
[root@localhost rbd0]# ceph osd lspools
0 rbd
Create a pool
Syntax:
ceph osd pool create {pool-name} {pg-num} [{pgp-num}] [replicated] \
[crush-ruleset-name]
[root@localhost rbd0]# ceph osd pool create pool1 128 128 replicated
[root@localhost rbd0]# ceph osd lspools
0 rbd,1 pool1,
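The pool's parameters can be checked or adjusted afterwards with the standard get/set subcommands, e.g.:
ceph osd pool get pool1 size       # replica count
ceph osd pool get pool1 pg_num     # placement-group count chosen at creation (128 here)
ceph osd pool set pool1 size 2     # example: lower the replica count to 2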
Create block device images
[root@localhost rbd0]# rbd create --help
Create an image named img2 in the pool1 pool, 8192 MB in size, with image format 2 (which supports cloning):
[root@localhost rbd0]# rbd create --size 8192 img2 --image-format 2 -p pool1
Then create a few more:
rbd create --size 8192 img2 -p rbd
rbd create --size 8192 img1 -p rbd
rbd create --size 8192 img1 -p pool1
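The newly created images can be listed and inspected, for example:
rbd ls -p pool1            # img1, img2
rbd info pool1/img2        # size, object order, format (2) and features
rbd info img1 -p rbd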
Now the block device image can be mapped on the host (i.e., the host whose kernel was upgraded).
[root@localhost ~]# rbd map img1 --pool pool1 -m 172.17.0.2 -k /etc/ceph/ceph.client.admin.keyring
/dev/rbd0
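rbd showmapped lists the current kernel mappings, which helps when several images are mapped at once:
rbd showmapped             # columns: id, pool, image, snap, device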
[root@localhost ~]# fdisk -c -u /dev/rbd0
Welcome to fdisk (util-linux 2.23.2).
Changes will remain in memory only, until you decide to write them.
Be careful before using the write command.
Device does not contain a recognized partition table
Building a new DOS disklabel with disk identifier 0x2b2bb069.
Command (m for help): p
Disk /dev/rbd0: 8589 MB, 8589934592 bytes, 16777216 sectors
Units = sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
Disk label type: dos
Disk identifier: 0x2b2bb069
Device Boot Start End Blocks Id System
Create a filesystem
[root@localhost ~]# mkfs.xfs /dev/rbd0
log stripe unit (4194304 bytes) is too large (maximum is 256KiB)
log stripe unit adjusted to 32KiB
meta-data=/dev/rbd0 isize=256 agcount=9, agsize=261120 blks
= sectsz=512 attr=2, projid32bit=1
= crc=0
data = bsize=4096 blocks=2097152, imaxpct=25
= sunit=1024 swidth=1024 blks
naming =version 2 bsize=4096 ascii-ci=0 ftype=0
log =internal log bsize=4096 blocks=2560, version=2
= sectsz=512 sunit=8 blks, lazy-count=1
realtime =none extsz=4096 blocks=0, rtextents=0
[root@localhost ~]# mkdir /rbd0
[root@localhost ~]# mount /dev/rbd0 /rbd0
[root@localhost ~]# df -h
/dev/rbd0 8.0G 33M 8.0G 1% /rbd0
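When the device is no longer needed, the mapping can be torn down again, roughly:
umount /rbd0
rbd unmap /dev/rbd0        # release the kernel mapping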
A simple usage test:
[root@localhost rbd0]# dd if=/dev/zero of=./test.img bs=8192k count=512 oflag=direct
512+0 records in
512+0 records out
4294967296 bytes (4.3 GB) copied, 53.148 s, 80.8 MB/s
[root@localhost ~]# ceph -s
cluster f649b128-963c-4802-ae17-5a76f36c4c76
health HEALTH_OK
monmap e8: 5 mons at {mon1=172.17.0.2:6789/0,mon2=172.17.0.3:6789/0,mon3=172.17.0.4:6789/0,mon4=172.17.0.9:6789/0,mon5=172.17.0.10:6789/0}, election epoch 50, quorum 0,1,2,3,4 mon1,mon2,mon3,mon4,mon5
osdmap e59: 4 osds: 4 up, 4 in
pgmap v21762: 256 pgs, 2 pools, 1773 MB data, 456 objects
444 GB used, 1192 GB / 1636 GB avail
256 active+clean
client io 105 MB/s wr, 420 op/s
Other tests:
[root@localhost rbd1]# rbd bench-write img2 -p rbd
bench-write io_size 4096 io_threads 16 bytes 1073741824 pattern seq
SEC OPS OPS/SEC BYTES/SEC
1 30097 30059.61 123124145.56
2 52487 25616.02 104923229.04
3 77155 25716.23 105333691.08
4 102370 24461.42 100193961.33
5 128880 25772.12 105562598.27
6 150406 24981.22 102323056.68
7 174073 24863.73 101841845.28
8 192126 24012.55 98355418.43
9 220293 24475.64 100252211.29
10 241683 24168.03 98992238.19
elapsed: 11 ops: 262144 ops/sec: 23773.85 bytes/sec: 97377672.90
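rados bench is another way to exercise a pool directly (object-level rather than through a block device); a sketch of a 10-second write test followed by a sequential read of the objects it leaves behind:
rados bench -p pool1 10 write --no-cleanup    # keep the objects so they can be read back
rados bench -p pool1 10 seq                   # sequential read benchmark
rados -p pool1 cleanup                        # remove the benchmark objects afterwards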
Snapshots:
[root@localhost rbd1]# rbd snap create img2 -p rbd --snap img2@123
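Since img2 uses format 2, its snapshots can also be protected and cloned; a sketch using a hypothetical snapshot name snap1:
rbd snap ls rbd/img2                         # list snapshots of img2
rbd snap create rbd/img2@snap1               # pool/image@snap form of the command above
rbd snap protect rbd/img2@snap1              # a snapshot must be protected before cloning
rbd clone rbd/img2@snap1 pool1/img2-clone    # clones are a format-2 feature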
[References]
1. http://elrepo.org/
2. https://github.com/ceph/ceph-kmod-rpm
3. http://ceph.com/docs/master/rbd/rbd/