(1) Basic operations
1. Create a pool
'''
Before creating a pool, you usually want to override the default pg_num. The official guidance:
fewer than 5 OSDs: set pg_num to 128
5 to 10 OSDs: set pg_num to 512
10 to 50 OSDs: set pg_num to 4096
more than 50 OSDs: calculate a value with pgcalc
'''
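A minimal sketch of how these numbers are applied, assuming a throwaway pool name of mypool: pg_num is passed at creation time, and pgp_num should be kept in step if pg_num is raised later (pg_num could not be decreased before Nautilus):

# create a pool with an explicit pg_num
ceph osd pool create mypool 128
# raise pg_num later if the OSD count grows; keep pgp_num in sync
# so data actually rebalances
ceph osd pool set mypool pg_num 256
ceph osd pool set mypool pgp_num 256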
[root@k-master mnt]# ceph osd lspools
1 rbd,3 ceph,4 k8s,6 ceph-demo,7 ceph-tt,
[root@k-master mnt]# ceph osd pool create rbd-test 128
pool 'rbd-test' created
[root@k-master mnt]# ceph osd lspools
1 rbd,3 ceph,4 k8s,6 ceph-demo,7 ceph-tt,8 rbd-test,
[root@k-master mnt]# rados df
POOL_NAME USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED RD_OPS RD WR_OPS WR
ceph 0B 0 0 0 0 0 0 0 0B 0 0B
ceph-demo 37.8MiB 28 0 56 0 0 0 3018 14.6MiB 947 136MiB
ceph-tt 0B 0 0 0 0 0 0 0 0B 0 0B
k8s 0B 0 0 0 0 0 0 0 0B 0 0B
rbd 8.86MiB 24 0 48 0 0 0 59577 39.2MiB 90 4.43MiB
rbd-test 0B 0 0 0 0 0 0 0 0B 0 0B
total_objects 52
total_used 3.11GiB
total_avail 11.9GiB
total_space 15.0GiB
[root@k-master mnt]#
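To read back what was just created, the pool parameters can be queried directly (a quick sketch; values depend on cluster defaults):

ceph osd pool get rbd-test pg_num   # should report 128
ceph osd pool get rbd-test size     # replica count
ceph osd pool ls detail             # full per-pool settings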
2. Create an image
[root@k-master mnt]# rbd -p rbd-test ls
[root@k-master mnt]# rbd -p rbd-test create rbd --size 1G
[root@k-master mnt]# rbd -p rbd-test ls
rbd
[root@k-master mnt]# rbd -p rbd-test info rbd
rbd image 'rbd':
size 1GiB in 256 objects
order 22 (4MiB objects)
block_name_prefix: rbd_data.855f6b8b4567
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
flags:
create_timestamp: Mon Jul 27 11:01:34 2020
[root@k-master mnt]#
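The features line above matters for the next step: older kernels only support layering. As an alternative sketch, the feature set can be restricted at creation time so nothing has to be disabled afterwards (the image name demo-img is hypothetical):

# create an image with only the kernel-friendly layering feature
rbd create rbd-test/demo-img --size 1G --image-feature layering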
3. Map the image
[root@k-master mnt]# rbd map -p rbd-test rbd
rbd: sysfs write failed
RBD image feature set mismatch. You can disable features unsupported by the kernel with "rbd feature disable rbd-test/rbd object-map fast-diff deep-flatten".
In some cases useful info is found in syslog - try "dmesg | tail".
rbd: map failed: (6) No such device or address
[root@k-master mnt]# rbd feature disable rbd-test/rbd object-map fast-diff deep-flatten
[root@k-master mnt]# rbd map -p rbd-test rbd
/dev/rbd2
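To see what is currently mapped, and to undo a mapping once finished:

rbd showmapped            # list image -> /dev/rbdX mappings
# rbd unmap /dev/rbd2     # unmap (unmount the device first)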
4. Format the device
[root@k-master mnt]# mkfs.ext4 /dev/rbd2
mke2fs 1.42.9 (28-Dec-2013)
Discarding device blocks: done
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=1024 blocks, Stripe width=1024 blocks
65536 inodes, 262144 blocks
13107 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=268435456
8 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768, 98304, 163840, 229376
Allocating group tables: done
Writing inode tables: done
Creating journal (8192 blocks): done
Writing superblocks and filesystem accounting information: done
[root@k-master mnt]# fdisk -l | grep rbd2
Disk /dev/rbd2: 1073 MB, 1073741824 bytes, 2097152 sectors
[root@k-master mnt]#
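The mapping above does not survive a reboot. One common approach is the rbdmap helper shipped with Ceph; a sketch assuming the default admin keyring path:

# /etc/ceph/rbdmap -- one image per line:
#   rbd-test/rbd id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
systemctl enable rbdmap
# add a matching noauto/_netdev entry in /etc/fstab for the mount itself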
5. Test the mounted filesystem
[root@k-master ~]# mount /dev/rbd
rbd/ rbd0 rbd1 rbd2
[root@k-master ~]# mount /dev/rbd2 /mnt
[root@k-master ~]# cd /mnt
[root@k-master mnt]# ls
lost+found
[root@k-master mnt]# touch {1..4000}.log
[root@k-master mnt]# ls
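Since touch creates empty files, disk usage barely moves; inode consumption is the clearer signal that the 4000 files landed (output will vary):

df -i /mnt          # of the 65536 inodes created by mkfs, ~4000 are now used
ls /mnt | wc -l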
6. Resize the image and grow the mounted filesystem
[root@k-master mnt]# rbd info -p rbd-test rbd
rbd image 'rbd':
size 1GiB in 256 objects
order 22 (4MiB objects)
block_name_prefix: rbd_data.855f6b8b4567
format: 2
features: layering, exclusive-lock
flags:
create_timestamp: Mon Jul 27 11:01:34 2020
[root@k-master mnt]# rbd resize -p rbd-test rbd --size 2G
Resizing image: 100% complete...done.
[root@k-master mnt]# rbd info -p rbd-test rbd
rbd image 'rbd':
size 2GiB in 512 objects
order 22 (4MiB objects)
block_name_prefix: rbd_data.855f6b8b4567
format: 2
features: layering, exclusive-lock
flags:
create_timestamp: Mon Jul 27 11:01:34 2020
[root@k-master mnt]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 979M 0 979M 0% /dev
tmpfs 991M 0 991M 0% /dev/shm
tmpfs 991M 35M 956M 4% /run
tmpfs 991M 0 991M 0% /sys/fs/cgroup
/dev/mapper/centos-root 13G 1.9G 11G 15% /
/dev/sda1 1014M 137M 878M 14% /boot
tmpfs 991M 52K 991M 1% /var/lib/ceph/osd/ceph-0
tmpfs 991M 52K 991M 1% /var/lib/ceph/osd/ceph-1
tmpfs 199M 0 199M 0% /run/user/0
/dev/rbd2 976M 2.6M 907M 1% /mnt
[root@k-master mnt]# resize2fs /dev/rbd2
resize2fs 1.42.9 (28-Dec-2013)
Filesystem at /dev/rbd2 is mounted on /mnt; on-line resizing required
old_desc_blocks = 1, new_desc_blocks = 1
The filesystem on /dev/rbd2 is now 524288 blocks long.
[root@k-master mnt]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 979M 0 979M 0% /dev
tmpfs 991M 0 991M 0% /dev/shm
tmpfs 991M 35M 956M 4% /run
tmpfs 991M 0 991M 0% /sys/fs/cgroup
/dev/mapper/centos-root 13G 1.9G 11G 15% /
/dev/sda1 1014M 137M 878M 14% /boot
tmpfs 991M 52K 991M 1% /var/lib/ceph/osd/ceph-0
tmpfs 991M 52K 991M 1% /var/lib/ceph/osd/ceph-1
tmpfs 199M 0 199M 0% /run/user/0
/dev/rbd2 2.0G 3.1M 1.9G 1% /mnt
[root@k-master mnt]#
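resize2fs is ext4-specific. As a sketch, the grow step depends on the filesystem, and shrinking is the one direction that is not safe while mounted:

# ext4: online grow, as above
resize2fs /dev/rbd2
# XFS: grow via the mount point instead of the block device
# xfs_growfs /mnt
# shrinking the image (rbd resize --allow-shrink) will corrupt the
# filesystem unless it was shrunk first with the device unmounted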
(2) Data placement test
Placement flow: File -> Objects[ (ino,ono) -> oid ] -> PGs[ hash(oid) & mask -> pgid ] -> OSDs[ crush(pgid) -> (osd1,osd2) ]
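Each arrow in that flow can be verified from the CLI; a minimal sketch (the pg id 8.3e is taken from the ceph osd map output shown below):

# object -> pg -> OSD set in one call
ceph osd map rbd-test rbd_data.855f6b8b4567.0000000000000084
# pg -> OSD set directly
ceph pg map 8.3e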
1. Inspect the objects behind the image
[root@k-master mnt]# rbd -p rbd-test info rbd
rbd image 'rbd':
size 2GiB in 512 objects
order 22 (4MiB objects)
block_name_prefix: rbd_data.855f6b8b4567
format: 2
features: layering, exclusive-lock
flags:
create_timestamp: Mon Jul 27 11:01:34 2020
[root@k-master mnt]# rados -p rbd-test ls | grep rbd_data.855f6b8b4567
rbd_data.855f6b8b4567.00000000000000e0
rbd_data.855f6b8b4567.0000000000000086
rbd_data.855f6b8b4567.0000000000000087
rbd_data.855f6b8b4567.0000000000000100
rbd_data.855f6b8b4567.00000000000000a0
rbd_data.855f6b8b4567.0000000000000060
rbd_data.855f6b8b4567.0000000000000083
rbd_data.855f6b8b4567.0000000000000000
rbd_data.855f6b8b4567.0000000000000120
rbd_data.855f6b8b4567.0000000000000084
rbd_data.855f6b8b4567.0000000000000004
rbd_data.855f6b8b4567.0000000000000085
rbd_data.855f6b8b4567.0000000000000020
rbd_data.855f6b8b4567.00000000000000ff
rbd_data.855f6b8b4567.0000000000000081
rbd_data.855f6b8b4567.0000000000000080
rbd_data.855f6b8b4567.0000000000000082
[root@k-master mnt]# rados -p rbd-test stat rbd_data.855f6b8b4567.0000000000000084
rbd-test/rbd_data.855f6b8b4567.0000000000000084 mtime 2020-07-27 11:05:06.000000, size 4194304
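The object can also be pulled back out of RADOS to confirm it really is one 4MiB chunk of the image (the temporary path is arbitrary):

rados -p rbd-test get rbd_data.855f6b8b4567.0000000000000084 /tmp/obj.bin
ls -l /tmp/obj.bin    # expect size 4194304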
2. Trace how an object lands on its OSDs
[root@k-master mnt]# ceph osd map rbd-test rbd_data.855f6b8b4567.0000000000000084
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000084' -> pg 8.ab05493e (8.3e) -> up ([2,0], p2) acting ([2,0], p2)
[root@k-master mnt]# for i in `rados -p rbd-test ls | grep rbd_data.855f6b8b4567`; do ceph osd map rbd-test $i ;done
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.00000000000000e0' -> pg 8.d08b8020 (8.20) -> up ([2,0], p2) acting ([2,0], p2)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000086' -> pg 8.b4e84288 (8.8) -> up ([1,2], p1) acting ([1,2], p1)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000087' -> pg 8.405feb48 (8.48) -> up ([0,2], p0) acting ([0,2], p0)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000100' -> pg 8.14dc7fd4 (8.54) -> up ([2,1], p2) acting ([2,1], p2)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.00000000000000a0' -> pg 8.77d68a3c (8.3c) -> up ([2,0], p2) acting ([2,0], p2)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000060' -> pg 8.4a7479d2 (8.52) -> up ([2,1], p2) acting ([2,1], p2)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000083' -> pg 8.5eaeb09a (8.1a) -> up ([0,2], p0) acting ([0,2], p0)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000000' -> pg 8.dda22216 (8.16) -> up ([1,2], p1) acting ([1,2], p1)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000120' -> pg 8.8250ee76 (8.76) -> up ([2,1], p2) acting ([2,1], p2)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000084' -> pg 8.ab05493e (8.3e) -> up ([2,0], p2) acting ([2,0], p2)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000004' -> pg 8.7e65877e (8.7e) -> up ([0,2], p0) acting ([0,2], p0)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000085' -> pg 8.e2bd2d91 (8.11) -> up ([2,1], p2) acting ([2,1], p2)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000020' -> pg 8.b9fa6a39 (8.39) -> up ([0,2], p0) acting ([0,2], p0)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.00000000000000ff' -> pg 8.738034ad (8.2d) -> up ([0,2], p0) acting ([0,2], p0)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000081' -> pg 8.6fb63a5d (8.5d) -> up ([0,2], p0) acting ([0,2], p0)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000080' -> pg 8.2a1a83bb (8.3b) -> up ([2,0], p2) acting ([2,0], p2)
osdmap e99 pool 'rbd-test' (8) object 'rbd_data.855f6b8b4567.0000000000000082' -> pg 8.c7292e3f (8.3f) -> up ([1,2], p1) acting ([1,2], p1)
[root@k-master mnt]#
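A quick way to summarize how primaries spread across the OSDs from that loop (plain text processing, nothing Ceph-specific; the acting primary is the last field of each line):

for i in $(rados -p rbd-test ls | grep rbd_data.855f6b8b4567); do
    ceph osd map rbd-test "$i"
done | awk '{print $NF}' | sort | uniq -c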
[root@k-master mnt]# ceph osd tree
ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
-1 0.01469 root default
-3 0.00980 host k-master
0 hdd 0.00490 osd.0 up 1.00000 1.00000
1 hdd 0.00490 osd.1 up 1.00000 1.00000
-5 0.00490 host k-worker001
2 hdd 0.00490 osd.2 up 1.00000 1.00000
[root@k-master mnt]#
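The tree explains the pairs above: with replica size 2 and the default CRUSH rule separating replicas by host, every acting set pairs one OSD on k-master (0 or 1) with osd.2 on k-worker001. Both facts can be read back (assuming the default rule name):

ceph osd pool get rbd-test size            # replica count, 2 here
ceph osd crush rule dump replicated_rule   # host-level failure domain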