03 Ceph Object Storage: Hands-on


Create a pool, specifying the number of PGs and PGPs

root@admin-node:~# ceph osd pool create test 10 10
pool 'test' created
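
To double-check the PG settings the pool was created with, the values can be read back (a quick verification step, not captured in the original session):

ceph osd pool get test pg_num
ceph osd pool get test pgp_num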




List the existing pools

root@admin-node:~# rados lspools
rbd
.rgw.root
.rgw.control
.rgw
.rgw.gc
.users.uid
test



View pool information

root@admin-node:~# ceph osd dump
epoch 67
fsid 34e6e6b5-bb3e-4185-a8ee-01837c678db4
created 2015-10-22 12:14:52.060418
modified 2015-10-23 16:01:25.304725
flags 
pool 0 'rbd' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 1 flags hashpspool stripe_width 0
pool 1 '.rgw.root' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 14 owner 18446744073709551615 flags hashpspool stripe_width 0
pool 2 '.rgw.control' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 16 owner 18446744073709551615 flags hashpspool stripe_width 0
pool 3 '.rgw' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 18 owner 18446744073709551615 flags hashpspool stripe_width 0
pool 4 '.rgw.gc' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 19 owner 18446744073709551615 flags hashpspool stripe_width 0
pool 5 '.users.uid' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 8 pgp_num 8 last_change 20 owner 18446744073709551615 flags hashpspool stripe_width 0
pool 6 'test' replicated size 2 min_size 1 crush_ruleset 0 object_hash rjenkins pg_num 10 pgp_num 10 last_change 64 flags hashpspool stripe_width 0
max_osd 3
osd.0 up   in  weight 1 up_from 66 up_thru 66 down_at 64 last_clean_interval [52,65) 172.16.66.141:6800/1258 172.16.66.141:6804/1001258 172.16.66.141:6805/1001258 172.16.66.141:6806/1001258 exists,up 305b777a-8376-41a2-a11b-4bd51de4bfe2
osd.1 up   in  weight 1 up_from 61 up_thru 66 down_at 60 last_clean_interval [51,60) 172.16.66.140:6800/1271 172.16.66.140:6804/1001271 172.16.66.140:6805/1001271 172.16.66.140:6806/1001271 exists,up a7fc8f11-699e-41ec-8dec-20b2747c898e
osd.2 up   in  weight 1 up_from 51 up_thru 66 down_at 49 last_clean_interval [24,48) 172.16.66.142:6801/1345 172.16.66.142:6802/1345 172.16.66.142:6803/1345 172.16.66.142:6804/1345 exists,up eb841029-51db-4b38-8734-28655b294308
root@admin-node:~#


From the ceph osd dump output above we can see which node each OSD runs on:


osd.0 172.16.66.141
osd.1 172.16.66.140
osd.2 172.16.66.142


In terms of hostnames, this maps to:

172.16.66.140 node3
172.16.66.141 node2 
172.16.66.142 node1
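
The OSD-to-host mapping can also be read straight from the CRUSH hierarchy with ceph osd tree (not run in the original session, so its output is not shown here):

ceph osd tree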


Check how many replicas the pool keeps by default, and the minimum number of replicas required to serve I/O

root@admin-node:~# ceph osd pool get test size
size: 2

root@admin-node:~# ceph osd pool get test min_size
min_size: 1
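
Both values are per-pool settings and can be changed at any time; for example, raising the pool to 3 replicas with a minimum of 2 would look like this (a sketch only, not applied to this cluster):

ceph osd pool set test size 3
ceph osd pool set test min_size 2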



Create a test file (interrupt the cat with Ctrl-C after a moment; otherwise it keeps writing until the filesystem fills up):

 cat /dev/zero > haha.tar.gz
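
An alternative that produces a file of an exact size without having to interrupt anything (not what was used above) is dd:

dd if=/dev/zero of=haha.tar.gz bs=1M count=44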

Check the file's attributes

root@admin-node:~# ll -h
-rw-r--r--  1 root root  44M Oct 23 16:16 haha.tar.gz

root@admin-node:~# md5sum haha.tar.gz 
80afbc71d5a33dd8ba4dc5ea9a0197b3  haha.tar.gz

root@admin-node:~# stat haha.tar.gz 
  文件:"haha.tar.gz"
  大小:45969408  	块:89784      IO 块:4096   普通文件
设备:801h/2049d	Inode:27525136    硬链接:1
权限:(0644/-rw-r--r--)  Uid:(    0/    root)   Gid:(    0/    root)
最近访问:2015-10-23 16:18:23.544343698 +0800
最近更改:2015-10-23 16:16:53.792245884 +0800
最近改动:2015-10-23 16:16:53.792245884 +0800
创建时间:-


Upload this file to the test pool as an object named my-object:

rados -p test put my-object haha.tar.gz
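
To verify the round trip, the object can be read back into a new file and its checksum compared (illustrative only; haha_copy.tar.gz is a hypothetical output name):

rados -p test get my-object haha_copy.tar.gz
md5sum haha_copy.tar.gz   # should match 80afbc71d5a33dd8ba4dc5ea9a0197b3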

List the objects in the pool:

root@admin-node:~# rados -p test ls
my-object
root@admin-node:~#
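
rados can also report the size and modification time of a single object (not captured in the original session):

rados -p test stat my-object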


Find out where the object is stored

root@admin-node:~# ceph osd map test my-object
osdmap e67 pool 'test' (6) object 'my-object' -> pg 6.c5034eb8 (6.8) -> up ([0,2], p0) acting ([0,2], p0)

Interpreting this output:

The my-object object lives in the test pool and maps to placement group 6.8.

Given the pool's replica count of 2, the data is stored twice, on osd.0 and osd.2; osd.1 does not hold a copy of the newly created object.
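
The same up/acting sets can be confirmed by querying the placement group directly (command not captured in the original session):

ceph pg map 6.8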









The cluster has 3 OSDs:

root@admin-node:~# ceph -s
    cluster 34e6e6b5-bb3e-4185-a8ee-01837c678db4
     health HEALTH_OK
     monmap e3: 3 mons at {node1=172.16.66.142:6789/0,node2=172.16.66.141:6789/0,node3=172.16.66.140:6789/0}
            election epoch 42, quorum 0,1,2 node3,node2,node1
     osdmap e67: 3 osds: 3 up, 3 in
      pgmap v1349: 114 pgs, 7 pools, 44892 kB data, 44 objects
            19956 MB used, 1375 GB / 1470 GB avail
                 114 active+clean
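
For a per-pool breakdown of the space usage reported above, ceph df can be used (not part of the original session):

ceph df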




Now look on osd.0, osd.1 and osd.2 directly:

root@node1:~# md5sum /var/local/osd2/current/6.8_head/my-object__head_C5034EB8__6 
80afbc71d5a33dd8ba4dc5ea9a0197b3  /var/local/osd2/current/6.8_head/my-object__head_C5034EB8__6

root@node2:~# md5sum /var/local/osd0/current/6.8_head/my-object__head_C5034EB8__6 
80afbc71d5a33dd8ba4dc5ea9a0197b3  /var/local/osd0/current/6.8_head/my-object__head_C5034EB8__6

The object does not exist on osd.1:

root@node3:~# ls /var/local/osd1/current/
0.0_head/      0.1_head/      0.2d_head/     0.3c_head/     1.1_head/      2.5_head/      4.6_head/      6.4_head/
0.11_head/     0.20_head/     0.2e_head/     0.3_head/      1.2_head/      2.6_head/      4.7_head/      6.5_head/
0.12_head/     0.21_head/     0.2f_head/     0.4_head/      1.3_head/      3.0_head/      5.0_head/      6.6_head/
0.13_head/     0.22_head/     0.2_head/      0.8_head/      1.5_head/      3.1_head/      5.1_head/      6.7_head/
0.16_head/     0.23_head/     0.33_head/     0.9_head/      1.6_head/      3.4_head/      5.2_head/      6.9_head/
0.17_head/     0.25_head/     0.34_head/     0.a_head/      1.7_head/      3.5_head/      5.3_head/      commit_op_seq
0.1a_head/     0.26_head/     0.35_head/     0.b_head/      2.1_head/      3.6_head/      5.4_head/      meta/
0.1b_head/     0.27_head/     0.36_head/     0.c_head/      2.1_TEMP/      3.7_head/      5.5_head/      nosnap
0.1c_head/     0.28_head/     0.37_head/     0.d_head/      2.2_head/      4.0_head/      5.6_head/      omap/
0.1d_head/     0.2a_head/     0.38_head/     0.e_head/      2.4_head/      4.1_head/      6.0_head/      
0.1e_head/     0.2b_head/     0.39_head/     1.0_head/      2.4_TEMP/      4.4_head/      6.2_head/
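
A quick way to confirm that osd.1 holds no copy of the object is a find over its data directory (an extra check, not in the original session; it should print nothing):

find /var/local/osd1/current -name 'my-object*'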


