day04: snapshots, snapshot clones, mounting at boot, the Ceph file system and MDS, object storage (server-side and client configuration), and accessing the Dashboard
# 1. Create a 10 GB image named img1 in the rbd pool
[root@client1 ~]# rbd --help # list the subcommands
[root@client1 ~]# rbd help create # help for the create subcommand
[root@client1 ~]# rbd create img1 --size 10G
[root@client1 ~]# rbd list
img1
[root@client1 ~]# rbd info img1
rbd image 'img1':
size 10 GiB in 2560 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: fa91208bfdaf
block_name_prefix: rbd_data.fa91208bfdaf
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Sat Dec 17 10:44:17 2022
access_timestamp: Sat Dec 17 10:44:17 2022
modify_timestamp: Sat Dec 17 10:44:17 2022
# 2. Use the image img1 on the client and mount it at /mnt
[root@client1 ~]# rbd list
img1
[root@client1 ~]# rbd map img1
/dev/rbd0
[root@client1 ~]# mkfs.xfs /dev/rbd0
[root@client1 ~]# mount /dev/rbd0 /mnt/
[root@client1 ~]# rbd showmapped
id pool namespace image snap device
0 rbd img1 - /dev/rbd0
[root@client1 ~]# df -h /mnt/
Filesystem Size Used Avail Use% Mounted on
/dev/rbd0 10G 105M 9.9G 2% /mnt
# 3. Write data into /mnt
[root@client1 ~]# cp /etc/hosts /mnt/
[root@client1 ~]# cp /etc/passwd /mnt/
[root@client1 ~]# ls /mnt/
hosts passwd
# 4. Create a snapshot of img1 named img1-sn1
[root@client1 ~]# rbd snap create img1 --snap img1-sn1
Creating snap: 100% complete...done.
[root@client1 ~]# rbd snap ls img1
SNAPID NAME SIZE PROTECTED TIMESTAMP
4 img1-sn1 10 GiB Sat Dec 17 10:46:07 2022
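Note that img1 was still mounted when the snapshot was taken. For a crash-consistent snapshot it is safer to quiesce writes around the snap create; a minimal sketch with fsfreeze (not part of the original steps; img1-sn2 is a hypothetical extra snapshot):
[root@client1 ~]# fsfreeze --freeze /mnt # flush and block writes on the mounted XFS filesystem
[root@client1 ~]# rbd snap create img1 --snap img1-sn2 # hypothetical snapshot taken while I/O is quiesced
[root@client1 ~]# fsfreeze --unfreeze /mnt # resume writes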
# 5. Delete the data in /mnt/
[root@client1 ~]# rm -f /mnt/*
# 6. Restore the data from the snapshot
[root@client1 ~]# umount /mnt/
[root@client1 ~]# rbd unmap /dev/rbd0
[root@client1 ~]# rbd help snap rollback # help for the rollback subcommand
# Roll img1 back to the snapshot img1-sn1
[root@client1 ~]# rbd snap rollback img1 --snap img1-sn1
# Remount
[root@client1 ~]# rbd map img1
/dev/rbd0
[root@client1 ~]# mount /dev/rbd0 /mnt/
[root@client1 ~]# ls /mnt/ # the data has been restored
hosts passwd
[root@client1 ~]# rbd help snap protect
# Protect the snapshot img1-sn1 of image img1
[root@client1 ~]# rbd snap protect img1 --snap img1-sn1
[root@client1 ~]# rbd snap rm img1 --snap img1-sn1 # fails: a protected snapshot cannot be removed
# 1. Unprotect the snapshot
[root@client1 ~]# rbd snap unprotect img1 --snap img1-sn1
# 2. Delete the snapshot
[root@client1 ~]# rbd snap rm img1 --snap img1-sn1
# 3. Unmount the block device
[root@client1 ~]# umount /dev/rbd0
# 4. Unmap the image
[root@client1 ~]# rbd unmap img1
# 5. Delete the image
[root@client1 ~]# rbd rm img1
Clone workflow: image → (create) snapshot → (protect) protected snapshot → (clone) cloned image
# 1. Create an image named img2, 10 GB in size
[root@client1 ~]# rbd create img2 --size 10G
# 2. Write data into the image
[root@client1 ~]# rbd map img2
/dev/rbd0
[root@client1 ~]# mkfs.xfs /dev/rbd0
[root@client1 ~]# mount /dev/rbd0 /mnt/
[root@client1 ~]# for i in {1..20}
> do
> echo "Hello World $i" > /mnt/file$i.txt
> done
[root@client1 ~]# ls /mnt/
file10.txt file15.txt file1.txt file5.txt
file11.txt file16.txt file20.txt file6.txt
file12.txt file17.txt file2.txt file7.txt
file13.txt file18.txt file3.txt file8.txt
file14.txt file19.txt file4.txt file9.txt
# 3. Unmount and unmap the image
[root@client1 ~]# umount /mnt/
[root@client1 ~]# rbd unmap img2
# 4. Create a snapshot of img2 named img2-sn1
[root@client1 ~]# rbd snap create img2 --snap img2-sn1
# 5. Protect the img2-sn1 snapshot
[root@client1 ~]# rbd snap protect img2 --snap img2-sn1
# 6. Create cloned images from the protected snapshot img2-sn1
[root@client1 ~]# rbd clone img2 --snap img2-sn1 img2-sn1-1
[root@client1 ~]# rbd clone img2 --snap img2-sn1 img2-sn1-2
# 7. List the cloned images
[root@client1 ~]# rbd ls
img2
img2-sn1-1
img2-sn1-2
# 8. Different clients map different clones and see the same data
[root@client1 ~]# rbd map img2-sn1-1
/dev/rbd0
[root@client1 ~]# mkdir /data
[root@client1 ~]# mount /dev/rbd0 /data
[root@client1 ~]# ls /data
file10.txt file15.txt file1.txt file5.txt
file11.txt file16.txt file20.txt file6.txt
file12.txt file17.txt file2.txt file7.txt
file13.txt file18.txt file3.txt file8.txt
file14.txt file19.txt file4.txt file9.txt
# ceph1 acts as a second client here; it needs the ceph-common package
[root@ceph1 ~]# yum install -y ceph-common
[root@ceph1 ~]# rbd map img2-sn1-2
/dev/rbd0
[root@ceph1 ~]# mkdir /data
[root@ceph1 ~]# mount /dev/rbd0 /data/
[root@ceph1 ~]# ls /data/
file10.txt file15.txt file1.txt file5.txt
file11.txt file16.txt file20.txt file6.txt
file12.txt file17.txt file2.txt file7.txt
file13.txt file18.txt file3.txt file8.txt
file14.txt file19.txt file4.txt file9.txt
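Clones are copy-on-write: until blocks are rewritten they share the parent's objects, so a fresh clone consumes almost no extra space. rbd du can illustrate this (output varies with actual usage):
[root@client1 ~]# rbd du img2-sn1-1 # provisioned size vs. space actually consumed by the clone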
# View the snapshot info
[root@client1 ~]# rbd info img2 --snap img2-sn1
rbd image 'img2':
size 10 GiB in 2560 objects
order 22 (4 MiB objects)
snapshot_count: 1
id: d46eed84bb61
block_name_prefix: rbd_data.d46eed84bb61
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Sat Dec 17 10:58:05 2022
access_timestamp: Sat Dec 17 10:58:05 2022
modify_timestamp: Sat Dec 17 10:58:05 2022
protected: True # the snapshot is protected
# View the cloned image
[root@client1 ~]# rbd info img2-sn1-2
rbd image 'img2-sn1-2':
size 10 GiB in 2560 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d48fe3d6559e
block_name_prefix: rbd_data.d48fe3d6559e
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Sat Dec 17 10:59:53 2022
access_timestamp: Sat Dec 17 10:59:53 2022
modify_timestamp: Sat Dec 17 10:59:53 2022
parent: rbd/img2@img2-sn1 # the parent is the img2-sn1 snapshot of image img2 in the rbd pool
overlap: 10 GiB
Merging parent and child images
# Merge img2's data into the child image img2-sn1-2
[root@client1 ~]# rbd flatten img2-sn1-2
# Check its status: it no longer has a parent image
[root@client1 ~]# rbd info img2-sn1-2
rbd image 'img2-sn1-2':
size 10 GiB in 2560 objects
order 22 (4 MiB objects)
snapshot_count: 0
id: d48fe3d6559e
block_name_prefix: rbd_data.d48fe3d6559e
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Sat Dec 17 10:59:53 2022
access_timestamp: Sat Dec 17 10:59:53 2022
modify_timestamp: Sat Dec 17 10:59:53 2022
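Before dismantling the parent, you can check which clones still depend on the snapshot; after the flatten above, only img2-sn1-1 should be listed:
[root@client1 ~]# rbd children img2 --snap img2-sn1 # lists the clones still backed by this snapshot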
# Delete the parent image; if an image is still in use, release it first
[root@client1 ~]# umount /data/
[root@client1 ~]# rbd unmap img2-sn1-1
# 1. Delete the image img2-sn1-1
[root@client1 ~]# rbd rm img2-sn1-1
# 2. Unprotect img2-sn1
[root@client1 ~]# rbd snap unprotect img2 --snap img2-sn1
# 3. Delete the img2-sn1 snapshot
[root@client1 ~]# rbd snap rm img2 --snap img2-sn1
# 4. Delete img2
[root@client1 ~]# rbd rm img2
# Because img2-sn1-2 is now an independent image, it can still be used
# The image mapped on ceph1 is unaffected
[root@ceph1 ~]# cat /data/file1.txt
Hello World 1
# 1. Prepare an image
[root@client1 ~]# rbd create img1 --size 10G
[root@client1 ~]# rbd map img1
/dev/rbd0
[root@client1 ~]# mkfs.xfs /dev/rbd0
# 2. Configure mounting at boot
[root@client1 ~]# vim /etc/ceph/rbdmap # specify the image to map, plus the user name and keyring
rbd/img1 id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
[root@client1 ~]# vim /etc/fstab # append
/dev/rbd/rbd/img1 /data xfs noauto 0 0
# noauto means the mount is skipped during the normal boot-time mount pass and performed once the rbdmap service has started
# 3. Enable and start the rbdmap service
[root@client1 ~]# systemctl enable rbdmap --now
# 4. Check the result after a reboot
[root@client1 ~]# df -h /data/
Filesystem Size Used Avail Use% Mounted on
/dev/rbd0 10G 105M 9.9G 2% /data
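If a reboot is inconvenient, the same path can be exercised by hand; a quick check (not in the original steps), starting from an unmapped state:
[root@client1 ~]# umount /data; rbd unmap img1 # return to a clean state first
[root@client1 ~]# systemctl restart rbdmap # maps every image listed in /etc/ceph/rbdmap
[root@client1 ~]# rbd showmapped # img1 should be mapped again
[root@client1 ~]# mount /data && df -h /data # mounts via the fstab entry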
Metadata describes the attributes of data, such as owner, group, and permissions.
In the Ceph file system, data and metadata are stored separately.
Creating the storage pools
Creating the Ceph file system
# 1. Create a pool named data01 for file data, with 100 PGs
[root@client1 ~]# ceph osd pool create data01 100
# 2. Create a pool named metadata01 for metadata
[root@client1 ~]# ceph osd pool create metadata01 100
# 3. Create a CephFS named myfs01; data goes to data01, metadata to metadata01
[root@client1 ~]# ceph fs new myfs01 metadata01 data01
# 4. View the pools
[root@client1 ~]# ceph osd lspools
1 .mgr
2 rbd
3 data01
4 metadata01
[root@client1 ~]# ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 180 GiB 180 GiB 206 MiB 206 MiB 0.11
TOTAL 180 GiB 180 GiB 206 MiB 206 MiB 0.11
--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
.mgr 1 1 449 KiB 2 1.3 MiB 0 57 GiB
rbd 2 32 7.1 MiB 43 22 MiB 0.01 57 GiB
data01 3 94 0 B 0 0 B 0 57 GiB
metadata01 4 94 0 B 0 0 B 0 57 GiB
# 5. View the file system
[root@client1 ~]# ceph fs ls
name: myfs01, metadata pool: metadata01, data pools: [data01 ]
# 6. Deploy the MDS service
[root@client1 ~]# ceph orch apply mds myfs01 --placement="2 ceph1 ceph2"
# 7. Check the deployment result
[root@client1 ~]# ceph -s
cluster:
id: a4b69ab4-79dd-11ed-ae7b-000c2953b002
health: HEALTH_OK
services:
mon: 3 daemons, quorum ceph1,ceph3,ceph2 (age 92m)
mgr: ceph1.gmqorm(active, since 92m), standbys: ceph3.giqaph
mds: 1/1 daemons up, 1 standby # MDS service info
osd: 9 osds: 9 up (since 92m), 9 in (since 4d)
...omitted...
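The MDS daemons can also be listed individually through the orchestrator:
[root@client1 ~]# ceph orch ps --daemon-type mds # one line per MDS daemon with host and status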
# Mounting the file system requires a secret key. View it:
[root@client1 ~]# cat /etc/ceph/ceph.client.admin.keyring
[client.admin]
key = AQBmhINh1IZjHBAAvgk8m/FhyLiH4DCCrnrdPQ==
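Instead of reading the keyring file, the key can be printed directly with the auth subcommand:
[root@client1 ~]# ceph auth get-key client.admin # prints only the key string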
# mount.ceph is the mount helper for filesystem type ceph; the -o options supply the user name and secret key
[root@client1 ~]# mkdir /mydata
[root@client1 ~]# mount.ceph 192.168.88.13:/ /mydata -o name=admin,secret=AQBmhINh1IZjHBAAvgk8m/FhyLiH4DCCrnrdPQ==
[root@client1 ~]# df -h /mydata/
Filesystem Size Used Avail Use% Mounted on
192.168.88.13:/ 57G 0 57G 0% /mydata
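The data/metadata separation described above can be observed directly: file contents written into the mount become objects in data01, while the MDS keeps the attributes in metadata01 (object names will vary):
[root@client1 ~]# cp /etc/hosts /mydata/
[root@client1 ~]# rados -p data01 ls # data objects holding the file contents
[root@client1 ~]# rados -p metadata01 ls # metadata objects maintained by the MDS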
# 1. Deploy the rgw service, named myrgw, on ceph1 and ceph2
[root@client1 ~]# ceph orch apply rgw myrgw --placement="2 ceph1 ceph2" --port 8080
[root@client1 ~]# ceph -s
cluster:
id: a4b69ab4-79dd-11ed-ae7b-000c2953b002
health: HEALTH_OK
services:
mon: 3 daemons, quorum ceph1,ceph3,ceph2 (age 101m)
mgr: ceph1.gmqorm(active, since 6h), standbys: ceph3.giqaph
mds: 1/1 daemons up, 1 standby
osd: 9 osds: 9 up (since 6h), 9 in (since 5d); 1 remapped pgs
rgw: 2 daemons active (2 hosts, 1 zones) # rgw info
...omitted...
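A quick way to confirm the gateways answer is an anonymous request against the S3 endpoint; RGW responds with an XML bucket listing, empty at this point:
[root@client1 ~]# curl http://ceph1:8080 # returns a ListAllMyBucketsResult XML document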
# 1. Install the Amazon S3 CLI (client-side tool)
[root@client1 ~]# yum install -y awscli
# 2. Create a user in Ceph
[root@client1 ~]# radosgw-admin user create --uid=testuser --display-name="Test User" --email=[email protected] --access-key=12345 --secret=67890
# 3. Initialize the client
[root@client1 ~]# aws configure --profile=ceph
AWS Access Key ID [None]: 12345
AWS Secret Access Key [None]: 67890
Default region name [None]: # press Enter
Default output format [None]: # press Enter
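aws configure saves the profile in dotfiles under the home directory; the stored values can be reviewed there:
[root@client1 ~]# cat ~/.aws/credentials # access/secret keys, one section per profile
[root@client1 ~]# cat ~/.aws/config # region and output settings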
# 4. Create a bucket named testbucket to hold the data
[root@client1 ~]# vim /etc/hosts # add the following entries
192.168.88.11 ceph1
192.168.88.12 ceph2
192.168.88.13 ceph3
[root@client1 ~]# aws --profile=ceph --endpoint=http://ceph1:8080 s3 mb s3://testbucket
# 5. Upload a file
[root@client1 ~]# aws --profile=ceph --endpoint=http://ceph1:8080 s3 cp /etc/hosts s3://testbucket/hosts.txt --acl public-read-write
# 6. List the data in the bucket
[root@client1 ~]# aws --profile=ceph --endpoint=http://ceph1:8080 s3 ls s3://testbucket
2022-12-17 17:05:58 241 hosts.txt
# 7. Download the data
[root@client1 ~]# wget -O zhuji http://ceph1:8080/testbucket/hosts.txt
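The anonymous download works because the object was uploaded with a public-read-write ACL; any plain HTTP client can fetch it:
[root@client1 ~]# curl http://ceph1:8080/testbucket/hosts.txt # same content as the wget above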
Access the Dashboard at https://192.168.88.11:8443. The user name is admin and the password is the one set at install time (123456).
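If the admin password needs to be changed later, the dashboard module reads the new one from a file (the file path and password below are just examples):
[root@client1 ~]# echo 'New_Pass123' > /tmp/pass.txt # hypothetical password file
[root@client1 ~]# ceph dashboard ac-user-set-password admin -i /tmp/pass.txt
[root@client1 ~]# rm -f /tmp/pass.txt # do not leave the password on disk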