#Deploy the radosgw service. As I understand it, this is the object storage system: it exposes a RESTful interface, and clients interact with it through that URL
#Install on mgr1
root@ceph-mgr1:~# apt install -y radosgw
#Run on the deploy node
ceph@ceph-deploy:~/ceph-cluster$ ceph-deploy --overwrite-conf rgw create ceph-mgr1
#Verify the service on mgr1
root@ceph-mgr1:~# ps axu|grep radosgw
ceph 22696 1.2 2.7 6277172 55844 ? Ssl 07:57 0:00 /usr/bin/radosgw -f --cluster ceph --name client.rgw.ceph-mgr1 --setuser ceph --setgroup ceph
#Access the web service at http://mgr1_ip:7480/
http://192.168.241.15:7480/
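#A quick sanity check (a sketch, not taken from the session above): an anonymous request to the gateway
#should return the S3 ListAllMyBucketsResult XML, roughly in this shape:
curl http://192.168.241.15:7480/
#<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult ...><Owner><ID>anonymous</ID>...</ListAllMyBucketsResult>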
#Verify the radosgw service status on the deploy node; rgw is up
ceph@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     98762d01-8474-493a-806e-fcb0dfc5fdb2
    health: HEALTH_WARN
            1 pool(s) do not have an application enabled

  services:
    mon: 1 daemons, quorum ceph-mon1 (age 25h)
    mgr: ceph-mgr1(active, since 24h)
    osd: 11 osds: 11 up (since 23h), 11 in (since 23h)
    rgw: 1 daemon active (1 hosts, 1 zones)

  data:
    pools:   6 pools, 142 pgs
    objects: 190 objects, 1.2 MiB
    usage:   127 MiB used, 220 GiB / 220 GiB avail
    pgs:     0.704% pgs not active
             141 active+clean
             1   activating

  progress:
    Global Recovery Event (0s)
      [............................]
#Verify the radosgw storage pools
ceph@ceph-deploy:~/ceph-cluster$ ceph osd pool ls
device_health_metrics
mypool
.rgw.root
default.rgw.log
default.rgw.control
default.rgw.meta
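#To use the S3 API against the gateway you also need an rgw user with access/secret keys.
#A minimal sketch (the uid and display name here are made up for illustration); run it where the admin keyring is available:
radosgw-admin user create --uid="testuser" --display-name="Test User"
#the JSON it prints contains an access_key and secret_key, which an S3 client (s3cmd, awscli, ...)
#can then use against http://192.168.241.15:7480/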
#Block device RBD
#Typically used with virtualization such as KVM and with cloud platforms like OpenStack and CloudStack
#On the deploy node
#Create the RBD pool
ceph@ceph-deploy:~/ceph-cluster$ ceph osd pool create myrbd1 64 64
pool 'myrbd1' created
ceph@ceph-deploy:~/ceph-cluster$ ceph osd pool --help|grep application
osd pool application disable <pool> <app> [--yes-i-really-mean-it]
osd pool application enable <pool> <app> [--yes-i-really-mean-it]
osd pool application get [<pool>] [<app>] [<key>]
osd pool application rm <pool> <app> <key>
osd pool application set <pool> <app> <key> <value>
ceph@ceph-deploy:~/ceph-cluster$ ceph osd pool application enable myrbd1 rbd    #enable the rbd application on the block storage pool myrbd1
ceph@ceph-deploy:~/ceph-cluster$ rbd pool init -p myrbd1    #initialize the block storage pool
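#Optionally confirm the application tag took effect (a quick check; the output should list rbd):
ceph osd pool application get myrbd1
#{
#    "rbd": {}
#}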
#Create and verify an image: an RBD pool cannot be used as a block device directly, so create an image in it and use the image as the block device.
ceph@ceph-deploy:~/ceph-cluster$ rbd create myimg1 --size 5G --pool myrbd1
ceph@ceph-deploy:~/ceph-cluster$ man rbd
--image-format format-id
Specifies which object layout to use. The default is 2.
--image-feature feature-name
Specifies which RBD format 2 feature should be enabled when creating an image. Multiple features can be enabled by repeating this
option multiple times. The following features are supported:
ceph@ceph-deploy:~/ceph-cluster$ rbd create myimg2 --size 3G --pool myrbd1 --image-format 2 --image-feature layering
ceph@ceph-deploy:~/ceph-cluster$ rbd ls --pool myrbd1    #list all images in the rbd pool
myimg1
myimg2
ceph@ceph-deploy:~/ceph-cluster$ rbd --pool myrbd1 --image myimg1 info
rbd image 'myimg1':
        size 5 GiB in 1280 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 117cbbb9aba2
        block_name_prefix: rbd_data.117cbbb9aba2
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features:
        flags:
        create_timestamp: Tue Aug 24 08:14:17 2021
        access_timestamp: Tue Aug 24 08:14:17 2021
        modify_timestamp: Tue Aug 24 08:14:17 2021
#Client use of block storage
#Check ceph usage
ceph@ceph-deploy:~/ceph-cluster$ ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 220 GiB 220 GiB 160 MiB 160 MiB 0.07
TOTAL 220 GiB 220 GiB 160 MiB 160 MiB 0.07
--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
device_health_metrics 1 1 0 B 0 0 B 0 70 GiB
mypool 2 32 1.2 MiB 1 3.5 MiB 0 70 GiB
.rgw.root 3 32 1.3 KiB 4 48 KiB 0 70 GiB
default.rgw.log 4 32 3.6 KiB 177 408 KiB 0 70 GiB
default.rgw.control 5 32 0 B 8 0 B 0 70 GiB
default.rgw.meta 6 8 0 B 0 0 B 0 70 GiB
myrbd1 7 64 405 B 7 48 KiB 0 70 GiB
#Map the image on the client. Only myimg2 can be mapped here, because it enables only the layering feature; myimg1 was created without restricting its features
ceph@ceph-deploy:~/ceph-cluster$ sudo rbd -p myrbd1 map myimg2
/dev/rbd0
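#myimg1 carries the default feature set (exclusive-lock, object-map, fast-diff, deep-flatten), which older
#kernel RBD clients may refuse to map. A sketch of one way to make it mappable as well, by disabling the
#extra features (whether this is actually needed depends on the client kernel version):
rbd feature disable myrbd1/myimg1 object-map fast-diff deep-flatten
sudo rbd -p myrbd1 map myimg1     #should now map, e.g. as /dev/rbd1
rbd showmapped                    #list current mappings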
#Verify the RBD device on the client
ceph@ceph-deploy:~/ceph-cluster$ lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 20G 0 disk
└─sda1 8:1 0 20G 0 part /
sr0 11:0 1 1024M 0 rom
rbd0 252:0 0 3G 0 disk
ceph@ceph-deploy:~/ceph-cluster$ sudo fdisk -l /dev/rbd0
Disk /dev/rbd0: 3 GiB, 3221225472 bytes, 6291456 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 4194304 bytes / 4194304 bytes
#Format the disk on the client and mount it for use
ceph@ceph-deploy:~/ceph-cluster$ sudo mkfs.ext4 /dev/rbd0
ceph@ceph-deploy:~/ceph-cluster$ sudo mkdir /data
ceph@ceph-deploy:~/ceph-cluster$ sudo mount /dev/rbd0 /data
ceph@ceph-deploy:~/ceph-cluster$ df -TH
Filesystem Type Size Used Avail Use% Mounted on
udev devtmpfs 1.1G 0 1.1G 0% /dev
tmpfs tmpfs 207M 934k 206M 1% /run
/dev/sda1 ext4 22G 3.3G 17G 17% /
tmpfs tmpfs 1.1G 0 1.1G 0% /dev/shm
tmpfs tmpfs 5.3M 0 5.3M 0% /run/lock
tmpfs tmpfs 1.1G 0 1.1G 0% /sys/fs/cgroup
tmpfs tmpfs 207M 0 207M 0% /run/user/1000
/dev/rbd0 ext4 3.2G 9.5M 3.0G 1% /data
ceph@ceph-deploy:~/ceph-cluster$ sudo cp /etc/passwd /data/
ceph@ceph-deploy:~/ceph-cluster$ df -TH
Filesystem Type Size Used Avail Use% Mounted on
udev devtmpfs 1.1G 0 1.1G 0% /dev
tmpfs tmpfs 207M 934k 206M 1% /run
/dev/sda1 ext4 22G 3.3G 17G 17% /
tmpfs tmpfs 1.1G 0 1.1G 0% /dev/shm
tmpfs tmpfs 5.3M 0 5.3M 0% /run/lock
tmpfs tmpfs 1.1G 0 1.1G 0% /sys/fs/cgroup
tmpfs tmpfs 207M 0 207M 0% /run/user/1000
/dev/rbd0 ext4 3.2G 9.5M 3.0G 1% /data
#Verify writes from the client
ceph@ceph-deploy:~/ceph-cluster$ sudo dd if=/dev/zero of=/data/ceph-test-file bs=1MB count=300
300+0 records in
300+0 records out
300000000 bytes (300 MB, 286 MiB) copied, 2.80791 s, 107 MB/s
#Verify how much space has been used
ceph@ceph-deploy:~/ceph-cluster$ df -TH
Filesystem Type Size Used Avail Use% Mounted on
udev devtmpfs 1.1G 0 1.1G 0% /dev
tmpfs tmpfs 207M 934k 206M 1% /run
/dev/sda1 ext4 22G 3.3G 17G 17% /
tmpfs tmpfs 1.1G 0 1.1G 0% /dev/shm
tmpfs tmpfs 5.3M 0 5.3M 0% /run/lock
tmpfs tmpfs 1.1G 0 1.1G 0% /sys/fs/cgroup
tmpfs tmpfs 207M 0 207M 0% /run/user/1000
/dev/rbd0 ext4 3.2G 310M 2.7G 11% /data
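#The mapping and mount above do not survive a reboot. A sketch of one way to make them persistent,
#using the rbdmap service shipped with ceph-common (the paths and mount options here are illustrative):
#/etc/ceph/rbdmap takes one image per line, "pool/image id=<user>,keyring=<keyring file>"
echo "myrbd1/myimg2 id=admin,keyring=/etc/ceph/ceph.client.admin.keyring" | sudo tee -a /etc/ceph/rbdmap
sudo systemctl enable rbdmap      #maps the images listed in /etc/ceph/rbdmap at boot
#then mount the mapped device via /etc/fstab, e.g.:
#/dev/rbd/myrbd1/myimg2  /data  ext4  defaults,noatime,_netdev  0 0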
#Install MDS
#Any node can be chosen as the mds server.
#cephfs can be mounted by several clients at the same time; a change made in one place is immediately visible everywhere else (demonstrated with two clients further below)
#Install the ceph-mds package on the chosen node, mgr1
ceph@ceph-mgr1:~$ sudo apt install -y ceph-mds
#Deploy the mds service from the deploy node
#Create the pool that holds the metadata
ceph@ceph-deploy:~/ceph-cluster$ ceph osd pool create cephfs-metadata 32 32
#Create the pool that holds the data
ceph@ceph-deploy:~/ceph-cluster$ ceph osd pool create cephfs-data 64 64
#Create the new cephfs
ceph@ceph-deploy:~/ceph-cluster$ ceph fs new mycephfs cephfs-metadata cephfs-data
#Current cluster status; an mds daemon now shows up
ceph@ceph-deploy:~/ceph-cluster$ ceph -s
  cluster:
    id:     98762d01-8474-493a-806e-fcb0dfc5fdb2
    health: HEALTH_WARN
            1 pool(s) do not have an application enabled

  services:
    mon: 1 daemons, quorum ceph-mon1 (age 26h)
    mgr: ceph-mgr1(active, since 26h)
    mds: 1/1 daemons up
    osd: 11 osds: 11 up (since 26h), 11 in (since 2d)
    rgw: 1 daemon active (1 hosts, 1 zones)

  data:
    volumes: 1/1 healthy
    pools:   9 pools, 297 pgs
    objects: 467 objects, 843 MiB
    usage:   3.0 GiB used, 217 GiB / 220 GiB avail
    pgs:     297 active+clean
ceph@ceph-deploy:~/ceph-cluster$ ceph fs ls
name: mycephfs, metadata pool: cephfs-metadata, data pools: [cephfs-data ]
#Check the status of a specific cephfs
ceph@ceph-deploy:~/ceph-cluster$ ceph fs status mycephfs
mycephfs - 0 clients
========
RANK STATE MDS ACTIVITY DNS INOS DIRS CAPS
0 active ceph-mgr1 Reqs: 0 /s 10 13 12 0
POOL TYPE USED AVAIL
cephfs-metadata metadata 96.0k 68.2G
cephfs-data data 0 68.2G
MDS version: ceph version 16.2.5 (0883bdea7337b95e4b611c768c0279868462204a) pacific (stable)
#Verify the cluster's MDS state
ceph@ceph-deploy:~/ceph-cluster$ ceph mds stat
mycephfs:1 {0=ceph-mgr1=up:active}
#Client use of cephfs
#Mount the fs on the client; this goes through port 6789 on the mon node
#First, on the deploy node, find the key and copy it out; it is used for authentication when mounting
ceph@ceph-deploy:~/ceph-cluster$ cat ceph.client.admin.keyring
[client.admin]
key = AQCnpyNhwbK4IRAAirfjj9LFQxEqE1Ww4BYRlg==
caps mds = "allow *"
caps mgr = "allow *"
caps mon = "allow *"
caps osd = "allow *"
#Mount on the client. The trailing ':/' in 192.168.241.12:6789:/ tripped me up...
#Note: before mounting, the client must first install ceph-common, which provides the ceph mount type used by mount -t ceph / mount.ceph
CentOS: yum -y install epel-release && yum install -y ceph-common
Debian/Ubuntu: sudo apt install -y ceph-common
root@ceph-client2:/var/lib/ceph# mount -t ceph 192.168.241.12:6789:/ /ceph-data/ -o name=admin,secret=AQCnpyNhwbK4IRAAirfjj9LFQxEqE1Ww4BYRlg==
[root@ceph-client1 ~]# mount -t ceph 192.168.241.12:6789:/ /ceph_data/ -o name=admin,secret=AQCnpyNhwbK4IRAAirfjj9LFQxEqE1Ww4BYRlg==
[root@ceph-client1 ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 475M 0 475M 0% /dev
tmpfs 487M 0 487M 0% /dev/shm
tmpfs 487M 7.7M 479M 2% /run
tmpfs 487M 0 487M 0% /sys/fs/cgroup
/dev/mapper/centos-root 37G 1.5G 36G 4% /
/dev/sda1 1014M 138M 877M 14% /boot
tmpfs 98M 0 98M 0% /run/user/0
192.168.241.12:6789:/ 69G 0 69G 0% /ceph_data
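#Passing secret= on the command line leaks the key into shell history and ps output. A sketch of the
#secretfile variant plus a persistent fstab entry (the secret file path is arbitrary):
echo "AQCnpyNhwbK4IRAAirfjj9LFQxEqE1Ww4BYRlg==" > /etc/ceph/admin.secret
chmod 600 /etc/ceph/admin.secret
mount -t ceph 192.168.241.12:6789:/ /ceph_data/ -o name=admin,secretfile=/etc/ceph/admin.secret
#/etc/fstab entry for mounting at boot:
#192.168.241.12:6789:/  /ceph_data  ceph  name=admin,secretfile=/etc/ceph/admin.secret,_netdev,noatime  0 0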
#Write data
[root@ceph-client1 ~]# dd if=/dev/zero of=/ceph_data/ddfile bs=4M count=100
100+0 records in
100+0 records out
419430400 bytes (419 MB) copied, 8.14411 s, 51.5 MB/s
root@ceph-client2:/var/lib/ceph# cp /var/log/* /ceph-data/ -r
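#Since both clients mount the same cephfs, a change made on one is visible on the other right away,
#as noted earlier. A quick sketch to confirm it (the file name is arbitrary):
#on client1:
echo "hello from client1" > /ceph_data/sync-test.txt
#on client2 (same fs, mounted at /ceph-data) the file should show up immediately:
cat /ceph-data/sync-test.txt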
#Check how much space is used
root@ceph-client2:/var/lib/ceph# ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 220 GiB 214 GiB 5.8 GiB 5.8 GiB 2.61
TOTAL 220 GiB 214 GiB 5.8 GiB 5.8 GiB 2.61
--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
device_health_metrics 1 1 0 B 0 0 B 0 67 GiB
mypool 2 32 1.2 MiB 1 3.5 MiB 0 67 GiB
.rgw.root 3 32 1.3 KiB 4 48 KiB 0 67 GiB
default.rgw.log 4 32 3.6 KiB 209 408 KiB 0 67 GiB
default.rgw.control 5 32 0 B 8 0 B 0 67 GiB
default.rgw.meta 6 8 0 B 0 0 B 0 67 GiB
myrbd1 7 64 829 MiB 223 2.4 GiB 1.19 67 GiB
cephfs-metadata 8 32 474 KiB 22 1.5 MiB 0 67 GiB
cephfs-data 9 64 459 MiB 154 1.3 GiB 0.66 67 GiB
#Delete data: usage does not drop all at once, it decreases bit by bit, because CephFS purges the underlying objects asynchronously after files are removed
[root@ceph-client1 ~]# cd /ceph_data/
[root@ceph-client1 ceph_data]# ls
alternatives.log bootstrap.log ddfile faillog journal syslog vmware vmware-network.3.log vmware-network.6.log vmware-network.9.log vmware-vmsvc-root.2.log vmware-vmtoolsd-root.log
apt btmp dist-upgrade fontconfig.log kern.log tallylog vmware-network.1.log vmware-network.4.log vmware-network.7.log vmware-network.log vmware-vmsvc-root.3.log wtmp
auth.log ceph dpkg.log installer lastlog unattended-upgrades vmware-network.2.log vmware-network.5.log vmware-network.8.log vmware-vmsvc-root.1.log vmware-vmsvc-root.log
[root@ceph-client1 ceph_data]# rm ./*log -rf
root@ceph-client2:/var/lib/ceph# ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 220 GiB 214 GiB 5.8 GiB 5.8 GiB 2.61
TOTAL 220 GiB 214 GiB 5.8 GiB 5.8 GiB 2.61
--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
device_health_metrics 1 1 0 B 0 0 B 0 67 GiB
mypool 2 32 1.2 MiB 1 3.5 MiB 0 67 GiB
.rgw.root 3 32 1.3 KiB 4 48 KiB 0 67 GiB
default.rgw.log 4 32 3.6 KiB 209 408 KiB 0 67 GiB
default.rgw.control 5 32 0 B 8 0 B 0 67 GiB
default.rgw.meta 6 8 0 B 0 0 B 0 67 GiB
myrbd1 7 64 829 MiB 223 2.4 GiB 1.19 67 GiB
cephfs-metadata 8 32 474 KiB 22 1.5 MiB 0 67 GiB
cephfs-data 9 64 459 MiB 154 1.3 GiB 0.66 67 GiB
root@ceph-client2:/var/lib/ceph# ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 220 GiB 214 GiB 5.7 GiB 5.7 GiB 2.61
TOTAL 220 GiB 214 GiB 5.7 GiB 5.7 GiB 2.61
--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
device_health_metrics 1 1 0 B 0 0 B 0 67 GiB
mypool 2 32 1.2 MiB 1 3.5 MiB 0 67 GiB
.rgw.root 3 32 1.3 KiB 4 48 KiB 0 67 GiB
default.rgw.log 4 32 3.6 KiB 209 408 KiB 0 67 GiB
default.rgw.control 5 32 0 B 8 0 B 0 67 GiB
default.rgw.meta 6 8 0 B 0 0 B 0 67 GiB
myrbd1 7 64 829 MiB 223 2.4 GiB 1.19 67 GiB
cephfs-metadata 8 32 551 KiB 23 1.7 MiB 0 67 GiB
cephfs-data 9 64 457 MiB 140 1.3 GiB 0.66 67 GiB
root@ceph-client2:/var/lib/ceph# ceph df
--- RAW STORAGE ---
CLASS SIZE AVAIL USED RAW USED %RAW USED
hdd 220 GiB 214 GiB 5.7 GiB 5.7 GiB 2.61
TOTAL 220 GiB 214 GiB 5.7 GiB 5.7 GiB 2.61
--- POOLS ---
POOL ID PGS STORED OBJECTS USED %USED MAX AVAIL
device_health_metrics 1 1 0 B 0 0 B 0 67 GiB
mypool 2 32 1.2 MiB 1 3.5 MiB 0 67 GiB
.rgw.root 3 32 1.3 KiB 4 48 KiB 0 67 GiB
default.rgw.log 4 32 3.6 KiB 209 408 KiB 0 67 GiB
default.rgw.control 5 32 0 B 8 0 B 0 67 GiB
default.rgw.meta 6 8 0 B 0 0 B 0 67 GiB
myrbd1 7 64 829 MiB 223 2.4 GiB 1.19 67 GiB
cephfs-metadata 8 32 557 KiB 23 1.7 MiB 0 67 GiB
cephfs-data 9 64 455 MiB 129 1.3 GiB 0.66 67 GiB