# ceph -s
  cluster:
    id:     5df0ea22-7a9b-48c9-a495-a880b0b29014
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum a29,a30,a31
    mgr: a29(active), standbys: a30, a31
    osd: 13 osds: 13 up, 13 in

  data:
    pools:   1 pools, 512 pgs
    objects: 475.10k objects, 1.81TiB
    usage:   3.63TiB used, 127TiB / 130TiB avail
    pgs:     512 active+clean

  io:
    client:   27.1KiB/s rd, 105KiB/s wr, 6op/s rd, 5op/s wr
# ceph-deploy mds create a29 a30 a31
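After ceph-deploy has started the MDS daemons, it is worth confirming that all three registered with the monitors as standbys before creating a filesystem. A minimal check, assuming the usual ceph-mds@<hostname> systemd unit names that ceph-deploy sets up (the systemctl check runs on each MDS host):
# ceph mds stat
# systemctl status ceph-mds@a29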
# ceph osd pool create cephfs_data 128
pool 'cephfs_data' created
# ceph osd pool create cephfs_metadata 512
pool 'cephfs_metadata' created
# ceph fs new cephfs cephfs_metadata cephfs_data
new fs with metadata pool 3 and data pool 2
# ceph fs ls
name: cephfs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
# ceph mds stat
cephfs-1/1/1 up {0=a31=up:creating}, 2 up:standby
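The fs new output above reports metadata pool 3 and data pool 2; those numeric IDs matter later, because PG names such as 3.169 in ceph health detail belong to pool 3, i.e. cephfs_metadata. To map pool IDs to names, and (on Luminous and later) to watch the MDS move from up:creating to up:active, the following read-only checks can be used:
# ceph osd lspools
# ceph fs status cephfs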
# mkdir /cephfs
The cephx authentication settings in the cluster configuration:
# cat /etc/ceph/ceph.conf
[global]
fsid = 5df0ea22-7a9b-48c9-a495-a880b0b29014
mon_initial_members = a29
mon_host = 192.168.1.179
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
public_network = 192.168.1.0/24
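Mounting with cephx requires the client machine to have this ceph.conf plus a keyring for the user it mounts as (client.admin here). If the client was not already prepared, one way to push both files is ceph-deploy's admin subcommand; the hostname below is a placeholder:
# ceph-deploy admin <client-hostname>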
The cephx authentication key, exported for client.admin:
# ceph auth export client.admin
export auth(auid = 18446744073709551615 key=AQB5vWVexvYuFRAAVId1xt9CRmemzYZ1c4WqyQ== with 4 caps)
[client.admin]
key = AQB5vWVexvYuFRAAVId1xt9CRmemzYZ1c4WqyQ==
caps mds = "allow *"
caps mgr = "allow *"
caps mon = "allow *"
caps osd = "allow *"
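client.admin carries allow * on every daemon type, so handing its key to a CephFS client gives that machine full control of the cluster. A more restrictive sketch, assuming Luminous or later and an arbitrary client name of cephfs-user (the command prints a keyring that can be used for the mount instead of the admin key):
# ceph fs authorize cephfs client.cephfs-user / rw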
# mount -t ceph 192.168.1.181:6789:/ /cephfs/ -o name=admin,secret=AQB5vWVexvYuFRAAVId1xt9CRmemzYZ1c4WqyQ==
# df -Th
192.168.1.181:6789:/ ceph 60T 0 60T 0% /cephfs
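Passing the key with secret= leaves it visible in shell history and in ps output. The kernel client also accepts a key file via secretfile=, which is what the admin.key file created next is for; the equivalent mount would look like this, assuming the file contains only the base64 key string:
# mount -t ceph 192.168.1.181:6789:/ /cephfs -o name=admin,secretfile=/etc/ceph/admin.key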
# cd /etc/ceph/
# touch admin.key
# echo AQB5vWVexvYuFRAAVId1xt9CRmemzYZ1c4WqyQ== >> /etc/ceph/admin.key
# vim /etc/fstab
192.168.1.181:6789:/ /cephfs ceph name=admin,secretfile=/etc/ceph/admin.key,noatime,_netdev 0 0
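To confirm the fstab entry works without rebooting, unmount the manual mount and let mount -a pick it up again:
# umount /cephfs
# mount -a
# df -Th /cephfs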
# ceph -s
  cluster:
    id:     5df0ea22-7a9b-48c9-a495-a880b0b29014
    health: HEALTH_ERR
            1 MDSs report slow metadata IOs
            Reduced data availability: 238 pgs inactive
            21 stuck requests are blocked > 4096 sec. Implicated osds 0,1,3,7,8,9,10

  services:
    mon: 3 daemons, quorum a29,a30,a31
    mgr: a29(active), standbys: a30, a31
    mds: cephfs-1/1/1 up {0=a31=up:creating}, 2 up:standby
    osd: 13 osds: 13 up, 13 in

  data:
    pools:   3 pools, 1152 pgs
    objects: 479.59k objects, 1.83TiB
    usage:   3.67TiB used, 127TiB / 130TiB avail
    pgs:     20.660% pgs not active
             914 active+clean
             238 creating+activating

  io:
    client:   41.8KiB/s rd, 2.96KiB/s wr, 10op/s rd, 0op/s wr
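The capture never states the root cause, so the following is a diagnosis to verify rather than a fact from the original session. The MDS stuck in up:creating and the slow metadata IOs are both downstream of the inactive PGs: the MDS cannot write to a metadata pool whose PGs never activate. As for why they never activate, the cluster now carries 1152 PGs at size 3, roughly 1152 x 3 / 13, about 266 PG replicas per OSD on average, well above the mon_max_pg_per_osd default of 200 on Luminous, which is a common reason for PGs that sit in creating+activating. The per-OSD counts are in the PGS column of ceph osd df:
# ceph osd df
# ceph pg dump_stuck inactive | head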
# ceph osd pool get cephfs_data size
size: 3
# ceph osd pool get cephfs_metadata size
size: 3
# ceph osd pool set cephfs_data size 2
set pool 2 size to 2
# ceph osd pool set cephfs_metadata size 2
set pool 3 size to 2
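With both CephFS pools at size 2 (and the original 512-PG pool presumably still at size 3), the total drops to roughly 512 x 3 + 640 x 2 = 2816 replicas, around 217 per OSD, and most of the stuck PGs activate shortly afterwards, as the next status shows. Progress can be watched with:
# watch -n 10 ceph -s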
# ceph -s
  cluster:
    id:     5df0ea22-7a9b-48c9-a495-a880b0b29014
    health: HEALTH_WARN
            Reduced data availability: 30 pgs inactive

  services:
    mon: 3 daemons, quorum a29,a30,a31
    mgr: a29(active), standbys: a30, a31
    mds: cephfs-1/1/1 up {0=a31=up:active}, 2 up:standby
    osd: 13 osds: 13 up, 13 in

  data:
    pools:   3 pools, 1152 pgs
    objects: 479.61k objects, 1.83TiB
    usage:   3.67TiB used, 127TiB / 130TiB avail
    pgs:     2.604% pgs not active
             1122 active+clean
             30 creating+activating

  io:
    client:   0B/s rd, 2.97KiB/s wr, 0op/s rd, 0op/s wr
# ceph health detail
HEALTH_WARN Reduced data availability: 30 pgs inactive
PG_AVAILABILITY Reduced data availability: 30 pgs inactive
pg 3.169 is stuck inactive for 16569.894513, current state creating+activating, last acting [10,12]
pg 3.16c is stuck inactive for 16569.894513, current state creating+activating, last acting [1,12]
pg 3.16d is stuck inactive for 16569.894513, current state creating+activating, last acting [11,12]
pg 3.173 is stuck inactive for 16569.894513, current state creating+activating, last acting [9,12]
pg 3.177 is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
pg 3.17b is stuck inactive for 16569.894513, current state creating+activating, last acting [0,12]
pg 3.17d is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
pg 3.184 is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
pg 3.18d is stuck inactive for 16569.894513, current state creating+activating, last acting [1,12]
pg 3.193 is stuck inactive for 16569.894513, current state creating+activating, last acting [0,12]
pg 3.19d is stuck inactive for 16569.894513, current state creating+activating, last acting [0,12]
pg 3.1a0 is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
pg 3.1a4 is stuck inactive for 16569.894513, current state creating+activating, last acting [3,12]
pg 3.1aa is stuck inactive for 16569.894513, current state creating+activating, last acting [0,12]
pg 3.1ae is stuck inactive for 16569.894513, current state creating+activating, last acting [9,12]
pg 3.1b4 is stuck inactive for 16569.894513, current state creating+activating, last acting [3,12]
pg 3.1bc is stuck inactive for 16569.894513, current state creating+activating, last acting [8,12]
pg 3.1bd is stuck inactive for 16569.894513, current state creating+activating, last acting [1,12]
pg 3.1bf is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
pg 3.1c3 is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
pg 3.1c5 is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
pg 3.1c7 is stuck inactive for 16569.894513, current state creating+activating, last acting [4,12]
pg 3.1c9 is stuck inactive for 16569.894513, current state creating+activating, last acting [0,12]
pg 3.1d5 is stuck inactive for 16569.894513, current state creating+activating, last acting [7,12]
pg 3.1d6 is stuck inactive for 16569.894513, current state creating+activating, last acting [1,12]
pg 3.1eb is stuck inactive for 16569.894513, current state creating+activating, last acting [6,12]
pg 3.1ef is stuck inactive for 16569.894513, current state creating+activating, last acting [11,12]
pg 3.1f0 is stuck inactive for 16569.894513, current state creating+activating, last acting [1,12]
pg 3.1f6 is stuck inactive for 16569.894513, current state creating+activating, last acting [10,12]
pg 3.1f9 is stuck inactive for 16569.894513, current state creating+activating, last acting [8,12]
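Every stuck PG above has osd.12 in its acting set, which points at that one OSD rather than at the PGs themselves, and is why the next step restarts osd.12. When the list is long, a quick way to spot the common OSD is to count how often each id appears in the acting sets (a throwaway pipeline, not a ceph command):
# ceph health detail | grep 'stuck inactive' | grep -o '\[[0-9,]*\]' | tr -d '[]' | tr ',' '\n' | sort -n | uniq -c | sort -rn | head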
# systemctl stop [email protected]
# systemctl start [email protected]
# systemctl status [email protected]
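After the restart it is worth checking the OSD's log for errors and confirming that the number of stuck PGs is falling (a quick check via the systemd journal and the stuck-PG dump):
# journalctl -u [email protected] -n 50
# ceph pg dump_stuck inactive | wc -l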
# ceph osd pool set cephfs_data size 1
set pool 2 size to 1
# ceph osd pool set cephfs_metadata size 1
set pool 3 size to 1
# ceph health detail
HEALTH_OK
# ceph osd pool get cephfs_data size
size: 2
# ceph osd pool get cephfs_metadata size
size: 1
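At this point cephfs_metadata is still at size 1, a single copy of all filesystem metadata, so losing one OSD can lose the filesystem. Once the cluster is stable the replica count should be raised again (2 is used here only as an example; 3 is the usual default):
# ceph osd pool set cephfs_metadata size 2
# ceph osd pool get cephfs_metadata size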
# ceph pg ls-by-pool cephfs_metadata | awk '{print $1,$2,$15}' | head
Reference: