Fill parameters:
depth=4,width=4,files=30000,size=4m
That is, 4**4 = 256 bottom-level directories in total and 4**4 * 30000 = 7,680,000 files.
Total data volume: 7,680,000 * 4 MB = 30,720,000 MB, about 30 TB, roughly half of the available storage space.
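For reference, a minimal vdbench parameter file consistent with these fill parameters could look like the sketch below; the anchor path, transfer size and thread count are assumptions, not values from the original run:

cat > cephfs_fill.parm <<'EOF'
# directory tree: 4 levels deep, 4 subdirectories wide, 30000 x 4 MB files per leaf directory
fsd=fsd1,anchor=/mnt/cephfs/vdb,depth=4,width=4,files=30000,size=4m
# sequential writes to fill the files (xfersize and threads are assumed values)
fwd=fwd1,fsd=fsd1,operation=write,fileio=sequential,fileselect=sequential,xfersize=1m,threads=16
# format=yes first creates the directory tree and writes every file out to its full size
rd=rd1,fwd=fwd1,fwdrate=max,format=yes,elapsed=60,interval=5
EOF
./vdbench -f cephfs_fill.parm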
health: HEALTH_WARN
1 MDSs report oversized cache
1 clients failing to respond to cache pressure
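To see which client sessions are holding the most caps (and therefore not responding to cache pressure), the MDS admin socket can list sessions; a sketch, assuming jq is installed and noting that field names such as num_caps may vary slightly between releases:

ceph daemon mds.node-mds01 session ls | jq 'sort_by(.num_caps) | reverse | .[0:5] | .[] | {id, num_caps, inst}'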
top - 10:20:25 up 2 days, 18:12, 2 users, load average: 31.00, 27.43, 22.01
Tasks: 839 total, 2 running, 837 sleeping, 0 stopped, 0 zombie
%Cpu(s): 19.5 us, 1.8 sy, 0.0 ni, 64.1 id, 14.4 wa, 0.0 hi, 0.1 si, 0.0 st
KiB Mem : 13147865+total, 6555780 free, 37243688 used, 87679192 buff/cache
KiB Swap: 0 total, 0 free, 0 used. 80960928 avail Mem
PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
202207 ceph 20 0 17.814g 0.017t 11244 S 20.1 13.9 172:43.11 ceph-mds
206625 root 20 0 9146480 2.162g 8080 S 205.6 1.7 1082:52 ganesha.nfsd
2467822 root 20 0 8656960 1.112g 13060 S 1080 0.9 124:33.83 java
184565 ceph 20 0 1360896 902028 12264 S 1.0 0.7 37:49.04 ceph-mon
196099 ceph 20 0 2007264 800296 7276 S 1.0 0.6 75:10.36 ceph-osd
192308 ceph 20 0 2098324 748500 7904 S 1.0 0.6 89:43.63 ceph-osd
193257 ceph 20 0 2076836 744500 7292 S 0.7 0.6 76:48.09 ceph-osd
191828 ceph 20 0 2072040 723760 7644 S 0.3 0.6 80:59.83 ceph-osd
199075 ceph 20 0 2005936 723612 7268 S 0.3 0.6 68:55.24 ceph-osd
[root@node-mds01 vdbench50407]# ceph daemon mds.node-mds01 perf dump mds
{
"mds": {
"request": 25344415,
"reply": 25344415,
"reply_latency": {
"avgcount": 25344415,
"sum": 4242.973167343,
"avgtime": 0.000167412
},
"forward": 0,
"dir_fetch": 24384,
"dir_commit": 30030,
"dir_split": 256,
"dir_merge": 0,
"inode_max": 2147483647,
"inodes": 4507742,
"inodes_top": 3081334,
"inodes_bottom": 1426151,
"inodes_pin_tail": 257,
"inodes_pinned": 105836,
"inodes_expired": 55219148,
"inodes_with_caps": 105572,
"caps": 105583,
"subtrees": 2,
"traverse": 33307765,
"traverse_hit": 9827901,
"traverse_forward": 0,
"traverse_discover": 0,
"traverse_dir_fetch": 24371,
"traverse_remote_ino": 0,
"traverse_lock": 0,
"load_cent": 207075,
"q": 43,
"exported": 0,
"exported_inodes": 0,
"imported": 0,
"imported_inodes": 0,
"openino_dir_fetch": 0,
"openino_backtrace_fetch": 0,
"openino_peer_discover": 0
}
}
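The counters most relevant to this warning can be pulled out of the dump with jq (assumed to be installed; the field names are exactly those shown above):

ceph daemon mds.node-mds01 perf dump mds | jq '.mds | {inodes, inodes_with_caps, caps, inodes_expired}'

With roughly 4.5 million inodes held in memory, the cache is already far beyond the default 1 GiB limit shown further below.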
[root@node-mds01 vdbench50407]# ceph daemonperf mds.node-mds01
---------------mds---------------- --mds_cache--- ------mds_log------ -mds_mem- ----mds_server----- mds_ -----objecter------ purg
req rlat fwd inos caps exi imi |stry recy recd|subm evts segs repl|ino dn |hcr hcs hsr cre |sess|actv rd wr rdwr|purg|
0 0 0 5.4M 105k 0 0 | 0 0 0 | 0 89k 130 0 |5.4M 5.4M| 0 0 0 0 | 3 | 0 0 0 0 | 0
65 13 0 5.4M 105k 0 0 | 0 0 0 | 81 88k 130 0 |5.4M 5.4M| 65 0 0 0 | 3 | 0 9 46 0 | 0
75 2 0 5.4M 105k 0 0 | 0 0 0 | 70 88k 130 0 |5.4M 5.4M| 75 0 0 0 | 3 | 0 6 37 0 | 0
73 4 0 5.4M 105k 0 0 | 0 0 0 | 77 88k 130 0 |5.4M 5.4M| 73 0 0 0 | 3 | 0 8 37 0 | 0
82 4 0 5.4M 105k 0 0 | 0 0 0 | 81 88k 130 0 |5.4M 5.4M| 82 0 0 0 | 3 | 0 12 41 0 | 0
70 6 0 5.5M 105k 0 0 | 0 0 0 | 73 88k 130 0 |5.5M 5.5M| 70 0 0 0 | 3 | 0 15 35 0 | 0
72 22 0 5.4M 105k 0 0 | 0 0 0 |100 88k 130 0 |5.4M 5.4M| 72 0 0 0 | 3 | 0 11 68 0 | 0
77 4 0 5.5M 105k 0 0 | 0 0 0 | 75 89k 130 0 |5.5M 5.5M| 77 0 0 0 | 3 | 0 9 39 0 | 0
71 6 0 5.5M 105k 0 0 | 0 0 0 | 75 89k 130 0 |5.5M 5.5M| 71 0 0 0 | 3 | 0 13 36 0 | 0
75 3 0 5.5M 105k 0 0 | 0 0 0 | 74 89k 130 0 |5.5M 5.5M| 75 0 0 0 | 3 | 0 8 38 0 | 0
74 4 0 5.6M 105k 0 0 | 0 0 0 | 74 89k 130 0 |5.6M 5.6M| 74 0 0 0 | 3 | 0 10 37 0 | 0
74 19 0 5.5M 105k 0 0 | 0 0 0 |128 89k 129 0 |5.5M 5.5M| 74 0 0 0 | 3 | 1 15 88 0 | 0
70 7 0 5.6M 105k 0 0 | 0 0 0 | 68 89k 129 0 |5.6M 5.6M| 70 0 0 0 | 3 | 0 13 36 0 | 0
74 3 0 5.6M 105k 0 0 | 0 0 0 | 77 89k 129 0 |5.6M 5.6M| 74 0 0 0 | 3 | 0 8 37 0 | 0
12 4 0 5.6M 105k 0 0 | 0 0 0 | 44 89k 129 0 |5.6M 5.6M| 12 0 0 0 | 3 | 0 2 6 0 | 0
0 0 0 5.6M 105k 0 0 | 0 0 0 | 38 89k 129 0 |5.6M 5.6M| 0 0 0 0 | 3 | 0 0 0 0 | 0
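The inos column shows the cache holding a steady 5.4-5.6 million inodes. To read the cache usage in bytes rather than object counts, the cache status admin-socket command can also be tried (present in recent Luminous and later builds; output format may vary):

ceph daemon mds.node-mds01 cache status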
[root@node-mds01 vdbench50407]# ceph daemon mds.node-mds01 config show | grep mds_cache
"mds_cache_memory_limit": "1073741824",
"mds_cache_mid": "0.700000",
"mds_cache_reservation": "0.050000",
"mds_cache_size": "0",
"mds_cache_trim_decay_rate": "1.000000",
"mds_cache_trim_threshold": "65536",
[root@node-mds01 vdbench50407]# ceph daemon mds.node-mds01 config show | grep mds_health
"mds_health_cache_threshold": "1.500000",
"mds_health_summarize_threshold": "10",
[root@node-mds01 vdbench50407]#
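With these values, the HEALTH_WARN fires once the MDS cache exceeds mds_cache_memory_limit * mds_health_cache_threshold. A quick check of that threshold:

# 1 GiB limit * 1.5 health threshold => warning above ~1.5 GiB of cached metadata
echo $(( 1073741824 * 3 / 2 ))   # 1610612736 bytes
# the running MDS (RES ~17 GB, ~5.4M cached inodes) is far past this, hence the warning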
After increasing mds_cache_memory_limit to 10 GB and re-running the test, the warning no longer appeared.
ceph daemon mds.node-mds01 config set mds_cache_memory_limit 10737418240
# used by cephfs
[mds]
mds cache memory limit = 10737418240
ansible cephall -m copy -a 'src=/etc/ceph/ceph.conf dest=/etc/ceph/ceph.conf owner=ceph group=ceph mode=0644'
systemctl restart ceph-mds@`hostname`
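After the restart, it is worth confirming that the new limit is active, for example via the same admin socket used above (expected output is roughly { "mds_cache_memory_limit": "10737418240" }):

ceph daemon mds.`hostname` config get mds_cache_memory_limit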
Notes:
- The same test was also run after filling only 1 TB of data, and no warning appeared. This shows that MDS memory usage grows with the number of files (i.e. the number of cached inodes), not simply with the raw data volume.
- Ceph Nautilus improved MDS stability with large caches (official docs: v14.2.0 Nautilus release notes), so this approach does not necessarily apply to Ceph Luminous.
The MDS acts as a cache for the metadata stored in RADOS. Metadata performance is very different for workloads whose metadata fits within that cache.
If your workload has more files than fit in your cache (configured using mds_cache_memory_limit or mds_cache_size settings), then make sure you test it appropriately: don’t test your system with a small number of files and then expect equivalent performance when you move to a much larger number of files.
In short: the MDS acts as a cache for the metadata stored in RADOS, and metadata performance differs greatly depending on whether the working set fits in that cache. If a workload has more files than fit in the cache (controlled by mds_cache_memory_limit), it must be tested at that scale; performance measured with a small number of files cannot be extrapolated to a much larger number of files.
Reference 2: mentions that this error shows up when the number of files is very large.
Reference 3: translates the official explanation of the issue: it is caused by clients failing to respond to the MDS's requests to release cache.
Official documentation, Understanding MDS Cache Size Limits: explains how the MDS cache limit works. It is not a hard cap on the cache; once usage exceeds 1.5x the configured limit, the MDS tries to trim its cache and raises this warning.