[root@prometheus ~]# curl -u jhmk:jhmk1234 192.168.8.121:9230/_cluster/health?pretty
{
"cluster_name" : "cdssbd",
"status" : "green",
"timed_out" : false,
"number_of_nodes" : 3,
"number_of_data_nodes" : 3,
"active_primary_shards" : 418,
"active_shards" : 450,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}
Parameter | Description |
---|---|
cluster_name | Cluster name |
status | Cluster status: green, yellow, or red. green – all shards are allocated; yellow – some replica shards are unallocated; red – some primary shards are unallocated |
timed_out | Whether the request timed out |
number_of_nodes | Number of nodes in the cluster |
number_of_data_nodes | Number of data nodes in the cluster |
active_primary_shards | Number of active primary shards in the cluster |
active_shards | Total number of active shards in the cluster |
relocating_shards | Number of shards currently relocating to other nodes, normally 0; it rises while nodes join or leave the cluster |
initializing_shards | Number of shards currently initializing |
unassigned_shards | Number of unassigned shards, normally 0; it rises when replica shards on a node are lost |
delayed_unassigned_shards | Number of unassigned shards whose allocation has been delayed by a timeout setting |
number_of_pending_tasks | Number of cluster-level tasks (index creation, shard allocation, etc.) queued for the master; if this value keeps growing instead of shrinking, the cluster is unstable |
number_of_in_flight_fetch | Number of unfinished fetch operations |
task_max_waiting_in_queue_millis | How long, in milliseconds, the longest-waiting task has been sitting in the queue; a large value means the master cannot keep up and the cluster needs tuning |
active_shards_percent_as_number | Cluster shard health: active shards as a percentage of all shards |
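For alerting, the status field alone usually suffices. A minimal sketch, assuming the same endpoint and credentials as above and that jq is installed on the host:

# Quick probe of the status field; prints a message whenever the cluster is not green.
status=$(curl -s -u jhmk:jhmk1234 192.168.8.121:9230/_cluster/health | jq -r '.status')
[ "$status" = "green" ] || echo "cluster status is $status"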
A search request itself runs in two phases, query and fetch:
[root@prometheus ~]# curl -s -u jhmk:jhmk1234 192.168.8.121:9230/_cat/indices | grep system-log | head -n1
green open system-log-2020.09.26 KHAiCFlBQbapLxiF_DUvJA 10 0 185956 0 363.3mb 363.3mb
[root@prometheus ~]# curl -u jhmk:jhmk1234 192.168.8.121:9230/system-log-2020.09.26/_stats?pretty
......
"search" : {
"open_contexts" : 0,
"query_total" : 16030,
"query_time_in_millis" : 66580,
"query_current" : 0,
"fetch_total" : 466,
"fetch_time_in_millis" : 6083,
"fetch_current" : 0,
"scroll_total" : 0,
"scroll_time_in_millis" : 0,
"scroll_current" : 0,
"suggest_total" : 0,
"suggest_time_in_millis" : 0,
"suggest_current" : 0
},
......
Parameter | Description |
---|---|
open_contexts | Number of search contexts currently open |
query_total | Total queries: aggregate count of all queries the cluster has handled |
query_time_in_millis | Total time spent in the query phase since node startup; query_time_in_millis / query_total can serve as a query-efficiency indicator: the larger the ratio, the more time each query takes |
query_current | Queries in progress: count of queries the cluster is currently processing |
fetch_total | Total fetches: aggregate count of all fetches the cluster has handled |
fetch_time_in_millis | Total time spent in the fetch phase, in ms |
fetch_current | Fetches in progress: count of fetches currently running in the cluster |
scroll_total | Total number of scroll searches |
scroll_time_in_millis | Total time scroll contexts were held open, in ms |
scroll_current | Number of scroll contexts currently open |
suggest_total | Total number of suggest requests |
suggest_time_in_millis | Total time spent on suggest requests, in ms |
suggest_current | Number of suggest requests in progress |
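As the table notes, query_time_in_millis / query_total approximates the average time per query; the same ratio works for the fetch phase. A small jq sketch against the index stats call shown above (assumes jq is installed and the index has handled at least one query and fetch):

# Average query/fetch latency in ms for one index; values come from _all.total.search in the _stats output.
curl -s -u jhmk:jhmk1234 192.168.8.121:9230/system-log-2020.09.26/_stats |
  jq '._all.total.search | {avg_query_ms: (.query_time_in_millis / .query_total),
                            avg_fetch_ms: (.fetch_time_in_millis / .fetch_total)}'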
Merging consumes a lot of disk I/O and CPU; on an index with heavy writes you will see a large number of merge operations.
[root@prometheus ~]# curl -s -u jhmk:jhmk1234 192.168.8.121:9230/_nodes/stats | jq | grep -A 21 'merges'
"merges": {
"current": 0,
"current_docs": 0,
"current_size_in_bytes": 0,
"total": 31024,
"total_time_in_millis": 4230970,
"total_docs": 42895444,
"total_size_in_bytes": 99705203261,
"total_stopped_time_in_millis": 0,
"total_throttled_time_in_millis": 4879,
"total_auto_throttle_in_bytes": 3263931112
},
"refresh": {
"total": 300620,
"total_time_in_millis": 6373141,
"listeners": 0
},
"flush": {
"total": 235,
"periodic": 0,
"total_time_in_millis": 109529
},
Parameter | Description |
---|---|
merges.current | Number of merges currently running |
merges.current_docs | Number of documents in the currently running merges |
merges.current_size_in_bytes | Size of the currently running merges, in bytes |
merges.total | Total number of merges |
merges.total_time_in_millis | Total time spent merging, in ms |
merges.total_docs | Total number of documents merged |
merges.total_size_in_bytes | Total size merged, in bytes |
merges.total_stopped_time_in_millis | Total time merges were stopped, in ms |
merges.total_throttled_time_in_millis | Total time merges were throttled, in ms |
merges.total_auto_throttle_in_bytes | Size, in bytes, of automatically throttled merge operations |
refresh.total | Total number of refreshes (a refresh moves data from the in-memory index buffer into the filesystem cache) |
refresh.total_time_in_millis | Total time spent refreshing, in ms |
refresh.listeners | Number of refresh listeners currently waiting |
flush.total | Total number of flushes (a flush writes data from the filesystem cache to disk) |
flush.periodic | Number of flushes triggered periodically when the translog exceeded the flush threshold |
flush.total_time_in_millis | Total time spent flushing, in ms |
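To see how much merge work each node has done and how long merges were throttled, the _nodes/stats output above can be summarized per node. A jq sketch under the same assumptions as the commands above (field names as shown in the output):

# Per-node merge totals vs. throttled time (ms), taken from indices.merges in _nodes/stats.
curl -s -u jhmk:jhmk1234 192.168.8.121:9230/_nodes/stats |
  jq '.nodes[] | {name: .name,
      merge_total: .indices.merges.total,
      merge_ms: .indices.merges.total_time_in_millis,
      throttled_ms: .indices.merges.total_throttled_time_in_millis}'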
http://192.168.8.119:9230/_cat/nodes?v&h=http,version,jdk,disk.total,disk.used,disk.avail,disk.used_percent,heap.current,heap.percent,heap.max,ram.current,ram.percent,ram.max,master
http version jdk disk.total disk.used disk.avail disk.used_percent heap.current heap.percent heap.max ram.current ram.percent ram.max master
192.168.8.120:9230 6.5.4 1.8.0_211 1007.8gb 502gb 505.7gb 49.82 8.2gb 82 9.9gb 60.2gb 96 62.7gb -
192.168.8.121:9230 6.5.4 1.8.0_211 1007.8gb 593.9gb 413.8gb 58.94 8gb 81 9.9gb 60.5gb 97 62.7gb *
192.168.8.119:9230 6.5.4 1.8.0_211 1007.8gb 571.9gb 435.8gb 56.75 7.8gb 78 9.9gb 62.3gb 99 62.7gb -
Parameter | Description |
---|---|
http | HTTP address of the node |
version | Elasticsearch version |
jdk | JDK version |
disk.total | Total disk space |
disk.used | Used disk space |
disk.avail | Available disk space |
disk.used_percent | Disk usage percentage |
heap.current | Current heap usage |
heap.percent | Heap usage percentage |
heap.max | Maximum heap size |
ram.current | Current RAM usage |
ram.percent | RAM usage percentage |
ram.max | Total RAM |
master | Master marker (* = elected master, - = not master) |
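The column list can be trimmed when you only care about memory pressure; sorting by heap.percent puts the most loaded node first. A sketch, assuming the cat API sort parameter s is available on this 6.5.4 cluster and using the same credentials as above:

curl -s -u jhmk:jhmk1234 '192.168.8.119:9230/_cat/nodes?v&h=name,heap.percent,ram.percent,master&s=heap.percent:desc'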
[root@prometheus ~]# curl -s -u jhmk:jhmk1234 http://192.168.8.119:9230/_nodes/stats | jq | grep -A 46 'jvm'
"jvm": {
"timestamp": 1601367476240,
"uptime_in_millis": 448105411,
"mem": {
"heap_used_in_bytes": 8889263408,
"heap_used_percent": 83,
"heap_committed_in_bytes": 10667687936,
"heap_max_in_bytes": 10667687936,
"non_heap_used_in_bytes": 184374952,
"non_heap_committed_in_bytes": 196108288,
"pools": {
"young": {
"used_in_bytes": 499099576,
"max_in_bytes": 558432256,
"peak_used_in_bytes": 558432256,
"peak_max_in_bytes": 558432256
},
"survivor": {
"used_in_bytes": 8377280,
"max_in_bytes": 69730304,
"peak_used_in_bytes": 69730304,
"peak_max_in_bytes": 69730304
},
"old": {
"used_in_bytes": 8381786552,
"max_in_bytes": 10039525376,
"peak_used_in_bytes": 10017623080,
"peak_max_in_bytes": 10039525376
}
}
},
"threads": {
"count": 142,
"peak_count": 190
},
"gc": {
"collectors": {
"young": {
"collection_count": 16967,
"collection_time_in_millis": 375818
},
"old": {
"collection_count": 21769,
"collection_time_in_millis": 2251278
}
}
},
Parameter | Description |
---|---|
timestamp | Time the sample was taken, in ms since the epoch |
uptime_in_millis | JVM uptime, in ms |
mem.heap_used_in_bytes | Heap used, in bytes |
mem.heap_used_percent | Heap usage percentage |
mem.heap_committed_in_bytes | Heap committed, in bytes |
mem.heap_max_in_bytes | Maximum heap size, in bytes |
mem.non_heap_used_in_bytes | Non-heap memory used, in bytes |
mem.non_heap_committed_in_bytes | Non-heap memory committed, in bytes |
mem.pools.young.used_in_bytes | Young-generation pool used, in bytes |
mem.pools.young.max_in_bytes | Young-generation pool maximum, in bytes |
mem.pools.young.peak_used_in_bytes | Young-generation pool peak usage, in bytes |
mem.pools.young.peak_max_in_bytes | Young-generation pool peak maximum, in bytes |
mem.pools.survivor.used_in_bytes | Survivor pool used, in bytes |
mem.pools.survivor.max_in_bytes | Survivor pool maximum, in bytes |
mem.pools.survivor.peak_used_in_bytes | Survivor pool peak usage, in bytes |
mem.pools.survivor.peak_max_in_bytes | Survivor pool peak maximum, in bytes |
mem.pools.old.used_in_bytes | Old-generation pool used, in bytes |
mem.pools.old.max_in_bytes | Old-generation pool maximum, in bytes |
mem.pools.old.peak_used_in_bytes | Old-generation pool peak usage, in bytes |
mem.pools.old.peak_max_in_bytes | Old-generation pool peak maximum, in bytes |
threads.count | Current number of JVM threads |
threads.peak_count | Peak number of JVM threads |
gc.collectors.young.collection_count | Number of young-generation GC collections |
gc.collectors.young.collection_time_in_millis | Total time spent in young-generation GC, in ms |
gc.collectors.old.collection_count | Number of old-generation GC collections |
gc.collectors.old.collection_time_in_millis | Total time spent in old-generation GC, in ms |
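A useful derived number from this table is the average pause per collection, collection_time_in_millis / collection_count, computed per generation and per node. A jq sketch under the same assumptions as the commands above:

# Average GC time per collection (ms); assumes every node has recorded at least one collection of each type.
curl -s -u jhmk:jhmk1234 http://192.168.8.119:9230/_nodes/stats |
  jq '.nodes[] | {name: .name,
      avg_young_gc_ms: (.jvm.gc.collectors.young.collection_time_in_millis
                        / .jvm.gc.collectors.young.collection_count),
      avg_old_gc_ms:   (.jvm.gc.collectors.old.collection_time_in_millis
                        / .jvm.gc.collectors.old.collection_count)}'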