Common Prometheus Alerting Rules

Reference: https://awesome-prometheus-alerts.grep.to/rules

Hot-reloading alert rules

Add the --web.enable-lifecycle flag to the Prometheus startup parameters, then trigger a reload from a terminal with the following POST request:
curl -X POST http://IP:port/-/reload
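
Prometheus only loads the rule files created below if prometheus.yml points at them. A minimal sketch of the rule_files block, assuming the /data/prometheus/conf/ layout used throughout this article (adjust the paths to your own deployment):

# prometheus.yml (fragment)
rule_files:
  - /data/prometheus/conf/rules/*.yaml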

Prometheus.rules

cat > /data/prometheus/conf/rules/Prometheus.yaml << 'EOF'
groups:
- name: Prometheus.rules
  rules:
  - alert: PrometheusAllTargetsMissing
    expr: sum by (job) (up) == 0
    for: 2m
    labels:
      severity: critical
    annotations:
      title: 'Prometheus all targets missing'
      description: "A Prometheus job does not have living target anymore."
  - alert: PrometheusConfigurationReloadFailure
    expr: prometheus_config_last_reload_successful != 1
    for: 0m
    labels:
      severity: warning
    annotations:
      title: 'Prometheus configuration reload failure'
      description: "Prometheus: 【{{ $labels.instance }}】 configuration reload error."
  - alert: PrometheusTooManyRestarts
    expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2
    for: 0m
    labels:
      severity: warning
    annotations:
      title: 'Prometheus too many restarts'
      description: "Prometheus: 【{{ $labels.instance }}】 has restarted more than twice in the last 15 minutes. It might be crashlooping."
  - alert: PrometheusAlertmanagerConfigurationReloadFailure
    expr: alertmanager_config_last_reload_successful != 1
    for: 0m
    labels:
      severity: warning
    annotations:
      title: 'Prometheus AlertManager configuration reload failure'
      description: "AlertManager: 【{{ $labels.instance }}】 configuration reload error"
  - alert: PrometheusNotificationsBacklog
    expr: min_over_time(prometheus_notifications_queue_length[10m]) > 0
    for: 1m
    labels:
      severity: warning
    annotations:
      title: 'Prometheus notifications backlog'
      description: "Prometheus: 【{{ $labels.instance }}】 The notification queue has not been empty for 10 minutes"
  - alert: PrometheusAlertmanagerNotificationFailing
    expr: rate(alertmanager_notifications_failed_total[1m]) > 0
    for: 1m
    labels:
      severity: critical
    annotations:
      title: 'Prometheus AlertManager notification failing'
      description: "AlertManager: 【{{ $labels.instance }}】 is failing sending notifications"
  - alert: PrometheusTsdbCheckpointCreationFailures
    expr: increase(prometheus_tsdb_checkpoint_creations_failed_total[1m]) > 0
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'Prometheus TSDB checkpoint creation failures'
      description: "Prometheus: 【{{ $labels.instance }}】 encountered {{ $value }} checkpoint creation failures"
  - alert: PrometheusTsdbCheckpointDeletionFailures
    expr: increase(prometheus_tsdb_checkpoint_deletions_failed_total[1m]) > 0
    for: 1m
    labels:
      severity: critical
    annotations:
      title: 'Prometheus TSDB checkpoint deletion failures'
      description: "Prometheus: 【{{ $labels.instance }}】 encountered {{ $value }} checkpoint deletion failures"
  - alert: PrometheusTsdbCompactionsFailed
    expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
    for: 1m
    labels:
      severity: critical
    annotations:
      title: 'Prometheus TSDB compactions failed'
      description: "Prometheus: 【{{ $labels.instance }}】 encountered {{ $value }} TSDB compactions failures"
  - alert: PrometheusTsdbHeadTruncationsFailed
    expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
    for: 1m
    labels:
      severity: critical
    annotations:
      title: 'Prometheus TSDB head truncations failed'
      description: "Prometheus: 【{{ $labels.instance }}】 encountered {{ $value }} TSDB head truncation failures"
  - alert: PrometheusTsdbReloadFailures
    expr: increase(prometheus_tsdb_reloads_failures_total[1m]) > 0
    for: 1m
    labels:
      severity: critical
    annotations:
      title: 'Prometheus TSDB reload failures'
      description: "Prometheus: 【{{ $labels.instance }}】 encountered {{ $value }} TSDB reload failures"
EOF
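
After writing a rule file it is worth checking its syntax before asking Prometheus to reload; promtool ships alongside the Prometheus binary. A minimal sketch (a broken reload would otherwise only surface through the PrometheusConfigurationReloadFailure alert above):

# Validate every rule file in the directory, then hot-reload as described earlier
promtool check rules /data/prometheus/conf/rules/*.yaml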

Hosts.rules

cat > /data/prometheus/conf/rules/Hosts.yaml << 'EOF'
groups:
- name: Hosts.rules
  rules:
  ## Custom By wangshui
  - alert: HostDown
    expr: up{job=~"node-exporter|prometheus|grafana|alertmanager"} == 0
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'Instance down'
      description: "主机: 【{{ $labels.instance }}】has been down for more than 1 minute"

  - alert: HostCpuLoadAverage
    expr: sum(node_load5) by (instance) > 10
    for: 1m
    annotations:
      title: "5分钟内CPU负载过高"
      description: "主机: 【{{ $labels.instance }}】 5五分钟内CPU负载超过10 (当前值:{{ $value }})"
    labels:
      severity: 'warning'

  - alert: HostCpuUsage
    expr: (1-((sum(increase(node_cpu_seconds_total{mode="idle"}[5m])) by (instance))/ (sum(increase(node_cpu_seconds_total[5m])) by (instance))))*100 > 80
    for: 1m
    annotations:
      title: "CPU使用率过高"
      description: "主机: 【{{ $labels.instance }}】 5五分钟内CPU使用率超过80% (当前值:{{ $value }})"
    labels:
      severity: 'warning'

  - alert: HostMemoryUsage
    expr: (1-((node_memory_Buffers_bytes + node_memory_Cached_bytes + node_memory_MemFree_bytes)/node_memory_MemTotal_bytes))*100 > 80
    for: 1m
    annotations:
      title: "主机内存使用率超过80%"
      description: "主机: 【{{ $labels.instance }}】 内存使用率超过80% (当前使用率:{{ $value }}%)"
    labels:
      severity: 'warning'

  - alert: HostIOWait
    expr: ((sum(increase(node_cpu_seconds_total{mode="iowait"}[5m])) by (instance))/(sum(increase(node_cpu_seconds_total[5m])) by (instance)))*100 > 10
    for: 1m
    annotations:
      title: "磁盘负载过高"
      description: "主机: 【{{ $labels.instance }}】 5五分钟内磁盘负载过高 (当前负载值:{{ $value }})"
    labels:
      severity: 'warning'

  - alert: HostFileSystemUsage
    expr: (1-(node_filesystem_free_bytes{fstype=~"ext4|xfs",mountpoint!~".*tmp|.*boot" }/node_filesystem_size_bytes{fstype=~"ext4|xfs",mountpoint!~".*tmp|.*boot" }))*100 > 70
    for: 1m
    annotations:
      title: "磁盘空间剩余不足"
      description: "主机: 【{{ $labels.instance }}】 {{ $labels.mountpoint }}分区使用率超过70%, 当前值使用率:{{ $value }}%"
    labels:
      severity: 'warning'

  - alert: HostSwapIsFillingUp
    expr: (1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80
    for: 2m
    labels:
      severity: 'warning'
    annotations:
      title: "主机swap分区不足"
      description: "主机: 【{{ $labels.instance }}】 swap分区使用超过 (>80%), 当前值使用率: {{ $value }}%"

  - alert: HostNetworkConnection-ESTABLISHED
    expr:  sum(node_netstat_Tcp_CurrEstab) by (instance) > 1000
    for: 5m
    labels:
      severity: 'warning'
    annotations:
      title: "主机ESTABLISHED连接数过高"
      description: "主机: 【{{ $labels.instance }}】 ESTABLISHED连接数超过1000, 当前ESTABLISHED连接数: {{ $value }}"

  - alert: HostNetworkConnection-TIME_WAIT
    expr:  sum(node_sockstat_TCP_tw) by (instance) > 1000
    for: 5m
    labels:
      severity: 'warning'
    annotations:
      title: "主机TIME_WAIT连接数过高"
      description: "主机: 【{{ $labels.instance }}】 TIME_WAIT连接数超过1000, 当前TIME_WAIT连接数: {{ $value }}"

  - alert: HostUnusualNetworkThroughputIn
    expr:  sum by (instance, device) (rate(node_network_receive_bytes_total{device=~"ens.*"}[2m])) / 1024 / 1024 > 100
    for: 5m
    labels:
      severity: 'warning'
    annotations:
      title: "主机网卡入口流量过高"
      description: "主机: 【{{ $labels.instance }}】, 网卡: {{ $labels.device }} 入口流量超过 (> 100 MB/s), 当前值: {{ $value }}"

  - alert: HostUnusualNetworkThroughputOut
    expr: sum by (instance, device) (rate(node_network_transmit_bytes_total{device=~"ens.*"}[2m])) / 1024 / 1024 > 100
    for: 5m
    labels:
      severity: 'warning'
    annotations:
      title: "主机网卡出口流量过高"
      description: "主机: 【{{ $labels.instance }}】, 网卡: {{ $labels.device }} 出口流量超过 (> 100 MB/s), 当前值: {{ $value }}"

  - alert: HostUnusualDiskReadRate
    expr: sum by (instance, device) (rate(node_disk_read_bytes_total{device=~"sd.*"}[2m])) / 1024 / 1024 > 50
    for: 5m
    labels:
      severity: 'warning'
    annotations:
      title: "主机磁盘读取速率过高"
      description: "主机: 【{{ $labels.instance }}】, 磁盘: {{ $labels.device }} 读取速度超过(50 MB/s), 当前值: {{ $value }}"

  - alert: HostUnusualDiskWriteRate
    expr: sum by (instance, device) (rate(node_disk_written_bytes_total{device=~"sd.*"}[2m])) / 1024 / 1024 > 50
    for: 2m
    labels:
      severity: 'warning'
    annotations:
      title: "主机磁盘写入速率过高"
      description: "主机: 【{{ $labels.instance }}】, 磁盘: {{ $labels.device }} 写入速度超过(50 MB/s), 当前值: {{ $value }}"

  - alert: HostOutOfInodes
    expr: node_filesystem_files_free{fstype=~"ext4|xfs",mountpoint!~".*tmp|.*boot" } / node_filesystem_files{fstype=~"ext4|xfs",mountpoint!~".*tmp|.*boot" } * 100 < 10
    for: 2m
    labels:
      severity: 'warning'
    annotations:
      title: "主机分区Inode节点不足"
      description: "主机: 【{{ $labels.instance }}】 {{ $labels.mountpoint }}分区inode节点不足 (可用值小于{{ $value }}%)"

  - alert: HostUnusualDiskReadLatency
    expr: rate(node_disk_read_time_seconds_total{device=~"sd.*"}[1m]) / rate(node_disk_reads_completed_total{device=~"sd.*"}[1m]) > 0.1 and rate(node_disk_reads_completed_total{device=~"sd.*"}[1m]) > 0
    for: 2m
    labels:
      severity: 'warning'
    annotations:
      title: "主机磁盘Read延迟过高"
      description: "主机: 【{{ $labels.instance }}】, 磁盘: {{ $labels.device }} Read延迟过高 (read operations > 100ms), 当前延迟值: {{ $value }}ms"

  - alert: HostUnusualDiskWriteLatency
    expr: rate(node_disk_write_time_seconds_total{device=~"sd.*"}[1m]) / rate(node_disk_writes_completed_total{device=~"sd.*"}[1m]) > 0.1 and rate(node_disk_writes_completed_total{device=~"sd.*"}[1m]) > 0
    for: 2m
    labels:
      severity: 'warning'
    annotations:
      title: "主机磁盘Write延迟过高"
      description: "主机: 【{{ $labels.instance }}】, 磁盘: {{ $labels.device }} Write延迟过高 (write operations > 100ms), 当前延迟值: {{ $value }}ms"
EOF
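
These host rules can also be unit-tested with promtool before they ever page anyone. A minimal sketch for the HostDown alert; the test directory, file name, 1m interval, and sample target labels are all assumptions (keep test files outside the globbed rules directory so Prometheus does not try to load them as rules):

cat > /data/prometheus/conf/tests/hosts_test.yaml << 'EOF'
rule_files:
  - /data/prometheus/conf/rules/Hosts.yaml
evaluation_interval: 1m
tests:
  - interval: 1m
    input_series:
      # the target is scraped normally for two minutes, then goes away
      - series: 'up{job="node-exporter", instance="10.0.0.1:9100"}'
        values: '1 1 0 0 0'
    alert_rule_test:
      - eval_time: 4m
        alertname: HostDown
        exp_alerts:
          - exp_labels:
              severity: critical
              job: node-exporter
              instance: 10.0.0.1:9100
EOF
promtool test rules /data/prometheus/conf/tests/hosts_test.yaml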

Blackbox.rules

cat > /data/prometheus/conf/rules/Blackbox.yaml << 'EOF'
groups:
- name: Blackbox.rules
  rules:
  - alert: HostConnectionFailure
    expr: probe_success{job="ping-status"} == 0
    for: 0m
    labels:
      severity: critical
    annotations:
      title: Host Connection Failure
      description: "主机 【{{ $labels.instance }}】 cannot be connected"
      
  - alert: ServiceConnectionFailure
    expr: probe_success{job="port-status"} == 0
    for: 0m
    labels:
      severity: critical
    annotations:
      title: Service Connection Failure
      description: "服务 【{{ $labels.server }}】 on 主机 【{{ $labels.instance }}】 cannot be connected"

  - alert: BlackboxSlowProbeOnServer
    expr: avg_over_time(probe_duration_seconds{job="port-status"}[1m]) > 1
    for: 1m
    labels:
      severity: warning
    annotations:
      title: Service probe timeout
      description: "服务 【{{ $labels.server }}】 on 主机 【{{ $labels.instance }}】Blackbox probe took more than 1s to complete, Current Value: {{ $value }}s"

  - alert: BlackboxSlowProbeOnWebsite
    expr: avg_over_time(probe_duration_seconds{job="http-status"}[1m]) > 1
    for: 1m
    labels:
      severity: warning
    annotations:
      title: Service probe timeout
      description: "网站 【{{ $labels.instance }}】 Blackbox probe took more than 1s to complete, Current Value: {{ $value }}s"

  - alert: BlackboxProbeHttpFailure
    expr: probe_http_status_code <= 199 OR probe_http_status_code >= 400
    for: 0m
    labels:
      severity: critical
      service: web
    annotations:
      title: Blackbox probe HTTP failure
      description: "网站: 【{{ $labels.instance }}】HTTP status code is exception, Current status code: {{ $value }}"

  - alert: BlackboxSslCertificateWillExpireSoonIn30days
    expr: probe_ssl_earliest_cert_expiry - time() < 86400 * 30
    for: 0m
    labels:
      severity: warning
    annotations:
      title: Blackbox SSL certificate will expire soon
      description: "网站: 【{{ $labels.instance }}】 SSL certificate expires in 30 days"
  - alert: BlackboxSslCertificateWillExpireSoonIn3days
    expr: probe_ssl_earliest_cert_expiry - time() < 86400 * 3
    for: 0m
    labels:
      severity: critical
    annotations:
      title: Blackbox SSL certificate will expire soon
      description: "网站: 【{{ $labels.instance }}】 SSL certificate expires in 3 days"
  - alert: BlackboxSslCertificateExpired
    expr: probe_ssl_earliest_cert_expiry - time() <= 0
    for: 0m
    labels:
      severity: critical
    annotations:
      title: Blackbox SSL certificate expired
      description: "网站: 【{{ $labels.instance }}】 SSL certificate has expired already"
  - alert: BlackboxProbeSlowHttp
    expr: avg_over_time(probe_http_duration_seconds[1m]) > 1
    for: 1m
    labels:
      severity: warning
    annotations:
      title: Blackbox probe slow HTTP
      description: "网站: 【{{ $labels.instance }}】HTTP request took more than 1s, Current Value: {{ $value }}s"
  - alert: BlackboxProbeSlowPing
    expr: avg_over_time(probe_icmp_duration_seconds[1m]) > 1
    for: 1m
    labels:
      severity: warning
    annotations:
      title: Blackbox probe slow ping
      description: "主机: 【{{ $labels.instance }}】Blackbox ping took more than 1s, Current Value: {{ $value }}s"  
EOF
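
These probes only exist if prometheus.yml defines scrape jobs whose names match the job="ping-status" / "port-status" / "http-status" selectors used above. A minimal sketch for the http-status job using the usual blackbox_exporter relabeling; the target URL, module name, and exporter address are assumptions:

# prometheus.yml (fragment)
scrape_configs:
  - job_name: 'http-status'
    metrics_path: /probe
    params:
      module: [http_2xx]              # module defined in blackbox.yml (assumption)
    static_configs:
      - targets: ['https://example.com']
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance        # the rules above report this as the probed target
      - target_label: __address__
        replacement: 127.0.0.1:9115   # blackbox_exporter address (assumption)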

Mysql.rules

cat > /data/prometheus/conf/rules/Mysql.yaml << 'EOF'
groups:
- name: Mysql.rules
  rules:
  ## Mysql Alarm Rules
  - alert: MysqlDown
    expr: mysql_up == 0
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'MySQL down'
      description: "Mysql实例: 【{{ $labels.instance }}】, MySQL instance is down"

  - alert: MysqlRestarted
    expr: mysql_global_status_uptime < 60
    for: 0m
    labels:
      severity: info
    annotations:
      title: 'MySQL Restarted'
      description: "Mysql实例: 【{{ $labels.instance }}】, MySQL has just been restarted, less than one minute ago"

  - alert: MysqlTooManyConnections(>80%)
    expr: avg by (instance) (max_over_time(mysql_global_status_threads_connected[1m])) / avg by (instance) (mysql_global_variables_max_connections) * 100 > 80
    for: 2m
    labels:
      severity: warning
    annotations:
      title: 'MySQL too many connections (> 80%)'
      description: "Mysql实例: 【{{ $labels.instance }}】, More than 80% of MySQL connections are in use, Current Value: {{ $value }}%"

  - alert: MysqlThreadsRunningHigh
    expr: mysql_global_status_threads_running > 40
    for: 2m
    labels:
      severity: warning
    annotations:
      title: 'MySQL Threads_Running High'
      description: "Mysql实例: 【{{ $labels.instance }}】, Threads_Running above the threshold(40), Current Value: {{ $value }}"

  - alert: MysqlQpsHigh
    expr: sum by (instance) (rate(mysql_global_status_queries[2m])) > 500
    for: 2m
    labels:
      severity: warning
    annotations:
      title: 'MySQL QPS High'
      description: "Mysql实例: 【{{ $labels.instance }}】, MySQL QPS above 500"

  - alert: MysqlSlowQueries
    expr: increase(mysql_global_status_slow_queries[1m]) > 0
    for: 2m
    labels:
      severity: warning
    annotations:
      title: 'MySQL slow queries'
      description: "Mysql实例: 【{{ $labels.instance }}】, has some new slow query."

  - alert: MysqlTooManyAbortedConnections
    expr: round(increase(mysql_global_status_aborted_connects[5m])) > 20
    for: 2m
    labels:
      severity: warning
    annotations:
      title: 'MySQL too many Aborted connections in 5 minutes'
      description: "Mysql实例: 【{{ $labels.instance }}】, {{ $value }} Aborted connections within 5 minutes"

  - alert: MysqlTooManyAbortedClients
    expr: round(increase(mysql_global_status_aborted_clients[120m])) > 10
    for: 2m
    labels:
      severity: warning
    annotations:
      title: 'MySQL too many Aborted connections in 2 hours'
      description: "Mysql实例: 【{{ $labels.instance }}】, {{ $value }} Aborted Clients within 2 hours"

  - alert: MysqlSlaveIoThreadNotRunning
    expr: mysql_slave_status_master_server_id > 0 and ON (instance) mysql_slave_status_slave_io_running == 0
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'MySQL Slave IO thread not running'
      description: "Mysql实例: 【{{ $labels.instance }}】, MySQL Slave IO thread not running"

  - alert: MysqlSlaveSqlThreadNotRunning
    expr: mysql_slave_status_master_server_id > 0 and ON (instance) mysql_slave_status_slave_sql_running == 0
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'MySQL Slave SQL thread not running'
      description: "Mysql实例: 【{{ $labels.instance }}】, MySQL Slave SQL thread not running"

  - alert: MysqlSlaveReplicationLag
    expr: mysql_slave_status_master_server_id > 0 and ON (instance) (mysql_slave_status_seconds_behind_master - mysql_slave_status_sql_delay) > 30
    for: 1m
    labels:
      severity: critical
    annotations:
      title: 'MySQL Slave replication lag'
      description: "Mysql实例: 【{{ $labels.instance }}】, MySQL replication lag"

  - alert: MysqlInnodbLogWaits
    expr: rate(mysql_global_status_innodb_log_waits[15m]) > 10
    for: 0m
    labels:
      severity: warning
    annotations:
      title: 'MySQL InnoDB log waits'
      description: "Mysql实例: 【{{ $labels.instance }}】, innodb log writes stalling"
EOF
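
Thresholds such as the 80% connection limit above are easiest to tune by evaluating the expression by hand against the HTTP query API before loading the rule. A minimal sketch; the Prometheus address 127.0.0.1:9090 is an assumption:

# Evaluate the connection-usage expression ad hoc and inspect the returned values
curl -s 'http://127.0.0.1:9090/api/v1/query' \
  --data-urlencode 'query=avg by (instance) (max_over_time(mysql_global_status_threads_connected[1m])) / avg by (instance) (mysql_global_variables_max_connections) * 100'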

Redis.rules

cat > /data/prometheus/conf/rules/redis.yaml << 'EOF'
groups:
- name: Redis.rules
  rules:
## Redis Alarm Rules
  - alert: RedisDown
    expr: redis_up == 0
    for: 1m
    labels:
      severity: critical
    annotations:
      title: 'Redis down'
      description: "Redis实例: 【{{ $labels.instance }}】, Redis instance is down"

  - alert: RedisMissingMaster
    expr: count(redis_instance_info{role="master"}) < 1
    for: 2m
    labels:
      severity: critical
    annotations:
      title: 'Redis missing master'
      description: "Redis cluster has no node marked as master."

  - alert: RedisTooManyMasters
    expr: count(redis_instance_info{role="master"}) > 1
    for: 2m
    labels:
      severity: critical
    annotations:
      title: 'Redis too many masters'
      description: "Redis cluster has too many nodes marked as master."

  - alert: RedisDisconnectedSlaves
    expr: count without (instance, job) (redis_connected_slaves) - sum without (instance, job) (redis_connected_slaves) - 1 > 1
    for: 2m
    labels:
      severity: critical
    annotations:
      title: 'Redis disconnected slaves'
      description: "Redis not replicating for all slaves. Consider reviewing the redis replication status."

  - alert: RedisReplicationBroken
    expr: delta(redis_connected_slaves[1m]) < 0
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'Redis replication broken'
      description: "Redis实例: 【{{ $labels.instance }}】,Redis instance lost a slave"

  - alert: RedisClusterFlapping
    expr: changes(redis_connected_slaves[1m]) > 1
    for: 2m
    labels:
      severity: critical
    annotations:
      title: 'Redis cluster flapping'
      description: "Redis实例: 【{{ $labels.instance }}】,Changes have been detected in Redis replica connection. This can occur when replica nodes lose connection to the master and reconnect (a.k.a flapping)."

  - alert: RedisMissingBackup
    expr: time() - redis_rdb_last_save_timestamp_seconds > 60 * 60 * 24
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'Redis missing backup'
      description: "Redis实例: 【{{ $labels.instance }}】,Redis has not been backuped for 24 hours"

  - alert: RedisOutOfConfiguredMaxmemory
    expr: redis_memory_used_bytes / redis_memory_max_bytes * 100 > 90
    for: 2m
    labels:
      severity: warning
    annotations:
      title: 'Redis out of configured maxmemory'
      description: "Redis实例: 【{{ $labels.instance }}】,Redis is running out of configured maxmemory (> 90%), Current Value: {{ $value }}"

  - alert: RedisTooManyConnections
    expr: redis_connected_clients > 100
    for: 2m
    labels:
      severity: warning
    annotations:
      title: 'Redis too many connections'
      description: "Redis实例: 【{{ $labels.instance }}】, Redis instance has too many connections, Current Value: {{ $value }}"

  - alert: RedisNotEnoughConnections
    expr: redis_connected_clients < 5
    for: 2m
    labels:
      severity: warning
    annotations:
      title: 'Redis not enough connections'
      description: "Redis实例: 【{{ $labels.instance }}】, Redis instance should have more connections (> 5), Current Value: {{ $value }}"

  - alert: RedisRejectedConnections
    expr: increase(redis_rejected_connections_total[1m]) > 0
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'Redis rejected connections'
      description: "Redis实例: 【{{ $labels.instance }}】, Some connections to Redis has been rejected, Current Value: {{ $value }}"
EOF
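
Once any of these rule files start firing, you can confirm that the alerts actually reached Alertmanager with amtool, which ships with Alertmanager. A minimal sketch; the Alertmanager address is an assumption:

# List the alerts currently held by Alertmanager
amtool alert query --alertmanager.url=http://127.0.0.1:9093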

Elasticsearch.rules

cat > /data/prometheus/conf/rules/elasticsearch.yaml << 'EOF'
groups:
- name: Elasticsearch.rules
  rules:
   ## ES Alarm Rules
  - alert: ElasticsearchHeapUsageTooHigh
    expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 90
    for: 2m
    labels:
      severity: critical
    annotations:
      title: "Elasticsearch Heap Usage Too High"
      description: "主机: 【{{ $labels.instance }}】, The heap usage is over 90%, Current Value: {{ $value }}"

  - alert: ElasticsearchHeapUsageWarning
    expr: (elasticsearch_jvm_memory_used_bytes{area="heap"} / elasticsearch_jvm_memory_max_bytes{area="heap"}) * 100 > 80
    for: 2m
    labels:
      severity: warning
    annotations:
      title: 'Elasticsearch Heap Usage warning'
      description: "主机: 【{{ $labels.instance }}】, The heap usage is over 80%, Current Value: {{ $value }}"

  - alert: ElasticsearchDiskOutOfSpace
    expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 10
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'Elasticsearch disk out of space'
      description: "主机: 【{{ $labels.instance }}】, The disk usage is over 90%, Current Value: {{ $value }}"

  - alert: ElasticsearchDiskSpaceLow
    expr: elasticsearch_filesystem_data_available_bytes / elasticsearch_filesystem_data_size_bytes * 100 < 20
    for: 2m
    labels:
      severity: warning
    annotations:
      title: 'Elasticsearch disk space low'
      description: "主机: 【{{ $labels.instance }}】, The disk usage is over 80%, Current Value: {{ $value }}"

  - alert: ElasticsearchClusterRed
    expr: elasticsearch_cluster_health_status{color="red"} == 1
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'Elasticsearch Cluster Red'
      description: "主机: 【{{ $labels.instance }}】, Elastic Cluster Red status"

  - alert: ElasticsearchClusterYellow
    expr: elasticsearch_cluster_health_status{color="yellow"} == 1
    for: 0m
    labels:
      severity: warning
    annotations:
      title: 'Elasticsearch Cluster Yellow'
      description: "主机: 【{{ $labels.instance }}】, Elastic Cluster Yellow status"

  - alert: ElasticsearchHealthyNodes
    expr: elasticsearch_cluster_health_number_of_nodes < 3
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'Elasticsearch Healthy Nodes'
      description: "Missing node in Elasticsearch cluster"

  - alert: ElasticsearchHealthyDataNodes
    expr: elasticsearch_cluster_health_number_of_data_nodes < 3
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'Elasticsearch Healthy Data Nodes'
      description: "Missing data node in Elasticsearch cluster"

  - alert: ElasticsearchRelocatingShards
    expr: elasticsearch_cluster_health_relocating_shards > 0
    for: 0m
    labels:
      severity: info
    annotations:
      title: 'Elasticsearch relocating shards'
      description: "主机: 【{{ $labels.instance }}】, Elasticsearch is relocating shards"

  - alert: ElasticsearchRelocatingShardsTooLong
    expr: elasticsearch_cluster_health_relocating_shards > 0
    for: 15m
    labels:
      severity: warning
    annotations:
      title: 'Elasticsearch relocating shards too long'
      description: "主机: 【{{ $labels.instance }}】, Elasticsearch has been relocating shards for 15min"

  - alert: ElasticsearchInitializingShards
    expr: elasticsearch_cluster_health_initializing_shards > 0
    for: 0m
    labels:
      severity: info
    annotations:
      title: 'Elasticsearch initializing shards'
      description: "主机: 【{{ $labels.instance }}】, Elasticsearch is initializing shards"

  - alert: ElasticsearchInitializingShardsTooLong
    expr: elasticsearch_cluster_health_initializing_shards > 0
    for: 15m
    labels:
      severity: warning
    annotations:
      title: 'Elasticsearch initializing shards too long'
      description: "主机: 【{{ $labels.instance }}】, Elasticsearch has been initializing shards for 15 min"

  - alert: ElasticsearchUnassignedShards
    expr: elasticsearch_cluster_health_unassigned_shards > 0
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'Elasticsearch unassigned shards'
      description: "主机: 【{{ $labels.instance }}】, Elasticsearch has unassigned shards"

  - alert: ElasticsearchPendingTasks
    expr: elasticsearch_cluster_health_number_of_pending_tasks > 0
    for: 15m
    labels:
      severity: warning
    annotations:
      title: 'Elasticsearch pending tasks'
      description: "主机: 【{{ $labels.instance }}】, Elasticsearch has pending tasks. Cluster works slowly, Current Value: {{ $value }}"

  - alert: ElasticsearchNoNewDocuments
    expr: increase(elasticsearch_indices_docs{es_data_node="true"}[10m]) < 1
    for: 0m
    labels:
      severity: warning
    annotations:
      title: 'Elasticsearch no new documents'
      description: "主机: 【{{ $labels.instance }}】, Elasticsearch No new documents for 10 min!"
EOF

kafka.rules

cat > /data/prometheus/conf/rules/kafka.yaml << 'EOF'
groups:
- name: kafka.rules
  rules:
##  KAFKA Alarm Rules
  - alert: KafkaTopicsReplicas
    expr: sum(kafka_topic_partition_in_sync_replica) by (topic) < 3
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'Kafka topics replicas less than 3'
      description: "Topic: {{ $labels.topic }} partition less than 3, Current Value: {{ $value }}"

  - alert: KafkaConsumersGroupLag
    expr: sum(kafka_consumergroup_lag) by (consumergroup) > 50
    for: 1m
    labels:
      severity: critical
    annotations:
      title: 'Kafka consumers group 消费滞后'
      description: "Kafka consumers group 消费滞后 (Lag > 50), Lag值: {{ $value }}"
      
  - alert: KafkaConsumersTopicLag
    expr: sum(kafka_consumergroup_lag) by (topic) > 50
    for: 1m
    labels:
      severity: critical
    annotations:
      title: 'Kafka Topic 消费滞后'
      description: "Kafka Topic 消费滞后 (Lag > 50), Lag值: {{ $value }}"
EOF

Docker.rules

cat > /data/prometheus/conf/rules/Docker.yaml << 'EOF'
groups:
- name: Docker.rules
  rules:
  - alert: DockerInstanceDown
    expr: up{job="cAdvisor"} == 0
    for: 0m
    labels:
      severity: critical
    annotations:
      title: 'Docker Instance down'
      description: "容器实例: 【{{ $labels.instance }}】has been down for more than 1 minute"
      
  - alert: ContainerKilled
    expr: time() - container_last_seen{name!=""} > 60
    for: 1m
    labels:
      severity: critical
    annotations:
      title: "A Container has disappeared"
      description: "Container Name 【{{ $labels.name }}】 on 主机【{{ $labels.instance }}】 has disappeared"

  - alert: ContainerCpuUsage
    expr: (sum by(instance, name) (rate(container_cpu_usage_seconds_total{name!=""}[3m])) * 100) > 80
    for: 2m
    labels:
      severity: warning
    annotations:
      title: "Container CPU usaged above 80%"
      description: "Container Name 【{{ $labels.name }}】 on 主机【{{ $labels.instance }}】 CPU usage is above 80%, Current Value: {{ $value }}"

  - alert: ContainerMemoryUsage
    expr: (sum by(instance, name) (container_memory_working_set_bytes{name!=""}) / sum by(instance, name) (container_spec_memory_limit_bytes{name!=""} > 0) * 100)  > 80
    for: 2m
    labels:
      severity: warning
    annotations:
      title: "Container CPU usaged above 80%"
      description: "Container Name 【{{ $labels.name }}】 on 主机【{{ $labels.instance }}】 Memory usage is above 80%, Current Value: {{ $value }}"
  - alert: ContainerVolumeUsage
    expr: (1 - (sum(container_fs_inodes_free) BY (instance) / sum(container_fs_inodes_total) BY (instance))) * 100 > 80
    for: 5m
    labels:
      severity: warning
    annotations:
      title: "Container Volume usaged above 80%"
      description: "Container Name 【{{ $labels.name }}】 on 主机【{{ $labels.instance }}】 Volume usage is above 80%, Current Value: {{ $value }}"
EOF
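
The DockerInstanceDown rule matches job="cAdvisor", so the scrape job in prometheus.yml must be named exactly that. A minimal sketch; the cAdvisor target address is an assumption:

# prometheus.yml (fragment)
scrape_configs:
  - job_name: 'cAdvisor'
    static_configs:
      - targets: ['192.168.1.10:8080']   # cAdvisor address (assumption)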
