Prometheus, Grafana, and cAdvisor

Global settings

Network configuration

docker network create --driver bridge --subnet 10.0.0.0/24 --gateway 10.0.0.1 monitor
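
To confirm the subnet and gateway took effect, you can inspect the network (docker network inspect accepts a Go-template format string):

docker network inspect monitor --format '{{json .IPAM.Config}}'
# expected output, roughly: [{"Subnet":"10.0.0.0/24","Gateway":"10.0.0.1"}]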

Grafana configuration

Grafana image

# pull grafana image
docker pull grafana/grafana

Grafana directories

  • Configuration files: /etc/grafana/
  • SQLite3 database files: /var/lib/grafana

Grafana environment variables in Docker

  • GF_SERVER_ROOT_URL=http://grafana.server.name sets the URL used to access Grafana
  • GF_SECURITY_ADMIN_PASSWORD=secret sets the admin login password
  • GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource lists plugins to install at startup
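
These follow Grafana's general rule that any grafana.ini option can be overridden as GF_<SECTION>_<KEY>. A quick throwaway check of the mapping for [users] allow_sign_up, using a hypothetical container name and port so it does not clash with the real container started below:

docker run -d --rm --name grafana-env-test -p 3001:3000 \
 -e "GF_USERS_ALLOW_SIGN_UP=false" grafana/grafana
docker exec grafana-env-test env | grep ^GF_   # shows the injected GF_ variables
docker stop grafana-env-test                   # --rm cleans the container up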

Create persistent storage (a data-only container: it exits right away, but its /var/lib/grafana volume persists and is shared via --volumes-from)

docker run \
 -d \
 -v /var/lib/grafana \
 --name grafana-storage \
 busybox:latest
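
A named volume is an alternative to the data-only container above; a minimal sketch, not used in the steps below, with grafana-data as an example volume name:

docker volume create grafana-data
# it would then be mounted with: -v grafana-data:/var/lib/grafana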

Start the container

# start the Grafana container
docker run \
 -d \
 -p 3000:3000 \
 --name grafana \
 --volumes-from grafana-storage \
 -e "GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource" \
 -e "GF_SERVER_ROOT_URL=http://10.0.0.10:3000" \
 -e "GF_SECURITY_ADMIN_PASSWORD=marion" \
 --network monitor \
 --ip 10.0.0.10 \
 --restart always \
 grafana/grafana

# check where the configuration files and data directory are mounted
docker inspect grafana
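
Once the container is up, Grafana's health endpoint should answer on the published port (assuming the host can reach localhost:3000):

curl -s http://localhost:3000/api/health
# expected: a small JSON document reporting "database": "ok" and the Grafana version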

cAdvisor

sudo docker run \
  --volume=/:/rootfs:ro \
  --volume=/var/run:/var/run:rw \
  --volume=/sys:/sys:ro \
  --volume=/var/lib/docker/:/var/lib/docker:ro \
  --volume=/dev/disk/:/dev/disk:ro \
  --detach=true \
  --name=cadvisor \
  --network monitor \
  --ip 10.0.0.11 \
  google/cadvisor:latest
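
cAdvisor does not publish a port here, so check it from another container on the monitor network; busybox's wget is enough for a quick look at the metrics endpoint Prometheus will scrape:

docker run --rm --network monitor busybox:latest \
  wget -qO- http://10.0.0.11:8080/metrics | head -n 20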

Prometheus configuration

Save the following as /root/prometheus/prometheus.yml (the path mounted into the container below):

global:
  scrape_interval:     60s
  evaluation_interval: 60s

scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ['localhost:9090']
        labels:
          instance: prometheus

  - job_name: 'cAdvisor'
    static_configs:
      - targets: ['10.0.0.11:8080']
        labels:
          instance: db1
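
Before starting the container, the file can be sanity-checked with promtool, which ships inside recent prom/prometheus images (assuming the file lives at /root/prometheus/prometheus.yml, as mounted below):

docker run --rm \
 -v /root/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml \
 --entrypoint promtool \
 prom/prometheus check config /etc/prometheus/prometheus.yml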

# start the Prometheus container with the configuration above mounted in
docker run \
 -dit \
 -v /root/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml \
 --network monitor \
 --ip 10.0.0.12 \
 prom/prometheus
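
On a Linux Docker host the bridge IP is reachable directly, so you can confirm the scrape targets registered; both paths below are standard Prometheus HTTP API endpoints:

curl -s 'http://10.0.0.12:9090/api/v1/targets' | head -c 500; echo
curl -s 'http://10.0.0.12:9090/api/v1/query?query=up'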

Deploy with a docker-compose YAML file

docker-compose.yml

version: '2'

services:
  prometheus:
    image: prom/prometheus:latest
    container_name: monitoring_prometheus
    restart: unless-stopped
    volumes:
      - ./data/prometheus/config:/etc/prometheus/
      - ./data/prometheus/data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      # Prometheus 2.x no longer takes an -alertmanager.url flag; Alertmanager is configured in prometheus.yml
    expose:
      - 9090
    ports:
      - 9090:9090
    links:
      - cadvisor:cadvisor
      - node-exporter:node-exporter

  node-exporter:
    image: prom/node-exporter:latest
    container_name: monitoring_node_exporter
    restart: unless-stopped
    expose:
      - 9100

  cadvisor:
    image: google/cadvisor:latest
    container_name: monitoring_cadvisor
    restart: unless-stopped
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
    expose:
      - 8080

  grafana:
    image: grafana/grafana:latest
    container_name: monitoring_grafana
    restart: unless-stopped
    links:
      - prometheus:prometheus
    volumes:
      - ./data/grafana:/var/lib/grafana
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=MYPASSWORD
      - GF_USERS_ALLOW_SIGN_UP=false
      - GF_SERVER_DOMAIN=myurl.com
      - GF_SMTP_ENABLED=true
      - GF_SMTP_HOST=smtp.gmail.com:587
      - [email protected]
      - GF_SMTP_PASSWORD=mypassword
      - [email protected]

prometheus.yml (placed under ./data/prometheus/config/, which the compose file mounts at /etc/prometheus/)

# my global config
global:
  scrape_interval:     120s # Scrape targets every 120 seconds (the Prometheus default is 15s).
  evaluation_interval: 120s # Evaluate rules every 120 seconds (the default is 15s).
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
      monitor: 'my-project'

# Load and evaluate rules in this file every 'evaluation_interval' seconds.
rule_files:
  # - "alert.rules"
  # - "first.rules"
  # - "second.rules"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # Scrape this job every 120 seconds (the same as the global default set above).
    scrape_interval: 120s

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
         - targets: ['localhost:9090','cadvisor:8080','node-exporter:9100', 'nginx-exporter:9113']
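
With these targets scraped, typical queries for Grafana panels can be tried against the HTTP API first; port 9090 is published to the host in the compose file, and the metric names below are standard cAdvisor series:

# per-container CPU usage, in cores, averaged over 5 minutes
curl -sG 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=rate(container_cpu_usage_seconds_total{image!=""}[5m])'

# current memory usage per container, in bytes
curl -sG 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=container_memory_usage_bytes{image!=""}'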

Start the stack

docker-compose up -d
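
Then verify that all services came up and that Prometheus sees its targets:

docker-compose ps
curl -s http://localhost:9090/api/v1/targets | head -c 500; echo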
