Deploying ELK with docker-compose


Filebeat collects the data: based on its configuration file, it ships the generated log file to Logstash, Logstash forwards the data to Elasticsearch, and Kibana handles the visualization of the data.
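
The files referenced in the rest of this post are assumed to sit in a layout like the following (the paths come from the bind mounts used in docker-compose.yaml below):

.
├── docker-compose.yaml
├── Filebeat/
│   ├── filebeat.yml
│   └── app.log
└── logstash/
    └── logstash_stdout.conf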

Modify /etc/sysctl.conf

echo "vm.max_map_count=262144" > /etc/sysctl.conf
sysctl -p
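
Verify that the setting is active:

sysctl vm.max_map_count
# vm.max_map_count = 262144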

docker-compose.yaml

version: "3.2"
services: 
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.4.2
    networks:
      - "elk-net"
    container_name: elasticsearch
    ports:
      - "9200:9200"
    environment:
      - discovery.zen.minimum_master_nodes=2
      - bootstrap.memory_lock=true
      - node.name=es01
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es02,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1

  es02:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.4.2
    networks:
      - "elk-net"
    container_name: es02
    environment:
      - discovery.zen.minimum_master_nodes=2
      - node.name=es02
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=elasticsearch,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      # - bootstrap.mlockall=true
    ulimits:
      memlock:
        soft: -1
        hard: -1

  es03:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.4.2
    networks:
      - "elk-net"
    container_name: es03
    environment:
      - discovery.zen.minimum_master_nodes=2
      - node.name=es03
      - cluster.name=es-docker-cluster
      - discovery.seed_hosts=es02,elasticsearch
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      # - bootstrap.mlockall=true
    ulimits:
      memlock:
        soft: -1
        hard: -1

  filebeat: 
    container_name: filebeat
    image: docker.elastic.co/beats/filebeat:7.4.2
    networks:
      - "elk-net"
    volumes:
      - type: bind
        source: "./Filebeat/app.log"
        target: "/app.log"
      - type: bind
        source: "./Filebeat/filebeat.yml"
        target: "/usr/share/filebeat/filebeat.yml" 
    depends_on:
      - "logstash"

  logstash:
    container_name: logstash
    image: docker.elastic.co/logstash/logstash:7.4.2 
    volumes:
      - type: bind
        source: "./logstash/logstash_stdout.conf"
        target: "/usr/share/logstash/pipeline/logstash.conf" 
    networks:
      - "elk-net"
    depends_on:
      - "elasticsearch"
      
  kibana:
    container_name: kibana
    image: docker.elastic.co/kibana/kibana:7.4.2
    networks:
      - "elk-net"
    ports:
      - "5601:5601"
    depends_on:
      - "elasticsearch"  
networks:
  elk-net:
    driver: bridge
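
Start the stack and check that all three Elasticsearch nodes have joined the cluster (port 9200 is published on the host, so curl can be run from there):

docker-compose up -d
curl "http://localhost:9200/_cluster/health?pretty"   # expect "number_of_nodes" : 3 and "status" : "green"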

Access Kibana at http://localhost:5601

(At this point Kibana is only reachable in the browser; the collected data is not displayed yet.)


filebeat.yml

filebeat.inputs:  
- type: log
  paths:
    - /*.log

output.logstash:   
  # The Logstash hosts
  hosts: ["logstash:5044"]  

Logstash configuration file (logstash_stdout.conf)

input {
  beats {
    port => 5044
    host => "0.0.0.0"
  }
}
output {
  elasticsearch{
    hosts => ["elasticsearch:9200"] 
    manage_template => false
    index => "%{[@metadata][beat]}-%{[@metadata][version]}-%{+YYYY.MM.dd}"
  }
  # stdout { codec => rubydebug }  # uncomment this line to debug if something goes wrong
}
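
Once a log line has flowed through, an index named like filebeat-7.4.2-<date> (from the index pattern in the output block above) should appear in Elasticsearch; you can then create an index pattern such as filebeat-* in Kibana under Management → Index Patterns to view the data. If nothing shows up, docker-compose logs -f logstash and docker-compose logs -f filebeat are the first places to look.

curl "http://localhost:9200/_cat/indices?v"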
