When you run many nginx servers, troubleshooting by logging into each box one by one to read its logs is time-consuming and laborious, so we centralize the logs with the stack below.
Versions used:
CentOS 7.4
Elasticsearch 6.6.2
Logstash 6.6.2
Kibana 6.6.2
Filebeat 6.6.2
Redis 4.0.1
Supervisor
filebeat -- tails the nginx access logs and pushes entries into Redis
logstash -- pulls from Redis and writes to Elasticsearch
kibana -- reads from Elasticsearch and presents the data in a web UI
kibana -- lets you search logs, rank and sort by visitor IP, view the IP distribution, and so on
supervisor -- supervises all of the above as daemonized processes
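End to end, the data flows like this:

nginx (JSON access log) -> filebeat -> Redis (list) -> logstash (GeoIP enrichment) -> Elasticsearch -> kibana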
Why filebeat rather than logstash on the nginx servers: logstash needs a Java runtime and is comparatively heavyweight, while filebeat is a lightweight shipper.
The nginx logs are written as JSON, which improves logstash throughput at high volume since no grok parsing is needed.
When logstash reads each event and indexes it into Elasticsearch, it looks up the client IP in GeoLite2-City.mmdb to enrich the event with location details, so kibana can plot the geographic distribution of visitor IPs on a map.
Why supervisor: filebeat, logstash, kibana, and so on are all started from the command line. You can daemonize them with shell scripts, but that brings problems of its own, so supervisor is used to manage the processes, restarting them on failure and starting them at boot, which goes a long way toward keeping the ELK stack stable.
mkdir -p /var/log/supervisor/logs
yum install -y epel-release
yum install -y supervisor
echo "[inet_http_server]" >> /etc/supervisord.conf
echo "port=0.0.0.0:28999" >> /etc/supervisord.conf
echo "username=username" >> /etc/supervisord.conf
echo "password=password >> /etc/supervisord.conf
systemctl enable supervisord
systemctl restart supervisord
supervisorctl reload
supervisorctl status
supervisorctl stop <program-name>
supervisorctl start <program-name>
The web UI at http://IP:28999 accepts the username and password configured above.
The nginx log format is shown below; it follows the format used by this grafana dashboard:
https://grafana.com/dashboards/2292
log_format main '{"@timestamp":"$time_iso8601",'
                '"@source":"$server_addr",'
                '"hostname":"$hostname",'
                '"ip":"$http_x_forwarded_for",'
                '"client":"$remote_addr",'
                '"request_method":"$request_method",'
                '"scheme":"$scheme",'
                '"domain":"$server_name",'
                '"referer":"$http_referer",'
                '"request":"$request_uri",'
                '"args":"$args",'
                '"size":$body_bytes_sent,'
                '"status":$status,'
                '"responsetime":$request_time,'
                '"upstreamtime":"$upstream_response_time",'
                '"upstreamaddr":"$upstream_addr",'
                '"http_user_agent":"$http_user_agent",'
                '"https":"$https"'
                '}';
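To put the format into effect, reference it from the http or server block and reload nginx (a minimal sketch, assuming an nginx built under /usr/local/nginx and the log path that filebeat tails below):

access_log /usr/local/nginx/log/nginx.access.log main;

/usr/local/nginx/sbin/nginx -t && /usr/local/nginx/sbin/nginx -s reload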
Software downloads: https://www.elastic.co/downloads/
vim /data/filebeat/conf/nginx.yml
filebeat.inputs:    # "filebeat.prospectors" was renamed to "filebeat.inputs" in 6.3
- type: log
  paths:
    - /usr/local/nginx/log/nginx.access.log
  json.message_key: log
  json.keys_under_root: true
  json.add_error_key: true

output.redis:
  # the redis output takes the port as part of hosts; there is no separate "port" option
  hosts: ["x.x.x.x:6379"]
  key: "xxx-nginx"
  db: 0
  password: "redispassword"
  timeout: 5
Start filebeat: /data/filebeat/filebeat -e -c /data/filebeat/conf/nginx.yml
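With filebeat running, a quick way to confirm events are accumulating in Redis is to check the length of the list it pushes to (host, password, and key as configured above):

redis-cli -h x.x.x.x -a redispassword llen xxx-nginx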
Now run filebeat under supervisord instead:
vim /etc/supervisord.d/filebeat-nginx.ini
[program:filebeat-nginx]
command=/data/filebeat/filebeat -e -c /data/filebeat/conf/nginx.yml
numprocs=1
autostart=true
startsecs=1
startretries=3
autorestart=true
stopsignal=TERM
stopwaitsecs=5
stopasgroup=true
killasgroup=true
user=root
stdout_logfile=/var/log/supervisor/logs/filebeat-nginx_stdout.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=20
stderr_logfile=/var/log/supervisor/logs/filebeat-nginx_stderr.log
stderr_logfile_maxbytes=100MB
stderr_logfile_backups=20
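After dropping the .ini into /etc/supervisord.d/, have supervisord load it and check the result:

supervisorctl update
supervisorctl status filebeat-nginx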
Next comes the logstash pipeline:
vim /data/logstash/config/nginx.conf

input {
  redis {
    host      => "127.0.0.1"
    port      => "6379"
    data_type => "list"
    key       => "xxx-nginx"
    type      => "xxx-nginx"
    password  => "redispassword"
  }
  redis {
    host      => "127.0.0.1"
    port      => "6379"
    data_type => "list"
    key       => "yyy-nginx"
    type      => "yyy-nginx"
    password  => "redispassword"
  }
}

filter {
  geoip {
    source   => "client"
    target   => "geoip"
    database => "/data/logstash/GeoLite2-City.mmdb"
    # two add_field calls on the same key build an array: [longitude, latitude]
    add_field => [ "[geoip][coordinates]", "%{[geoip][longitude]}" ]
    add_field => [ "[geoip][coordinates]", "%{[geoip][latitude]}" ]
  }
  mutate {
    convert => [ "[geoip][coordinates]", "float" ]
  }
}

output {
  if [type] == "xxx-nginx" {
    elasticsearch {
      hosts           => "127.0.0.1:9200"
      index           => "logstash-xxx-%{+YYYY.MM.dd}"
      manage_template => true
    }
  }
  if [type] == "yyy-nginx" {
    elasticsearch {
      hosts           => "127.0.0.1:9200"
      index           => "logstash-yyy-%{+YYYY.MM.dd}"
      manage_template => true
    }
  }
}
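Before handing logstash over to supervisor, it is worth syntax-checking the pipeline; --config.test_and_exit validates the config file and exits:

/data/logstash/bin/logstash -f /data/logstash/config/nginx.conf --config.test_and_exit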
Start logstash: /data/logstash/bin/logstash -f /data/logstash/config/nginx.conf
Now run logstash under supervisord instead:
vim /etc/supervisord.d/logstash-nginx.ini
[program:logstash-nginx]
command=/data/logstash/bin/logstash -f /data/logstash/config/nginx.conf
numprocs=1
autostart=true
startsecs=1
startretries=3
autorestart=true
stopsignal=TERM
stopwaitsecs=5
stopasgroup=true
killasgroup=true
user=root
stdout_logfile=/var/log/supervisor/logs/logstash-nginx_stdout.log
stdout_logfile_maxbytes=100MB
stdout_logfile_backups=20
stderr_logfile=/var/log/supervisor/logs/logstash-nginx_stderr.log
stderr_logfile_maxbytes=100MB
stderr_logfile_backups=20
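As with filebeat, reload supervisord and verify, then confirm the daily indices are showing up in Elasticsearch:

supervisorctl update
supervisorctl status
curl -s '127.0.0.1:9200/_cat/indices?v' | grep logstash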