[root@localhost ~]# cat /etc/redhat-release
CentOS Linux release 7.5.1804 (Core)
[root@localhost ~]# uname -r
3.10.0-862.el7.x86_64
[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# systemctl disable firewalld
[root@localhost ~]# sestatus
SELinux status: disabled
[root@localhost ~]# ls
anaconda-ks.cfg apache-tomcat-8.5.33.tar.gz jdk-8u60-linux-x64.tar.gz nginx-1.10.2.tar.gz
[root@localhost ~]# tar xf jdk-8u60-linux-x64.tar.gz -C /usr/local/
[root@localhost ~]# mv /usr/local/jdk1.8.0_60 /usr/local/jdk
[root@localhost ~]# vim /etc/profile
[root@localhost ~]# tail -3 /etc/profile
export JAVA_HOME=/usr/local/jdk/
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/tools.jar:$JAVA_HOME/lib/dt.jar:$CLASSPATH
[root@localhost ~]# source /etc/profile
[root@localhost ~]# java -version
java version "1.8.0_60"
Java(TM) SE Runtime Environment (build 1.8.0_60-b27)
Java HotSpot(TM) 64-Bit Server VM (build 25.60-b23, mixed mode)
[root@localhost ~]# ls
anaconda-ks.cfg jdk-8u60-linux-x64.tar.gz nginx-1.10.2.tar.gz
apache-tomcat-8.5.33.tar.gz kibana-6.2.3-linux-x86_64.tar.gz
[root@localhost ~]# useradd -s /sbin/nologin -M elk
[root@localhost ~]# tar xf kibana-6.2.3-linux-x86_64.tar.gz -C /usr/local/
[root@localhost ~]# mv /usr/local/kibana-6.2.3-linux-x86_64 /usr/local/kibana
[root@localhost ~]# cd /usr/local/kibana/config/
[root@localhost config]# cp kibana.yml{,.bak}
[root@localhost config]# vim kibana.yml
2 server.port: 5601 #暂时就先修改这两行
7 server.host: "0.0.0.0" #暂时就先修改这两行
21 #elasticsearch.url: "http://localhost:9200"
39 #elasticsearch.username: "user"
40 #elasticsearch.password: "pass"
[root@localhost config]# chown -R elk.elk /usr/local/kibana/
[root@localhost config]# vim /usr/local/kibana/bin/start.sh
nohup /usr/local/kibana/bin/kibana >> /tmp/kibana.log 2>> /tmp/kibana.log &
[root@localhost config]# chmod a+x /usr/local/kibana/bin/start.sh
[root@localhost config]# su -s /bin/bash elk '/usr/local/kibana/bin/start.sh'
[root@localhost config]# ps -ef | grep elk | grep -v grep
elk 1311 1 10 09:07 pts/0 00:00:08 /usr/local/kibana/bin/../node/bin/node --no-warnings /usr/local/kibana/bin/../src/cli
如果有防火墙需要开放tcp5601端口
[root@localhost config]# cat /tmp/kibana.log | grep warning | head -5
{"type":"log","@timestamp":"2019-01-09T14:07:14Z","tags":["warning","elasticsearch","admin"],"pid":1311,"message":"Unable to revive connection: http://localhost:9200/"}
{"type":"log","@timestamp":"2019-01-09T14:07:14Z","tags":["warning","elasticsearch","admin"],"pid":1311,"message":"No living connections"}
{"type":"log","@timestamp":"2019-01-09T14:07:17Z","tags":["warning","elasticsearch","admin"],"pid":1311,"message":"Unable to revive connection: http://localhost:9200/"}
{"type":"log","@timestamp":"2019-01-09T14:07:17Z","tags":["warning","elasticsearch","admin"],"pid":1311,"message":"No living connections"}
{"type":"log","@timestamp":"2019-01-09T14:07:19Z","tags":["warning","elasticsearch","admin"],"pid":1311,"message":"Unable to revive connection: http://localhost:9200/"}
这里有个警告,意思是连接不上elasticsearch,忽略,因为我们还没有装它。
#修改kibana的配置文件,改为监听127.0.0.1
[root@localhost config]# vim /usr/local/kibana/config/kibana.yml
7 server.host: "127.0.0.1"
#关闭kibana,重启动kibana
[root@localhost config]# ps -ef | grep elk
elk 1311 1 1 09:07 pts/0 00:00:12 /usr/local/kibana/bin/../node/bin/node --no-warnings /usr/local/kibana/bin/../src/cli
root 1360 1241 0 09:19 pts/0 00:00:00 grep --color=auto elk
[root@localhost config]# kill -9 1311
[root@localhost config]# ps -ef | grep elk
root 1362 1241 0 09:19 pts/0 00:00:00 grep --color=auto elk
[root@localhost nginx]# su -s /bin/bash elk '/usr/local/kibana/bin/start.sh'
[root@localhost nginx]# ps -ef | grep elk | grep -v grep
elk 1415 1 12 09:35 pts/0 00:00:02 /usr/local/kibana/bin/../node/bin/node --no-warnings /usr/local/kibana/bin/../src/cli
#编译安装nginx
[root@localhost ~]# tar xf nginx-1.10.2.tar.gz -C /usr/src/
[root@localhost ~]# cd /usr/src/nginx-1.10.2/
[root@localhost nginx-1.10.2]# yum -y install pcre-devel openssl-devel
[root@localhost nginx-1.10.2]# useradd -s /sbin/nologin -M www
[root@localhost nginx-1.10.2]# ./configure --user=www --group=www --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module
[root@localhost nginx-1.10.2]# make && make install
[root@localhost nginx-1.10.2]# ln -s /usr/local/nginx/sbin/* /usr/local/sbin/
[root@localhost config]# nginx -V
nginx version: nginx/1.10.2
built by gcc 4.8.5 20150623 (Red Hat 4.8.5-36) (GCC)
built with OpenSSL 1.0.2k-fips 26 Jan 2017
TLS SNI support enabled
configure arguments: --user=www --group=www --prefix=/usr/local/nginx --with-http_stub_status_module --with-http_ssl_module
#编辑nginx配置文件,进行访问控制,并启动nginx
[root@localhost nginx]# cp conf/nginx.conf{,.bak}
[root@localhost nginx]# egrep -v "#|^$" conf/nginx.conf.bak > conf/nginx.conf
[root@localhost nginx]# vim conf/nginx.conf
worker_processes 1;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
log_format main '$remote_addr - $remote_user [$time_local] "$request"'
'$status $body_bytes_sent "$http_referer"'
'"$http_user_agent""$http_x_forwarded_for"';
server {
listen 5609;
access_log /usr/local/nginx/logs/kibana_access.log main;
error_log /usr/local/nginx/logs/kibana_error.log error;
location / {
allow 192.168.100.1;
deny all;
proxy_pass http://127.0.0.1:5601;
}
}
}
[root@localhost nginx]# /usr/local/nginx/sbin/nginx -t
nginx: the configuration file /usr/local/nginx/conf/nginx.conf syntax is ok
nginx: configuration file /usr/local/nginx/conf/nginx.conf test is successful
[root@localhost nginx]# /usr/local/nginx/sbin/nginx
[root@localhost nginx]# netsta -antup | grep nginx
-bash: netsta: command not found
[root@localhost nginx]# netstat -antup | grep nginx
tcp 0 0 0.0.0.0:5609 0.0.0.0:* LISTEN 1405/nginx: master
# nginx编译完毕
[root@www html]# vim /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.146.174 www.yunjisuan.com
location / {
auth_basic "elk auth";
auth_basic_user_file /usr/local/nginx/conf/htpasswd;
proxy_pass http://127.0.0.1:5601;
}
elasticsearch未安装之前,kibana网页上报错,提示找不到elasticsearch。
[root@localhost ~]# tar xf elasticsearch-6.2.3.tar.gz -C /usr/local/
[root@localhost ~]# mv /usr/local/elasticsearch-6.2.3 /usr/local/elasticsearch
#修改以下配置文件中的代码
[root@localhost ~]# vim /usr/local/elasticsearch/config/elasticsearch.yml
33 path.data: /usr/local/elasticsearch/data
37 path.logs: /usr/local/elasticsearch/logs
55 network.host: 127.0.0.1 #只支持本地写入数据为了ES的安全,在企业中都是用IP地址访问
59 http.port: 9200
[root@localhost ~]# chown -R elk.elk /usr/local/elasticsearch/
[root@ELK ~]# vim /usr/local/elasticsearch/config/jvm.options
22 -Xms1g
23 -Xmx1g
[root@localhost ~]# vim /usr/local/elasticsearch/bin/start.sh
/usr/local/elasticsearch/bin/elasticsearch -d >> /tmp/elasticsearch.log 2>> /tmp/elasticsearch.log
[root@localhost ~]# chmod a+x /usr/local/elasticsearch/bin/start.sh
[root@localhost ~]# su -s /bin/bash elk '/usr/local/elasticsearch/bin/start.sh'
[root@localhost ~]# ps -ef | grep elk | grep -v grep
elk 10389 1 3 10:46 pts/0 00:00:06 /usr/local/kibana/bin/../node/bin/node --no-warnings /usr/local/kibana/bin/../src/cli
elk 10450 1 78 10:49 pts/0 00:00:01 /usr/local/jdk//bin/java -Xms1g -Xmx1g -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly -XX:+AlwaysPreTouch -Xss1m -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djna.nosys=true -XX:-OmitStackTraceInFastThrow -Dio.netty.noUnsafe=true -Dio.netty.noKeySetOptimization=true -Dio.netty.recycler.maxCapacityPerThread=0 -Dlog4j.shutdownHookEnabled=false -Dlog4j2.disable.jmx=true -Djava.io.tmpdir=/tmp/elasticsearch.JfukyhKj -XX:+HeapDumpOnOutOfMemoryError -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime -Xloggc:logs/gc.log -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=32 -XX:GCLogFileSize=64m -Des.path.home=/usr/local/elasticsearch -Des.path.conf=/usr/local/elasticsearch/config -cp /usr/local/elasticsearch/lib/* org.elasticsearch.bootstrap.Elasticsearch -d
[root@localhost ~]# tail -f /tmp/kibana.log
重新刷新url:http://192.168.100.100:5609
观察日志,看看还有没有报错。
备注:假如elasticsearch如果监听在非127.0.0.1,那么需要修改内核参数等,在这里就不多说了。
[root@localhost ~]# tar xf logstash-6.2.3.tar.gz -C /usr/local/
[root@localhost ~]# mv /usr/local/logstash-6.2.3 /usr/local/logstash
#修改如下配置
[root@localhost ~]# vim /usr/local/logstash/config/jvm.options
6 -Xms1g
7 -Xmx1g
#配置文件没有,需要新建
[root@localhost ~]# vim /usr/local/logstash/config/logstash.conf
input {
file {
path => "/usr/local/nginx/logs/kibana_access.log" #读取日志路径
}
}
output {
elasticsearch {
hosts => ["http://127.0.0.1:9200"] #保存日志url
}
}
[root@localhost ~]# vim /usr/local/logstash/bin/start.sh
nohup /usr/local/logstash/bin/logstash -f /usr/local/logstash/config/logstash.conf >> /tmp/logstash.log 2>> /tmp/logstash.log &
[root@localhost ~]# chmod a+x /usr/local/logstash/bin/start.sh
[root@localhost ~]# /usr/local/logstash/bin/start.sh
[root@localhost ~]# ps -ef | grep logstash
root 10700 1 76 11:12 pts/0 00:00:12 /usr/local/jdk//bin/java -Xms1g -Xmx1g -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=75 -XX:+UseCMSInitiatingOccupancyOnly -Djava.awt.headless=true -Dfile.encoding=UTF-8 -Djruby.compile.invokedynamic=true -Djruby.jit.threshold=0 -XX:+HeapDumpOnOutOfMemoryError -Djava.security.egd=file:/dev/urandom -cp /usr/local/logstash/logstash-core/lib/jars/animal-sniffer-annotations-1.14.jar:/usr/local/logstash/logstash-core/lib/jars/commons-compiler-3.0.8.jar:/usr/local/logstash/logstash-core/lib/jars/error_prone_annotations-2.0.18.jar:/usr/local/logstash/logstash-core/lib/jars/google-java-format-1.5.jar:/usr/local/logstash/logstash-core/lib/jars/guava-22.0.jar:/usr/local/logstash/logstash-core/lib/jars/j2objc-annotations-1.1.jar:/usr/local/logstash/logstash-core/lib/jars/jackson-annotations-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/jackson-core-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/jackson-databind-2.9.1.jar:/usr/local/logstash/logstas-core/lib/jars/jackson-dataformat-cbor-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/janino-3.0.8.jar:/usr/local/logstash/logstash-core/lib/jars/javac-shaded-9-dev-r4023-3.jar:/usr/local/logstash/logstash-core/lib/jars/jruby-complete-9.1.13.0.jar:/usr/local/logstash/logstash-core/lib/jars/jsr305-1.3.9.jar:/usr/local/logstash/logstash-core/lib/jars/log4j-api-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/log4j-core-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/log4j-slf4j-impl-2.9.1.jar:/usr/local/logstash/logstash-core/lib/jars/logstash-core.jar:/usr/local/logstash/logstash-core/lib/jars/slf4j-api-1.7.25.jar org.logstash.Logstash -f /usr/local/logstash/config/logstash.conf
root 10725 10274 0 11:12 pts/0 00:00:00 grep --color=auto logstash
特别提示:
logstash启动的比较慢,需要多等一会儿。
如果在kibana的Discover里能看到添加索引就说明logstash启动好了
运维分析日志的几个方面:
(1)并发访问量PV
(2)图片流量
7. 在kibana上配置索引,展现获取的kibana日志数据
进行数据展现字段的筛选
对nginx的kibana_access.log进行数据追踪,对比分析
[root@localhost ~]# tail -f /usr/local/nginx/logs/kibana_access.log
#执行下边的命令
[root@ELK ~]# /usr/local/logstash/bin/logstash -e ""
welcome #输入的内容
Sending Logstash's logs to /usr/local/logstash/logs which is now configured via log4j2.properties
[2019-01-09T20:32:36,851][INFO ][logstash.modules.scaffold] Initializing module {:module_name=>"fb_apache", :directory=>"/usr/local/logstash/modules/fb_apache/configuration"}
[2019-01-09T20:32:36,894][INFO ][logstash.modules.scaffold] Initializing module {:module_name=>"netflow", :directory=>"/usr/local/logstash/modules/netflow/configuration"}
[2019-01-09T20:32:39,478][WARN ][logstash.config.source.multilocal] Ignoring the 'pipelines.yml' file because modules or command line options are specified
[2019-01-09T20:32:40,873][INFO ][logstash.runner ] Starting Logstash {"logstash.version"=>"6.2.3"}
[2019-01-09T20:32:41,811][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2019-01-09T20:32:44,847][INFO ][logstash.pipeline ] Starting pipeline {:pipeline_id=>"main", "pipeline.workers"=>1, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>50}
The stdin plugin is now waiting for input:
[2019-01-09T20:32:45,170][INFO ][logstash.pipeline ] Pipeline started succesfully {:pipeline_id=>"main", :thread=>"#"}
[2019-01-09T20:32:45,356][INFO ][logstash.agent ] Pipelines running {:count=>1, :pipelines=>["main"]}
{
"@version" => "1",
"type" => "stdin",
"host" => "localhost",
"message" => "welcome",
"@timestamp" => 2019-01-10T01:32:45.302Z
}
^C[2019-01-09T20:32:58,826][WARN ][logstash.runner ] SIGINT received. Shutting down.
[2019-01-09T20:32:59,392][INFO ][logstash.pipeline ] Pipeline has terminated {:pipeline_id=>"main", :thread=>"#"}
可以看到logstash结尾自动添加了几个字段,时间戳@timestamp,版本@version,输入的类型type,以及主机名host
input {
file { path => "/var/log/messages" type => "syslog" }
file { path => "/var/log/apache/access.log" type => "apache" }
}
类似的,如果在filter中添加了多种处理规则,则按照它的顺序依次处理,但是有一些插件并不是线程安全的。
比如在filter中指定了两个一样的插件,这两个任务并不能保证准确的按顺序执行,因此官方也推荐避免在filter中重复使用插件。
我们更改一下logstash的配置文件进行正则抓取数据的测试。
#logstash提取数据段配置文件模板
[root@ELK config]# cat logstash.conf
input {
stdin{} #从标准输入读取数据
}
filter {
grok {
match => {
"message" => '(?<字段名>正则表达式).*'
}
}
}
output {
elasticsearch { #如果要输入到elasticsearch里,那么需要注释掉stdout{}
hosts => ["http://127.0.0.1:9200"]
}
stdout { #只将信息输出到屏幕上
codec => rubydebug #用于正则提取测试,将正则抓取结果输出到屏幕上
}
}
#修改logstash配置文件,将数据输出到数据库
[root@localhost ~]# vim /usr/local/logstash/config/logstash.conf
input {
stdin{}
}
filter {
grok {
match => {
"message" => '(?[a-zA-Z]+ [0-9]+ [0-9:]+) (?[a-zA-Z]+).*'
}
}
}
output {
elasticsearch {
hosts => ["http://127.0.0.1:9200"]
}
}
#交互式启动logstash
[root@localhost ~]# /usr/local/logstash/bin/logstash -f /usr/local/logstash/config/logstash.conf
#输出结果如下
Aug 16 18:29:49 ELK systemd: Startup finished in 789ms (kernel) + 1.465s (initrd) + 18.959s (userspace) = 21.214s.
#logstash配置文件如下
[root@localhost ~]# vim /usr/local/logstash/config/logstash.conf
input {
stdin{}
}
filter {
grok {
match => {
"message" => '(?[a-zA-Z]+ [0-9]+ [0-9:]+) (?[a-zA-Z]+).*'
}
}
}
output {
elasticsearch {
hosts => ["http://127.0.0.1:9200"]
}
stdout { #标准输出到屏幕上
codec => rubydebug
}
}
#交互式启动logstash
[root@localhost ~]# /usr/local/logstash/bin/logstash -f /usr/local/logstash/config/logstash.conf
#输出结果如下
Aug 16 18:29:49 ELK systemd: Startup finished in 789ms (kernel) + 1.465s (initrd) + 18.959s (userspace) = 21.214s.
{
"mydate" => "Aug 16 18:29:49",
"@timestamp" => 2019-01-10T06:24:20.842Z,
"hostname" => "ELK",
"message" => "Aug 16 18:29:49 ELK systemd: Startup finished in 789ms (kernel) + 1.465s (initrd) + 18.959s (userspace) = 21.214s.",
"host" => "localhost",
"@version" => "1"
}
logstash如果直接把一整行日志直接发送给elasticsearch,kibana显示出来就没有什么意义,我们需要提取自己想要的字段。假如说我们想要提取响应码,用户访问url,响应时间等,就得依靠正则来提取。
#logstash提取数据段配置文件模板
input { #日志输入来源函数
file {
path => "/usr/local/nginx/logs/kibana_access.log"
}
}
filter { #字段数据提取函数
grok {
match => {
"message" => '(?<字段名>正则表达式).*'
}
}
}
output { #数据输出目的地函数
elasticsearch {
hosts => ["http://127.0.0.1:9200"]
}
}
[root@localhost ~]# vim /usr/local/logstash/config/logstash.conf
input {
file {
path => "/usr/local/nginx/kibana_access.log"
}
}
filter {
grok {
match => {
"message" => '(?[0-9.]+) .*'
}
}
}
output {
elasticsearch {
hosts => ["http://127.0.0.1:9200"]
}
}
[root@localhost ~]# tail -1 /usr/local/nginx/logs/kibana_access.log
192.168.100.1 - - [10/Jan/2019:01:13:41 -0500] "PUT /api/saved_objects/index-pattern/780c6150-142a-11e9-8d9e-8bc1fa0c952d HTTP/1.1"200 430 "http://192.168.100.100:5609/app/kibana""Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0""-"
[root@localhost ~]# tail -1 /usr/local/nginx/logs/kibana_access.log
192.168.100.1 - - [10/Jan/2019:01:13:41 -0500] "PUT /api/saved_objects/index-pattern/780c6150-142a-11e9-8d9e-8bc1fa0c952d HTTP/1.1"200 430 "http://192.168.100.100:5609/app/kibana""Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:64.0) Gecko/20100101 Firefox/64.0""-"
[root@localhost ~]# vim /usr/local/logstash/config/logstash.conf
input {
file {
path => "/usr/local/nginx/kibana_access.log"
}
}
filter {
grok {
match => {
"message" => '(?[0-9.]+) .*HTTP/[0-9.]+"(?[0-9]+) (?[0-9]+)[ "]+(?[a-zA-Z]+://[0-9.]+:[0-9]+/[a-zA-Z/]+)".*'
}
}
}
output {
elasticsearch {
hosts => ["http://127.0.0.1:9200"]
}
}