环境
1. Linux rhel-server-6.9-x86_64-dvd
master 192.168.13.149
data 192.168.13.150
安装 elasticsearch-5.0.0.zip ———— master节点上
安装 logstash-5.0.0.zip ———— master节点上
安装 filebeat-6.1.0 ———— data节点上
测试filebeat收集到logstash,logstash分析转发elasticsearch
约定当前用户名为xingye 密码也为xingye
在master节点虚拟机上
1. jdk-8u151-linux-x64.rpm 用ftp放在Linux上 /home/xingye/Downloads 注意当前用户(xingye)应有可执行权限
2. 切换到root用户执行安装
su root
password:xingye
cd /home/xingye/Downloads
[root@localhost Downloads]# rpm -ivh jdk-8u151-linux-x64.rpm
Preparing... ########################################### [100%]
1:jdk1.8 ########################################### [100%]
Unpacking JAR files...
tools.jar...
plugin.jar...
javaws.jar...
deploy.jar...
rt.jar...
jsse.jar...
charsets.jar...
localedata.jar...
#rpm -ivh /home/xingye/Downloads/jdk-8u151-linux-x64.rpm
3.java -version 检查安装是否成功
[root@localhost Downloads]# java -version
成功会打印以下信息
java version "1.8.0_151"
Java(TM) SE Runtime Environment (build 1.8.0_151-b12)
Java HotSpot(TM) 64-Bit Server VM (build 25.151-b12, mixed mode)
在master节点虚拟机上
约定当前用户名为xingye 密码也为xingye
1.elasticsearch-5.0.0.zip 放在 /home/xingye/Downloads
[xingye@localhost Downloads]$ ls -l
total 202304
-rw-rw-r--. 1 xingye xingye 32990775 Dec 25 02:06 elasticsearch-5.0.0.zip
-rw-rw-r--. 1 xingye xingye 174163338 Dec 25 02:00 jdk-8u151-linux-x64.rpm
[xingye@localhost Downloads]$ pwd
/home/xingye/Downloads
2.创建工作目录
[xingye@localhost home]$ su root
Password:
[root@localhost home]# mkdir /home/local
将local文件夹赋给用户xingye
[root@localhost home]# chown -R xingye:xingye local
[root@localhost home]# ll
total 8
drwxr-xr-x. 2 xingye xingye 4096 Dec 25 02:11 local
drwxr-xr-x. 25 xingye xingye 4096 Dec 25 02:11 xingye
3.解压到工作目录
[root@localhost home]# su xingye
[xingye@localhost home]$ unzip /home/xingye/Downloads/elasticsearch-5.0.0.zip -d /home/local
unzip /home/xingye/Downloads/elasticsearch-5.0.0.zip -d /home/local
4.重命名
[xingye@localhost home]$ cd local
[xingye@localhost local]$ ll
total 4
drwxr-xr-x. 6 xingye xingye 4096 Oct 26 2016 elasticsearch-5.0.0
[xingye@localhost local]$ mv /home/local/elasticsearch-5.0.0 /home/local/elasticsearch5.0
[xingye@localhost local]$ ll
total 4
drwxr-xr-x. 6 xingye xingye 4096 Oct 26 2016 elasticsearch5.0
5.配置文件修改
vim /home/local/elasticsearch5.0/config/elasticsearch.yml
输入i,进入编辑状态修改以下配置
集群:
cluster.name: es-log
节点
node.name: log-1
是否用swap
bootstrap.memory_lock: true
绑定IP
network.host: 0.0.0.0
http.port: 9200
Esc退出编辑状态
:wq 写入并退出
6.切换root用户 修改系统设置
[xingye@localhost config]$ su root
Password:
[root@localhost config]# vim /etc/sysctl.conf
输入i,进入编辑状态在最后一行添加
vm.max_map_count=262144
Esc退出编辑状态
:wq 写入并退出
生效
[root@localhost bin]# sysctl -p
系统打开文件数 锁定内存
[root@localhost config]# vim /etc/security/limits.conf
输入i,进入编辑状态在最后一行添加
* hard nofile 65536
* soft nofile 65536
* soft memlock unlimited
* hard memlock unlimited
Esc退出编辑状态
:wq 写入并退出
修改用户可创建的线程数(nproc)上限,因为 Elasticsearch 在打开索引段文件时需要创建大量线程:
[root@localhost config]# vim /etc/security/limits.d/90-nproc.conf
输入i,进入编辑状态修改 1024为2048
* soft nproc 2048
Esc退出编辑状态
:wq 写入并退出
7.启动
cd /home/local/elasticsearch5.0/bin
[xingye@localhost bin]$ pwd
/home/local/elasticsearch5.0/bin
[xingye@localhost bin]$ ./elasticsearch
虚拟机有点慢
[2017-12-25T02:48:49,869][INFO ][o.e.n.Node ] [log-1] started
started就表示启动了
8.检测是否启动
新打开一个终端
curl -XGET '127.0.0.1:9200'
{
"name" : "log-1",
"cluster_name" : "es-log",
"cluster_uuid" : "PMvpxp3RQNKLV8EuwjUIPQ",
"version" : {
"number" : "5.0.0",
"build_hash" : "253032b",
"build_date" : "2016-10-26T04:37:51.531Z",
"build_snapshot" : false,
"lucene_version" : "6.2.0"
},
"tagline" : "You Know, for Search"
}
在master节点虚拟机上
约定当前用户名为xingye 密码也为xingye
1.logstash-5.0.0.zip 用ftp放在 /home/xingye/Downloads
2.创建工作目录(安装elasticsearch的时候已创建)
/home/local
3.解压到工作目录
unzip /home/xingye/Downloads/logstash-5.0.0.zip -d /home/local
4.重命名
[xingye@localhost ~]$ mv /home/local/logstash-5.0.0 /home/local/logstash5.0
[xingye@localhost ~]$ cd /home/local
[xingye@localhost local]$ ls
elasticsearch5.0 logstash5.0
5.验证安装是否成功
/home/local/logstash5.0/bin/logstash -e 'input{stdin{}}output{stdout{codec=>rubydebug}}'
hello world
回车以后等一段时间(耐心等待),打印以下内容则成功
{
"@timestamp" => 2017-12-18T10:20:48.043Z,
"@version" => "1",
"host" => "localhost.localdomain",
"message" => "hello world"
}
6.安装supervisor管理logstash(暂不安装supervisor)
wget https://pypi.python.org/packages/80/37/964c0d53cbd328796b1aeb7abea4c0f7b0e8c7197ea9b0b9967b7d004def/supervisor-3.3.1.tar.gz
7.解压到工作目录(暂不安装supervisor)
tar zxvf supervisor-3.3.1.tar.gz -C /home/local
8.切换root用户(暂不安装supervisor)
python setup.py install
9.输入数据到elasticsearch中
[xingye@localhost logstash5.0]$ /home/local/logstash5.0/bin/logstash -e 'input { stdin { } } output { elasticsearch { hosts => localhost } }'
hello
回车以后等一段时间(耐心等待)
成功会打印Successfully started Logstash API endpoint {:port=>9600}
10.查看elasticsearch
curl -XGET 'localhost:9200/_search?pretty'
输出包含以下内容
"hits" : {
"total" : 4,
"max_score" : 1.0,
"hits" : [
{
"_index" : "logstash-2017.12.18",
"_type" : "logs",
"_id" : "AWBppzXnGwYnIK10qskC",
"_score" : 1.0,
"_source" : {
"@timestamp" : "2017-12-18T12:44:06.649Z",
"@version" : "1",
"host" : "localhost.localdomain",
"message" : "hello"
}
},
在data节点虚拟机上
约定当前用户名为xingye 密码也为xingye
1. 创建工作目录
[xingye@localhost home]$ su root
Password:
[root@localhost home]# mkdir /home/local
将local文件夹赋给用户xingye
[root@localhost home]# chown -R xingye:xingye local
[root@localhost home]# ll
total 8
drwxr-xr-x. 2 xingye xingye 4096 Dec 25 02:11 local
drwxr-xr-x. 25 xingye xingye 4096 Dec 25 02:11 xingye
2.filebeat-6.1.0-linux-x86_64.tar.gz ftp上传到/home/xingye/Downloads
3.解压到工作目录
[xingye@localhost home]$ tar xzvf /home/xingye/Downloads/filebeat-6.1.0-linux-x86_64.tar.gz -C /home/local/
4.重命名
[xingye@localhost home]$ mv /home/local/filebeat-6.1.0-linux-x86_64 /home/local/filebeat6.1
5.修改配置文件
vim /home/local/filebeat6.1/filebeat.yml
输入i,进入编辑状态修改
- type: log
enabled: true
paths:
# - /var/log/*.log
- /home/local/logs/*.log
#- c:\programdata\elasticsearch\logs\*
#output.elasticsearch:
# Array of hosts to connect to.
# hosts: ["localhost:9200"]
output.logstash:
# The Logstash hosts
hosts: ["192.168.13.149:5044"]
Esc退出编辑状态
:wq 写入并退出
6.启动
[xingye@localhost filebeat6.1]$ pwd
/home/local/filebeat6.1
[xingye@localhost filebeat6.1]$ ./filebeat -e -c filebeat.yml -d "publish"
显示started则启动成功
2017/12/25 11:38:01.805226 registrar.go:150: INFO Starting Registrar
2017/12/25 11:38:01.805271 reload.go:127: INFO Config reloader started
2017/12/25 11:38:01.805440 reload.go:219: INFO Loading of config files completed.
准备测试数据
2016-12-01 11:18:05,550 INFO 127.0.0.1/zhangsan/GET/http://127.0.0.1:8080/xxxx/xxxx.html?xxxx
%{TIMESTAMP_ISO8601:date}\s%{LOGLEVEL:loglevel}\s%{IP:client}/%{USER:auth}/%{WORD:method}/%{NOTSPACE:request}
事件:logstash接收filebeat收集的日志 发送到elasticsearch
在master节点虚拟机上
1.创建事件配置目录
[xingye@localhost logstash5.0]$ mkdir /home/local/logstash5.0/etc
2.创建事件配置文件
[xingye@localhost logstash5.0]$ vim /home/local/logstash5.0/etc/filebeattoes.conf
输入i,进入编辑状态,内容如下
input {
beats{
port => "5044"
}
}
output{
elasticsearch{
hosts => ["192.168.13.149:9200"]
index => "logstash-%{+YYYY.MM.dd}"
flush_size => 20000
idle_flush_time =>10
}
}
Esc退出编辑状态
:wq 写入并退出
filebeat监听日志文件变化,并收集发送给logstash
在data节点虚拟机上
因为在filebeat.yml文件中监听的路径为/home/local/logs
创建启动配置(下面这段仅为 Logstash 输出到 Elasticsearch 的示例片段,实际使用的完整配置见其后 master 节点上的 filebeattoes.conf)
output {
elasticsearch {
hosts => "192.168.13.149:9200"
manage_template => false
index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
document_type => "%{[@metadata][type]}"
}
}
[xingye@master etc]$ cat filebeattoes.conf
input {
beats{
port => "5044"
}
}
filter {
grok {
match => { "message" => "%{TIMESTAMP_ISO8601:date}\s%{LOGLEVEL:loglevel}\s%{IP:client}/%{USER:auth}/%{WORD:method}/%{NOTSPACE:request}" }
}
}
output{
elasticsearch{
hosts => ["192.168.13.149:9200"]
index => "logstash-%{+YYYY.MM.dd}"
flush_size => 20000
idle_flush_time =>10
}
}
创建日志文件夹,日志文件
[xingye@localhost filebeat6.1]$ mkdir /home/local/logs
[xingye@localhost logs]$ vim /home/local/logs/test.log
写入测试数据
2016-12-01 11:18:05,550 INFO 127.0.0.1/zhangsan/GET/http://127.0.0.1:8080/xxxx/xxxx.html?xxxx
关闭master和data节点的防火墙
[xingye@localhost logs]$ su root
Password:
[root@localhost logs]# service iptables stop
iptables: Setting chains to policy ACCEPT: filter [ OK ]
iptables: Flushing firewall rules: [ OK ]
iptables: Unloading modules: [ OK ]
启动master节点上elasticsearch
[xingye@localhost bin]$ cd /home/local/elasticsearch5.0/bin
[xingye@localhost bin]$ ./elasticsearch
在后台启动
[xingye@localhost bin]$ ./elasticsearch -d
启动master节点上logstash
[xingye@localhost logstash5.0]$ cd /home/local/logstash5.0
[xingye@localhost logstash5.0]$ bin/logstash -f etc/filebeattoes.conf
有点慢,耐心等,如下显示表示启动成功
Successfully started Logstash API endpoint {:port=>9600}
安装logstash服务
root用户
bin/system-install
设置服务自启动:systemctl enable logstash
启动服务:systemctl start logstash
停止服务:systemctl stop logstash
重启服务:systemctl restart logstash
查看服务状态:systemctl status logstash
启动data节点上filebeat
[xingye@localhost filebeat6.1]$ ./filebeat -e -c filebeat.yml -d "publish"
后台启动
nohup ./filebeat -e -c filebeat.yml >/dev/null 2>&1 &
可以看到
filebeat控制台打印
2017/12/25 12:48:37.380277 processor.go:275: DBG [publish] Publish event: {
"@timestamp": "2017-12-25T12:48:37.379Z",
"@metadata": {
"beat": "filebeat",
"type": "doc",
"version": "6.1.0"
},
"source": "/home/local/logs/test.log",
"offset": 282,
"message": "2016-12-01 11:18:05,550 INFO 127.0.0.1/zhangsan/GET/http://127.0.0.1:8080/xxxx/xxxx.html?xxxx",
"prospector": {
"type": "log"
},
"beat": {
"name": "localhost.localdomain",
"hostname": "localhost.localdomain",
"version": "6.1.0"
}
}
查询elasticsearch
[xingye@localhost ~]$ curl -XGET '127.0.0.1:9200/_search/?pretty'
{
"_index" : "logstash-2017.12.25",
"_type" : "logs",
"_id" : "AWCNw5D12mbJQgn96cHq",
"_score" : 1.0,
"_source" : {
"@timestamp" : "2017-12-25T12:55:47.463Z",
"offset" : 188,
"@version" : "1",
"beat" : {
"hostname" : "localhost.localdomain",
"name" : "localhost.localdomain",
"version" : "6.1.0"
},
"host" : "localhost.localdomain",
"prospector" : {
"type" : "log"
},
"source" : "/home/local/logs/test.log",
"message" : "2016-12-01 11:18:05,550 INFO 127.0.0.1/zhangsan/GET/http://127.0.0.1:8080/xxxx/xxxx.html?xxxx",
"tags" : [
"beats_input_codec_plain_applied"
]
}
}
修改logstash的事件配置文件
input {
beats{
port => "5044"
}
}
filter {
grok {
match => { "message" => "%{TIMESTAMP_ISO8601:date}\s%{LOGLEVEL:loglevel}\s%{IP:client}/%{USER:auth}/%{WORD:method}/%{NOTSPACE:request}" }
}
}
output{
elasticsearch{
hosts => ["192.168.13.149:9200"]
index => "logstash-%{+YYYY.MM.dd}"
flush_size => 20000
idle_flush_time =>10
}
}
再次在data节点log文件写入相同的测试数据
vim /home/local/logs/test.log
再次查看elasticsearch
curl -XGET '127.0.0.1:9200/_search/?pretty'
会发现logstash将日志格式化,新增了date,request等字段
"hits" : [
{
"_index" : "logstash-2017.12.25",
"_type" : "logs",
"_id" : "AWCN4yIpHCxaiZAuFzNw",
"_score" : 1.0,
"_source" : {
"date" : "2016-12-01 11:18:05,550",
"request" : "http://127.0.0.1:8080/xxxx/xxxx.html?xxxx",
"offset" : 658,
"method" : "GET",
"auth" : "zhangsan",
"prospector" : {
"type" : "log"
},
"source" : "/home/local/logs/test.log",
"message" : "2016-12-01 11:18:05,550 INFO 127.0.0.1/zhangsan/GET/http://127.0.0.1:8080/xxxx/xxxx.html?xxxx",
"tags" : [
"beats_input_codec_plain_applied"
],
"@timestamp" : "2017-12-25T13:35:58.134Z",
"loglevel" : "INFO",
"@version" : "1",
"beat" : {
"hostname" : "localhost.localdomain",
"name" : "localhost.localdomain",
"version" : "6.1.0"
},
"host" : "localhost.localdomain",
"client" : "127.0.0.1"
}
}
]