Download the required versions from the official site: https://www.elastic.co/downloads/
Kibana 6 and later versions add the ability to view the surrounding log context of an entry.
Recent Logstash and Elasticsearch releases require JDK 1.8.
elasticsearch-6.7.0-linux-x86_64.tar.gz  jdk-8u121-linux-x64.rpm
kibana-6.7.0-linux-x86_64.tar.gz  logstash-6.7.0.tar.gz
After downloading, install the JDK (rpm -ivh jdk-8u121-linux-x64.rpm) and configure the environment variables:
vim /etc/profile
Append the following at the end of the file:
JAVA_HOME=/usr/java/jdk1.8.0_121
PATH=$PATH:$JAVA_HOME/bin
export JAVA_HOME PATH
source /etc/profile
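A quick check that the JDK is picked up correctly (the version string should match the RPM installed above):
java -version          # should report java version "1.8.0_121"
echo $JAVA_HOME        # should print /usr/java/jdk1.8.0_121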
Extract and configure ELK
tar -xvf elasticsearch-6.7.0-linux-x86_64.tar.gz
tar -xvf kibana-6.7.0-linux-x86_64.tar.gz
tar -xvf logstash-6.7.0.tar.gz
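The paths used later (/software/elasticsearch/..., chown -R esroot elasticsearch/) assume the extracted directories are renamed to drop the version suffix; this is an assumption, adjust the paths if you keep the original names. For example:
mv elasticsearch-6.7.0 elasticsearch
mv kibana-6.7.0-linux-x86_64 kibana
mv logstash-6.7.0 logstash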
Configure Elasticsearch
vim config/elasticsearch.yml    # add the following settings
cluster.name: es_cluster
node.name: node0
path.data: /software/elasticsearch/data
path.logs: /software/elasticsearch/logs
network.host: 0.0.0.0
http.port: 9200
transport.tcp.port: 9300
Elasticsearch will not run as root, so create a dedicated user:
useradd esroot
passwd esroot
chown -R esroot elasticsearch/
su esroot
./bin/elasticsearch    # starting ES at this point fails with errors like the following:
ERROR: [3] bootstrap checks failed
[1]: max file descriptors [4096] for elasticsearch process is too low, increase to at least [65535]
[2]: max number of threads [1804] for user [esroot] is too low, increase to at least [4096]
[3]: max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
vim /etc/security/limits.conf    # add the following lines
* soft nofile 65536
* hard nofile 65536
* soft nproc 4096
* hard nproc 4096
vim /etc/sysctl.conf    # add the following line
vm.max_map_count=262144
sysctl -p
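The limits.conf changes only apply to sessions opened after the edit; from a fresh esroot session the values can be checked against what the bootstrap checks require:
ulimit -n                  # expect 65536
ulimit -u                  # expect 4096
sysctl vm.max_map_count    # expect 262144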
Start Elasticsearch again.
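Once it starts without bootstrap errors, Elasticsearch should answer on port 9200; a quick smoke test (adjust the host to your setup):
curl http://192.168.1.237:9200
# expect a JSON response containing "cluster_name" : "es_cluster" and the 6.7.0 version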
Configure Logstash
If the server's default Java is not JDK 1.8, you can add the following to the startup script (bin/logstash):
export JAVA_HOME=/usr/java/jdk1.8
export JRE_HOME=/usr/java/jdk1.8/jre
vim config/logstash.conf    # create the pipeline configuration below
input {
  file {
    type => "CRM"
    path => "/software/crm_log_.txt"
    start_position => "beginning"
    codec => multiline {
      pattern => "%{TIMESTAMP_ISO8601}"
      negate => true
      what => "previous"
    }
  }
}
filter {
  if [type] == "CRM" {
    grok {
      match => { "message" => "%{TIMESTAMP_ISO8601:timestamp} %{LOGLEVEL:level} %{JAVALOGMESSAGE:msg}" }
    }
  }
}
output {
  elasticsearch {
    action => "index"    # the operation on ES
    hosts => "192.168.1.237:9200"
    index => "tomcatlog_%{+YYYY.MM.dd}"
    user => "esadmin"
    password => "kcwl2017"
  }
}
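Before starting, the pipeline syntax can be validated with Logstash's built-in config test (--config.test_and_exit, or -t for short):
./bin/logstash -f config/logstash.conf --config.test_and_exit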
Start it: ./bin/logstash -f config/logstash.conf
Configure Kibana
vim config/kibana.yml    # add the following settings
server.port: 5601
server.host: "192.168.1.237"
elasticsearch.url: "http://192.168.1.237:9200"    # the Elasticsearch instance Kibana connects to
elasticsearch.username: "esadmin"
elasticsearch.password: "admin996"
kibana.index: ".kibana"
Start it: ./bin/kibana
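To confirm Kibana is up before opening the browser, its status API can be queried (a standard Kibana endpoint):
curl http://192.168.1.237:5601/api/status
# the JSON response reports Kibana's overall status (green once it has connected to Elasticsearch)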
In Kibana, create an index pattern; it must match the index name defined in the Logstash config (tomcatlog_*). You can first confirm the index exists as shown below.
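Checking for the index with Elasticsearch's _cat API:
curl 'http://192.168.1.237:9200/_cat/indices?v'    # should list an index named tomcatlog_YYYY.MM.dd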
Add HTTP basic authentication in front of the Kibana page
yum -y install httpd-tools nginx    # httpd-tools provides the htpasswd command
htpasswd -c -b htpasswd.kibana esadmin 123456
cp htpasswd.kibana /etc/nginx/
vim /etc/nginx/conf.d/kibana.conf    # add the following server block
server {
  listen 80;
  server_name 192.168.1.237;                        # server name
  auth_basic "Restricted Access";
  auth_basic_user_file /etc/nginx/htpasswd.kibana;  # basic-auth credential file
  location / {
    proxy_pass http://192.168.1.237:5601;  # forward to Kibana; use the internal IP here. On an Alibaba Cloud server that must be reached via a domain name, use the public IP instead
    proxy_http_version 1.1;                # needed so the Upgrade header below is honored
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection 'upgrade';
    proxy_set_header Host $host;
    proxy_cache_bypass $http_upgrade;
  }
}
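Check the nginx configuration syntax before starting the service:
nginx -t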
systemctl start nginx
Open the page in a browser to verify that the authentication prompt appears and Kibana loads after logging in.
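The proxy and the basic-auth prompt can also be verified from the command line, using the htpasswd credentials created above:
curl -I http://192.168.1.237/                      # expect 401 Unauthorized without credentials
curl -I -u esadmin:123456 http://192.168.1.237/    # expect a non-401 response (Kibana typically redirects to /app/kibana)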