[1]: elk组件 https://www.elastic.co/cn/downloads/past-releases
1. logstash
https://artifacts.elastic.co/downloads/logstash/logstash-6.3.2.tar.gz
2. filebeat
https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.3.2-linux-x86_64.tar.gz
3. elasticsearch
https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.3.2.tar.gz
4. kibana
https://artifacts.elastic.co/downloads/kibana/kibana-6.3.2-linux-x86_64.tar.gz
安装filebeat
tar xf filebeat-6.3.2-linux-x86_64.tar.gz
mv filebeat-6.3.2-linux-x86_64 /usr/local/
ln -s /usr/local/filebeat-6.3.2-linux-x86_64 /usr/local/filebeat
配置filebeat
vim filebeat.yml
filebeat.inputs:
- type: log
enabled: true
paths:
- /var/log/*.log
fields:
index_name: host_name #自定义字段
filebeat.config.modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
setup.template.settings:
index.number_of_shards: 3
setup.kibana:
#host: "localhost:5601"
#将数据直接写到elasticsearch
output.elasticsearch:
hosts: ["localhost:9200"]
#将数据直接写到logstash(注意:filebeat 只能启用一个 output,启用 logstash 输出时需注释掉上面的 output.elasticsearch)
output.logstash:
hosts: ["localhost:5044"]
启动filebeat
$filebeat_home/filebeat --path.logs ./logs/ -c filebeat.yml
安装logstash
tar xf logstash-6.3.2.tar.gz
mv logstash-6.3.2 /usr/local/
ln -s /usr/local/logstash-6.3.2 /usr/local/logstash
配置logstash
vim $logstash_home/logstash.conf
#此文件没有,自己创建,此文件主要定义日志数据输入输出以及过滤操作
input {
beats { port => "5044" }
}
output {
#stdout { codec => json }
#stdout { codec => rubydebug }
elasticsearch {
hosts => "172.104.39.141:9200"
#自定义索引名
#index => "test-%{+YYYY.MM.dd}"
index => ["%{[fields][index_name]}-%{+YYYY.MM.dd}"]
#filebeat中定义的fields字段
}
}
启动logstash
$logstash_home/bin/logstash -f $logstash_home/logstash.conf
安装elasticsearch
tar xf elasticsearch-6.3.2.tar.gz
mv elasticsearch-6.3.2 /usr/local/
ln -s /usr/local/elasticsearch-6.3.2 /usr/local/elasticsearch
创建elasticsearch用户
useradd elasticsearch
修改elasticsearch目录权限
chown -R elasticsearch:elasticsearch $elasticsearch_home/
注意:若配置文件中指定了其他目录,也需要给权限
配置elasticsearch
vim $elasticsearch_home/config/elasticsearch.yml
node.name: node-1
path.data: /opt/es
path.logs: /var/logs/elasticsearch
network.host: 127.0.0.1
http.port: 9200
启动elasticsearch
su elasticsearch -c "$elasticsearch_home/bin/elasticsearch"
注意: elasticsearch不能使用root用户登录
安装kibana
tar xf kibana-6.3.2-linux-x86_64.tar.gz
mv kibana-6.3.2-linux-x86_64 /usr/local/
ln -s /usr/local/kibana-6.3.2-linux-x86_64 /usr/local/kibana
配置kibana
server.port: 5601
server.host: "192.168.1.100"
server.name: "test.rm-root.com"
elasticsearch.url: "http://127.0.0.1:9200"
启动kibana
$kibana_home/bin/kibana
#获取所有索引
curl 'localhost:9200/_cat/indices?v'
#删除指定索引
curl -XDELETE http://localhost:9200/index_name
#删除所有索引
curl -XDELETE http://localhost:9200/_all
#一次删除多个索引
curl -XDELETE http://localhost:9200/index_name,index_name
#!/usr/bin/python
import requests
import time
import re
import os
def get_all_index():
    """Fetch the index listing from the local Elasticsearch node.

    Returns:
        list[str]: the `_cat/indices?v` response split into lines. The
        first line is the column header; the last element is usually an
        empty string because the response ends with a newline.

    Raises:
        requests.RequestException (or any error from the HTTP call) —
        re-raised unchanged with its original traceback.
    """
    try:
        # Timeout guards against hanging forever if ES is unreachable.
        results = requests.get('http://localhost:9200/_cat/indices?v',
                               timeout=10)
        return results.text.split('\n')
    except Exception:
        # Bare `raise` preserves the original traceback (unlike `raise e`).
        raise
def get_before_time(before=0):
    """Return the local date `before` seconds ago, formatted YYYY.MM.DD.

    With the default `before=0` this is simply today's date.
    """
    target = time.time() - before
    return time.strftime('%Y.%m.%d', time.localtime(target))
def conv_timestamp(time_str, format='%Y-%m-%d'):
    """Convert a date string to a Unix timestamp (float, local time).

    `format` follows `time.strptime` directives; default is YYYY-MM-DD.
    """
    parsed = time.strptime(time_str, format)
    return time.mktime(parsed)
if __name__ == "__main__":
    # Delete time-series indices older than the retention window.
    RETENTION_SECONDS = 1209600  # 14 days
    all_index = get_all_index()
    # Match indices named "logstash-YYYY.MM.DD" or "cbosNN(N)-YYYY.MM.DD".
    # Raw string avoids invalid-escape warnings for `\.` on Python 3.
    regx = re.compile(r'(^logstash|^cbos[0-9]{2,3})-[0-9]{4}\.[0-9]{2}\.[0-9]{2}')
    # Cut-off is loop-invariant — compute it once, not per index.
    before_time_stamp = conv_timestamp(
        get_before_time(before=RETENTION_SECONDS), format='%Y.%m.%d')
    for index_line in all_index:
        # Skip blank lines (the _cat output ends with a trailing newline)
        # and any line too short to carry an index-name column.
        if not index_line:
            continue
        columns = index_line.split()
        if len(columns) < 3:
            continue
        index_name = columns[2]  # third _cat/indices column is the index name
        if re.search(regx, index_name):
            time_str = index_name.split('-')[1]
            index_time_stamp = conv_timestamp(time_str, '%Y.%m.%d')
            if before_time_stamp > index_time_stamp:
                url = 'http://localhost:9200/%s' % index_name
                delete_status = requests.delete(url)
                # print() syntax works on both Python 2 and 3.
                if delete_status.status_code == 200:
                    print("delete index %s success" % index_name)
                else:
                    print("delete index %s failed" % index_name)