yum -y localinstall elasticsearch-7.17.3-x86_64.rpm
vim /etc/elasticsearch/elasticsearch.yml
cluster.name: elk
node.name: elk01
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 0.0.0.0
discovery.seed_hosts: ["10.10.10.11","10.10.10.12","10.10.10.13"]
cluster.initial_master_nodes: ["10.10.10.11","10.10.10.12","10.10.10.13"]
Parameter descriptions:
cluster.name: name of the cluster; must be identical on every node.
node.name: unique name of this node within the cluster.
path.data / path.logs: data and log directories.
network.host: bind address; 0.0.0.0 listens on all interfaces.
discovery.seed_hosts: hosts contacted during node discovery.
cluster.initial_master_nodes: master-eligible nodes used to bootstrap the cluster on first startup.
data_rsync.sh /etc/elasticsearch/elasticsearch.yml
Then change node.name on the other two nodes accordingly.
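For example (a minimal sketch; elk02 and elk03 are assumed names for the other two nodes):
sed -i 's/^node.name: elk01/node.name: elk02/' /etc/elasticsearch/elasticsearch.yml   # run on the second node
sed -i 's/^node.name: elk01/node.name: elk03/' /etc/elasticsearch/elasticsearch.yml   # run on the third node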
Wipe any stale data, logs, and temp files so the nodes bootstrap as a fresh cluster:
rm -rf /var/{lib,log}/elasticsearch/* /tmp/*
ll /var/{lib,log}/elasticsearch/ /tmp/
Start elasticsearch on all three nodes:
systemctl start elasticsearch
9. Verify the cluster status
curl 10.10.10.11:9200/_cat/nodes?v
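A cluster health check is also useful; a "green" status means all primary and replica shards are allocated:
curl 10.10.10.11:9200/_cat/health?v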
Install Kibana:
yum -y localinstall kibana-7.17.3-x86_64.rpm
vim /etc/kibana/kibana.yml
...
server.host: "10.10.10.11"
server.name: "kibana-server"
elasticsearch.hosts:
["http://10.10.10.11:9200","http://10.10.10.12:9200","http://10.10.10.13:9200"]
i18n.locale: "en"
systemctl enable --now kibana
systemctl status kibana
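Kibana listens on TCP port 5601 by default; verify it is up (using the server.host set above):
ss -ntl | grep 5601
curl -I http://10.10.10.11:5601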
Install filebeat and create a directory to hold the test configs:
yum -y localinstall filebeat-7.17.3-x86_64.rpm
mkdir -p /etc/filebeat/config
(1) Write a minimal stdin-to-console config:
cat > /etc/filebeat/config/01-stdin-to-console.yml <<'EOF'
# Define the input
filebeat.inputs:
# "stdin" reads events from standard input
- type: stdin
# Define the output
output.console:
  # Pretty-print the output
  pretty: true
EOF
(2) Run the filebeat instance; type a line in the console and filebeat prints the processed event back to the console:
filebeat -e -c /etc/filebeat/config/01-stdin-to-console.yml
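You can also feed it a line non-interactively for a quick smoke test (filebeat keeps running afterwards; stop it with Ctrl+C):
echo "hello filebeat" | filebeat -e -c /etc/filebeat/config/01-stdin-to-console.yml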
Next, collect log files and print the events to the console (saved here as the hypothetical /etc/filebeat/config/02-log-to-console.yml):
filebeat.inputs:
- type: log
  # Whether this input is enabled; default is true
  enabled: true
  # Paths to collect from
  paths:
    - /tmp/test.log
    - /tmp/*.txt
  # Attach tags to events from this input
  tags: ["测试","容器运维","DBA运维","SRE运维工程师"]
  # Custom fields
  fields:
    school: "出去了化工大学"
- type: log
  enabled: true
  paths:
    - /tmp/test/*/*.log
  tags: ["kafka","云原生开发"]
  fields:
    name: "测试"
    hobby: "linux,抖音"
  # Put the custom key-value pairs at the top level of the event.
  # Default is false, which nests them under a "fields" key.
  fields_under_root: true
output.console:
  pretty: true
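Generate some matching test data and run it (the file names are arbitrary examples matching the globs above):
echo "first test line" >> /tmp/test.log
echo "second test line" >> /tmp/demo.txt
filebeat -e -c /etc/filebeat/config/02-log-to-console.yml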
To ship the same inputs to Elasticsearch instead, switch the output (saved here as the hypothetical /etc/filebeat/config/03-log-to-es.yml):
filebeat.inputs:
- type: log
  # Whether this input is enabled; default is true
  enabled: true
  # Paths to collect from
  paths:
    - /tmp/test.log
    - /tmp/*.txt
  # Attach tags to events from this input
  tags: ["测试","容器运维","DBA运维","SRE运维工程师"]
  # Custom fields
  fields:
    school: "出去了化工大学"
- type: log
  enabled: true
  paths:
    - /tmp/test/*/*.log
  tags: ["kafka","云原生开发"]
  fields:
    name: "测试"
    hobby: "linux,抖音"
  # Put the custom key-value pairs at the top level of the event.
  # Default is false, which nests them under a "fields" key.
  fields_under_root: true
output.elasticsearch:
  hosts: ["http://10.10.10.11:9200","http://10.10.10.12:9200","http://10.10.10.13:9200"]
You can then query the corresponding log records in Kibana via the configured index template and index pattern.
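To confirm that events arrived, list the indices first and look for the filebeat-* entries:
curl 10.10.10.11:9200/_cat/indices?v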
4. Configure log filtering
Both options belong under an entry in filebeat.inputs:
# Whitelist: collect only lines matching one of these regular expressions
include_lines: ['^ERROR', '^WARN','DANGER']
# Blacklist: drop lines matching one of these regular expressions
exclude_lines: ['^DEBUG',"TEST"]
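For example, given these four lines, the first, second, and fourth are collected and the DEBUG line is dropped (DANGER matches anywhere in the line, since that regex is not anchored):
ERROR disk full
WARN memory usage high
DEBUG heartbeat ok
INFO DANGER threshold reached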
To write to an index of your own instead of the default, set the index name and configure a matching index template (saved here as the hypothetical /etc/filebeat/config/04-log-to-es-index.yml):
filebeat.inputs:
- type: log
  # Whether this input is enabled; default is true
  enabled: true
  # Paths to collect from
  paths:
    - /tmp/test.log
    - /tmp/*.txt
  # Attach tags to events from this input
  tags: ["测试","容器运维","DBA运维","SRE运维工程师"]
  # Custom fields
  fields:
    school: "出去了化工大学"
- type: log
  enabled: true
  paths:
    - /tmp/test/*/*.log
  tags: ["kafka","云原生开发"]
  fields:
    name: "测试"
    hobby: "linux,抖音"
  # Put the custom key-value pairs at the top level of the event.
  # Default is false, which nests them under a "fields" key.
  fields_under_root: true
output.elasticsearch:
  hosts: ["http://10.10.10.11:9200","http://10.10.10.12:9200","http://10.10.10.13:9200"]
  index: "es-linux-eslog-%{+yyyy.MM.dd}"
# Disable index lifecycle management; required when using a custom index name
setup.ilm.enabled: false
# Name of the index template
setup.template.name: "es-linux-eslog"
# Index patterns the template applies to
setup.template.pattern: "es-linux-eslog*"
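After restarting filebeat and appending a few log lines, the daily index should show up:
curl '10.10.10.11:9200/_cat/indices/es-linux-eslog*?v'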
To route events to different indices based on their content, replace "index" with an "indices" list plus conditions (saved here as the hypothetical /etc/filebeat/config/05-log-to-es-indices.yml):
filebeat.inputs:
- type: log
  # Whether this input is enabled; default is true
  enabled: true
  # Paths to collect from
  paths:
    - /tmp/test.log
    - /tmp/*.txt
  # Attach tags to events from this input
  tags: ["测试","容器运维","DBA运维","SRE运维工程师"]
  # Custom fields
  fields:
    school: "出去了化工大学"
- type: log
  enabled: true
  paths:
    - /tmp/test/*/*.log
  tags: ["kafka","云原生开发"]
  fields:
    name: "测试"
    hobby: "linux,抖音"
  # Put the custom key-value pairs at the top level of the event.
  # Default is false, which nests them under a "fields" key.
  fields_under_root: true
output.elasticsearch:
  hosts: ["http://10.10.10.11:9200","http://10.10.10.12:9200","http://10.10.10.13:9200"]
  indices:
    - index: "es-linux-eslog-DBA-%{+yyyy.MM.dd}"
      # Match when the given field contains this value
      when.contains:
        tags: "DBA运维"
    - index: "es-linux-eslog-kafka-%{+yyyy.MM.dd}"
      when.contains:
        tags: "kafka"
# Disable index lifecycle management
setup.ilm.enabled: false
# Name of the index template
setup.template.name: "es-linux-eslog"
# Index patterns the template applies to
setup.template.pattern: "es-linux-eslog*"
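A quick routing test; the subdirectory name is arbitrary, it just has to match the /tmp/test/*/*.log glob:
echo "dba test line" >> /tmp/test.log
mkdir -p /tmp/test/demo
echo "kafka test line" >> /tmp/test/demo/app.log
Events from the first input (tagged DBA运维) land in es-linux-eslog-DBA-*, and events from the second (tagged kafka) in es-linux-eslog-kafka-*.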
Multi-line merging: to combine continuation lines (such as stack traces) into a single event, add a multiline section to the log input:
  # Use pattern-based multiline matching
  multiline.type: pattern
  # Collect from one line starting with "[" up to the next line starting with "["
  multiline.pattern: '^\['
  # negate: true inverts the pattern match; match: after appends the
  # non-matching lines after the matching line (see the diagram in the official docs)
  multiline.negate: true
  multiline.match: after
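For example, with the settings above these four lines produce two events; the indented lines do not match '^\[' and are therefore appended after the preceding "[" line:
[2022-05-01 10:00:00] first event
    at com.example.Foo.bar (continuation line)
[2022-05-01 10:00:05] second event
    at com.example.Baz.qux (continuation line)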