1、elasticsearch-7.9.3
修改配置:
#Ubuntu需要加上node.name,CentOS 7可以不需要,尽量还是加上吧
node.name: node-1
network.host: 0.0.0.0
http.port: 9200
http.cors.enabled: true
http.cors.allow-origin: "*"
#这里可以换为cluster.initial_master_nodes: ["node-1"]
cluster.initial_master_nodes: ["192.168.28.129:9300"]
filebeat.inputs:
- type: log
enabled: true
paths:
# 配置我们要读取的 Spring Boot 应用的日志
- /home/aisys/logs/member-service/*.log
fields:
# #定义日志来源,添加了自定义字段
log_topic: member-service
- type: log
enabled: true
paths:
- /home/aisys/logs/yoostar-gateway/*.log
fields:
log_topic: yoostar-gateway
#----------------------------- kafka output --------------------------------
output.kafka:
enabled: true
hosts: ["192.168.28.128:9092"]
topic: 'tv-%{[fields][log_topic]}'
partition.round_robin:
reachable_only: false
required_acks: 1
compression: gzip
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://10.20.22.30:9200"]
i18n.locale: "zh-CN"
input {
kafka {
bootstrap_servers => "192.168.28.128:9092"
topics_pattern => "tv-.*"
consumer_threads => 5
decorate_events => true
codec => "json"
auto_offset_reset => "earliest"
#集群需要相同
group_id => "logstash1"
}
}
filter{
json{
source => "message"
target => "doc"
}
}
output{
elasticsearch{
action => "index"
hosts => ["192.168.28.128:9200"]
#索引里面如果有大写字母就无法根据topic动态生成索引,topic也不能有大写字母
index => "%{[fields][log_topic]}-%{+YYYY-MM-dd}"
}
stdout{
codec => rubydebug
}
}
// 下载
v10.9.0
配置环境变量
vim /etc/profile
export PATH=$PATH:/usr/local/node-v10.9.0-linux-x64/bin
刷新配置
source /etc/profile
执行npm install -g grunt-cli 编译源码
执行npm install 安装服务
如果出现install.js错误执行npm -g install phantomjs-prebuilt@2.1.16 --ignore-scripts
执行grunt server启动服务。或者 nohup grunt server >output 2>&1 &
启动服务之后访问http://10.20.22.30:9100/
yum -y install wget
wget https://mirror.bit.edu.cn/apache/kafka/2.5.0/kafka_2.13-2.5.0.tgz
tar -zxvf kafka_2.13-2.5.0.tgz
cd kafka_2.13-2.5.0/config/
vim server.properties
broker.id=0
port=9092 #端口号
host.name=172.30.0.9 #服务器IP地址,修改为自己的服务器IP
log.dirs=/usr/local/logs/kafka #日志存放路径,上面创建的目录
zookeeper.connect=localhost:2181 #zookeeper地址和端口,单机配置部署,localhost:2181
vim zookeeper_start.sh
启动zookeeper
/usr/local/kafka_2.13-2.5.0/bin/zookeeper-server-start.sh /usr/local/kafka_2.13-2.5.0/config/zookeeper.properties &
编写kafka启动脚本
vim kafka_start.sh
启动kafka
/usr/local/kafka_2.13-2.5.0/bin/kafka-server-start.sh /usr/local/kafka_2.13-2.5.0/config/server.properties &
编写zookeeper停止脚本
vim zookeeper_stop.sh
停止zookeeper
/usr/local/kafka_2.13-2.5.0/bin/zookeeper-server-stop.sh /usr/local/kafka_2.13-2.5.0/config/zookeeper.properties &
编写kafka停止脚本
vim kafka_stop.sh
停止kafka
/usr/local/kafka_2.13-2.5.0/bin/kafka-server-stop.sh /usr/local/kafka_2.13-2.5.0/config/server.properties &
启动关闭脚本赋予权限
chmod 777 kafka_start.sh
chmod 777 kafka_stop.sh
chmod 777 zookeeper_start.sh
chmod 777 zookeeper_stop.sh
./zookeeper_start.sh---------------------------------------------启动zookeeper
./kafka_start.sh----------------------------------------------------启动kafka
ps -ef | grep zookeeper------------------------------------------查看zookeeper进程状态
ps -ef | grep kafka-------------------------------------------------查看kafka进程状态
若出现kafka.common.InconsistentClusterIdException: The Cluster ID MoJxXReIRgeVz8GaoglyXw doesn’t match stored clusterId Some(t4eUcr1HTVC_VjB6h-vjyA) in meta.properties异常解决方法 意思是集群id跟元数据meta.properties中存储的不一致,导致启动失败。因此去查看meta.properties文件中的元数据信息。这个文件的存储路径是通过/config/server.properties配置文件中的log.dirs属性配置的。所以通过配置文件找到meta.properties,修改里面的cluster.id即可。 将异常信息中的Cluster ID MoJxXReIRgeVz8GaoglyXw写入
启动es出现以下错误是不能用root用户进行启动es
groupadd es
useradd es -g es -p es
chown -R es:es /usr/local/elasticsearch-7.9.3/
chown -R es:es /usr/local/kibana-7.9.3-linux-x86_64
su es
./elasticsearch -d
nohup bin/kibana >output 2>&1 &
访问 http://10.20.22.30:5601/ ,即可访问 kibana
su root #切换成root用户
nohup ./filebeat -e -c filebeat.yml >output 2>&1 &
nohup ./bin/logstash -f ./config/logstash.conf >output 2>&1 &
nohup java -jar zipkin-server-2.19.0-exec.jar --KAFKA_BOOTSTRAP_SERVERS=10.20.22.30:9092 --STORAGE_TYPE=elasticsearch --ES_HOSTS=http://10.20.22.30:9200 >output 2>&1 &
访问 http://10.20.22.30:9411/zipkin/ 即可查看zipkin
在/home/aisys/logs/yoostar-gateway放入日志文件
Elasticsearch查询索引数据
Kibana查看数据
./filebeat modules enable nginx
修改配置文件
vim modules.d/nginx.yml
由于我filebeat配置是一个日志文件对应一个topic所以还需要修改nginx对应的数据topic
vim module/nginx/error/config/nginx-error.yml
vim module/nginx/access/config/nginx-access.yml
再次重启filebeat大功告成