环境:
centos7
192.168.179.130:jdk,zookeeper,kafka,filebeat,elasticsearch
192.168.179.131:jdk,zookeeper,kafka,logstash
192.168.179.132:jdk,zookeeper,kafka,kibana
elasticsearch包:
链接: https://pan.baidu.com/s/15_W_Kqfu0Gvk14bFunXNGg 提取码: 4a99 复制这段内容后打开百度网盘手机App,操作更方便哦
jdk包:
链接: https://pan.baidu.com/s/1YXeyNrqLHAQuj7LVQwxepQ 提取码: njmf 复制这段内容后打开百度网盘手机App,操作更方便哦
logstash包:
链接: https://pan.baidu.com/s/1hcRpNOEG5_5RfGgAulD1dA 提取码: jrcg 复制这段内容后打开百度网盘手机App,操作更方便哦
kibana包:
链接: https://pan.baidu.com/s/185sbP5Ey4WkNuh7pNA5nIg 提取码: 5fae 复制这段内容后打开百度网盘手机App,操作更方便哦
zookeeper包:
链接: https://pan.baidu.com/s/1HSyYOrgDBt1IUcdejytdYw 提取码: 9itu 复制这段内容后打开百度网盘手机App,操作更方便哦
kafka包:
链接: https://pan.baidu.com/s/1Az07NZzskFvcbsyFpqJ7oQ 提取码: b4jv 复制这段内容后打开百度网盘手机App,操作更方便哦
filebeat包:
链接: https://pan.baidu.com/s/1QM61Te_Jj7hg5z8KTTfhjg 提取码: 5zqf 复制这段内容后打开百度网盘手机App,操作更方便哦
ntpdate pool.ntp.org
[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# setenforce 0
[root@localhost ~]# hostnamectl set-hostname kafka01
[root@localhost ~]# hostnamectl set-hostname kafka02
[root@localhost ~]# hostnamectl set-hostname kafka03
三台机器都编辑 vim /etc/hosts 添加:
192.168.179.130 kafka01
192.168.179.131 kafka02
192.168.179.132 kafka03
[root@kafka03 src]# rpm -ivh jdk-8u131-linux-x64_.rpm
[root@kafka01 src]# tar xzf zookeeper-3.4.14.tar.gz
mv zookeeper-3.4.14 /usr/local/zookeeper
cd /usr/local/zookeeper/conf/
mv zoo_sample.cfg zoo.cfg
server.1=192.168.179.130:2888:3888
server.2=192.168.179.131:2888:3888
server.3=192.168.179.132:2888:3888
mkdir /tmp/zookeeper
echo "1" > /tmp/zookeeper/myid
echo "2" > /tmp/zookeeper/myid
echo "3" > /tmp/zookeeper/myid
/usr/local/zookeeper/bin/zkServer.sh start(按顺序启动1,2,3)
[root@kafka03 conf]# /usr/local/zookeeper/bin/zkServer.sh status
一个leader
两个follower
tar zxvf kafka_2.11-2.2.0.tgz
mv kafka_2.11-2.2.0 /usr/local/kafka
broker.id=分别为0,1,2(3个机子)
advertised.listeners=PLAINTEXT://(主机名kafka01,kafka02,kafka03):9092
zookeeper.connect=192.168.179.130:2181,192.168.179.131:2181,192.168.179.132:2181
/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
验证:
tailf /usr/local/kafka/logs/server.log 查看日志
[root@kafka03 src]# netstat -lptnu|grep 9092
tcp6 0 0 :::9092 :::* LISTEN 9814/java
[root@kafka01 logs]# /usr/local/kafka/bin/kafka-topics.sh --create --zookeeper 192.168.179.131:2181 --replication-factor 2 --partitions 3 --topic wg007
Created topic wg007.
netstat -lptnu|grep 2181 查看端口
usr/local/kafka/bin/
./kafka-console-producer.sh --broker-list 192.168.179.131:9092 --topic wg007
/usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.179.131:9092 --topic wg007 --from-beginning
130:哈哈哈
131:显示哈哈哈
[root@kafka02 bin]# /usr/local/kafka/bin/kafka-topics.sh --list --zookeeper 192.168.179.130:2181
__consumer_offsets
msg
wg007
[root@kafka01 src]# rpm -ivh filebeat-6.8.12-x86_64.rpm
cd /etc/filebeat
mv filebeat.yml filebeat.yml.bak
vim filebeat.yml
[root@kafka01 filebeat]# cat filebeat.yml
filebeat.inputs:
- type: log
enabled: true
paths:
- /var/log/messages
output.kafka:
enabled: true
hosts: ["192.168.179.130:9092","192.168.179.131:9092","192.168.179.132:9092"]
topic: msg
systemctl start filebeat
tailf /var/log/filebeat/filebeat
[root@kafka02 ELK]# rpm -ivh logstash-6.6.0.rpm
vim /etc/logstash/conf.d/msg.conf
input{
kafka{
bootstrap_servers => ["192.168.179.130:9092,192.168.179.131:9092,192.168.179.132:9092"]
group_id => "logstash"
topics => "msg"
consumer_threads => 5
}
}
output{
elasticsearch {
hosts => "192.168.179.130:9200"
index => "msg-%{+YYYY.MM.dd}"
}
}
systemctl start logstash
tailf /var/log/logstash/logstash-plain.log
vim /etc/elasticsearch/elasticsearch.yml
[root@bogon ELK]# cat /etc/elasticsearch/elasticsearch.yml |grep -v "^#"
cluster.name: wg007
node.name: node-1
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.179.130
http.port: 9200
tailf /var/log/elasticsearch/wg007.log
netstat -lptnu|grep 9200
rpm -ivh kibana-6.6.2-x86_64.rpm
编辑vim /etc/kibana/kibana.yml
[root@bogon ELK]# cat /etc/kibana/kibana.yml |grep -v "^#"|sed '/^$/d'
server.port: 5601
server.host: "192.168.179.132"
elasticsearch.hosts: ["http://192.168.179.130:9200"]
systemctl start kibana
netstat -lptnu|grep 5601
登录网页192.168.179.132:5601