EFK中间件zookeeper,kafka

准备三台2x2的Centos7.0的虚拟机
三台为一组
每一台都要安装jdk

rz
jdk-8u131-linux-x64.rpm
rpm -ivh jdk-8u131-linux-x64.rpm

在/usr/local/src/ 传入 zookeeper-3.4.14.tar.gz 和 kafka_2.11-2.2.0.tgz
#kafka 需要解压并安装到 /usr/local/kafka(后文的命令都依赖这个路径):
#tar zxvf kafka_2.11-2.2.0.tgz && mv kafka_2.11-2.2.0 /usr/local/kafka

tar zxvf zookeeper-3.4.14.tar.gz
mv zookeeper-3.4.14  zookeeper   #解压产生的目录名是 zookeeper-3.4.14,不是 .tar.gz 包本身
mv zookeeper /usr/local/
cd zookeeper/
mkdir {zkdata,zkdatalog}
cd conf
cp zoo_sample.cfg   zoo.cfg
vim zoo.cfg(加入以下5行)
   dataDir=/usr/local/zookeeper/zkdata
   dataLogDir=/usr/local/zookeeper/zkdatalog
   server.1=192.168.234.142:2888:3888
   server.2=192.168.234.128:2888:3888
   server.3=192.168.234.132:2888:3888
:wq

在142上

echo '1' >/usr/local/zookeeper/zkdata/myid   #用 > 覆盖写入;用 >> 重复执行会追加出多行,导致 myid 非法
cat /usr/local/zookeeper/zkdata/myid
#会返回一个1

在128上

echo '2' >/usr/local/zookeeper/zkdata/myid   #用 > 覆盖写入,避免重复执行追加出多行
cat /usr/local/zookeeper/zkdata/myid
#会返回一个2

在132上

echo '3' >/usr/local/zookeeper/zkdata/myid   #用 > 覆盖写入,避免重复执行追加出多行
cat /usr/local/zookeeper/zkdata/myid
#会返回一个3
*************************************
cd /usr/local/zookeeper/bin
./zkServer.sh start

在142上

cd /usr/local/kafka/config/
vim server.properties
broker.id=1
advertised.listeners=PLAINTEXT://kafka01:9092
zookeeper.connect=192.168.234.142:2181,192.168.234.128:2181,192.168.234.132:2181

在128上

cd /usr/local/kafka/config/
vim server.properties
broker.id=2
advertised.listeners=PLAINTEXT://kafka02:9092
zookeeper.connect=192.168.234.142:2181,192.168.234.128:2181,192.168.234.132:2181

在132上

cd /usr/local/kafka/config/
vim server.properties
broker.id=3
advertised.listeners=PLAINTEXT://kafka03:9092
zookeeper.connect=192.168.234.142:2181,192.168.234.128:2181,192.168.234.132:2181
vim /etc/hosts   #此操作三台都要做
192.168.234.142 kafka01
192.168.234.128 kafka02
192.168.234.132 kafka03
:wq
cd /usr/local/kafka/bin
./kafka-server-start.sh -daemon ../config/server.properties
cd /usr/local/kafka/logs  查看
netstat -lptnu|grep 9092    #(9092)是kafka的端口
#(创建topic,后面随便加一台ip端口号:2181)  (复制两份)  (创建3个分区),不能小于节点数
./kafka-topics.sh --create --zookeeper 192.168.234.128:2181 --replication-factor 2 --partitions 3 --topic wg007
./kafka-topics.sh --list --zookeeper 192.168.234.128:2181

在第一台上:(生产者)

./kafka-console-producer.sh --broker-list 192.168.234.128:9092 --topic wg007

在第二台上:(消费者)

#ip是三台虚拟机任意一台就可以
/usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.234.142:9092 --topic wg007 --from-beginning
#第一台里写shenchanzhe
#第二台会返回shenchanzhe

在第一台里的/usr/local/kafka/bin下

vim kafka-create-topics.sh
#!/bin/bash
# Create a Kafka topic (replication-factor 2, 3 partitions).
# Usage: ./kafka-create-topics.sh [topic]
#   - If a topic name is given as $1, it is used directly.
#   - Otherwise fall back to the original interactive prompt.
# Fix: the original script read the name into $topic but then created the
# hardcoded topic "wg007"; it now actually uses "$topic".
topic=${1:-}
if [ -z "$topic" ]; then
  read -r -p "请输入你想创建的topic:" topic
fi
cd /usr/local/kafka/bin || exit 1
./kafka-topics.sh --create --zookeeper 192.168.234.128:2181 \
  --replication-factor 2 --partitions 3 --topic "$topic"
:wq 
chmod +x kafka-create-topics.sh
./kafka-create-topics.sh nginx_log

配置yum源filebeat

vim /etc/yum.repos.d/filebeat.repo
[filebeat-6.x]
name=Elasticsearch repository for 6.x packages
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
autorefresh=1
type=rpm-md
:wq #保存退出
#安装filebeat
yum -y install filebeat

修改配置文件

vim /etc/filebeat/filebeat.yml   #/etc/filebeat 是目录,配置文件是其中的 filebeat.yml
    paths:  #改成下面的形式
      - /var/log/nginx/*.log
#在文件末尾,把原来的 output 段改为输出到 kafka(单写 topic: 一行不是合法配置):
output.kafka:
  hosts: ["192.168.234.142:9092","192.168.234.128:9092","192.168.234.132:9092"]
  topic: nginx_log
:wq #保存退出
systemctl enable filebeat
systemctl start filebeat

在第二台上

/usr/local/kafka/bin/kafka-console-consumer.sh --bootstrap-server 192.168.234.142:9092 --topic nginx_log --from-beginning

小提示:(:g/^#/d)删除文件里的注释

你可能感兴趣的:(ELK)