Following the previous post, where we installed a single-node Kafka, we now turn that single-node setup into a Kafka cluster. Reusing the existing configuration and distributing it to the other machines saves a lot of repeated work.
First, install the JDK and extract ZooKeeper and Kafka on the other two machines.
Remove the old SSH credentials on all three machines so fresh keys can be set up for file distribution
rm -rf ~/.ssh
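With the old credentials gone, passwordless SSH needs to be re-established so the scp commands below work without prompting. A minimal sketch, assuming key-based auth between the three hosts (run on CentOS101 and repeat on the other two nodes, once the /etc/hosts entries below are in place or using the IP addresses directly):
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa   # generate a new key pair without a passphrase
ssh-copy-id CentOS101                      # push the public key to every node, including itself
ssh-copy-id CentOS102
ssh-copy-id CentOS103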
Edit the hosts file on CentOS101
vi /etc/hosts
# Add the following entries
172.16.2.130 CentOS101
172.16.2.131 CentOS102
172.16.2.132 CentOS103
ping CentOS102
ping CentOS103
# Both hosts respond, so the mapping is correct
# Copy /etc/hosts from CentOS101 to CentOS102 and CentOS103
scp /etc/hosts CentOS102:/etc/
scp /etc/hosts CentOS103:/etc/
# Copy the environment variables (.bashrc) from CentOS101 to CentOS102 and CentOS103
scp .bashrc CentOS102:~/
scp .bashrc CentOS103:~/
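For reference, the environment variables being copied here come from the single-node post; the exact JAVA_HOME path is an assumption, while the ZooKeeper and Kafka paths match the directories used in this post:
# ~/.bashrc (values assumed from the single-node setup)
export JAVA_HOME=/usr/java/latest
export ZOOKEEPER_HOME=/usr/zookeeper-3.4.6
export KAFKA_HOME=/usr/kafka_2.11-2.2.0
export PATH=$PATH:$JAVA_HOME/bin:$ZOOKEEPER_HOME/bin:$KAFKA_HOME/bin
Remember to source ~/.bashrc (or log in again) on CentOS102 and CentOS103 after copying.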
# Stop the firewall on all three machines and disable it at boot (otherwise the ZooKeeper/Kafka ports will be blocked)
service iptables stop
chkconfig iptables off
chkconfig --list | grep iptables
# Install NTP for clock synchronization
yum install ntp -y
# The install fails with the following error:
#YumRepo Error: All mirror URLs are not using ftp, http[s] or file.
#Eg. Invalid release/repo/arch combination/
# Don't panic: the default CentOS mirrors are hosted overseas and unreachable here, so switch to the CentOS 6.10 vault on the Tsinghua University mirror:
#https://mirrors.tuna.tsinghua.edu.cn/centos-vault/6.10/os/x86_64/
# Update the repo file as follows:
vi /etc/yum.repos.d/CentOS-Base.repo
[base]
#baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos-vault/6.10/os/x86_64/
gpgcheck=1
#gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
gpgkey=https://mirrors.tuna.tsinghua.edu.cn/centos-vault/6.10/os/x86_64/RPM-GPG-KEY-CentOS-6
[updates]
#baseurl=http://mirror.centos.org/centos/$releasever/updates/$basearch/
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos-vault/6.10/updates/x86_64/
gpgcheck=1
#gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
gpgkey=https://mirrors.tuna.tsinghua.edu.cn/centos-vault/6.10/os/x86_64/RPM-GPG-KEY-CentOS-6
[extras]
#baseurl=http://mirror.centos.org/centos/$releasever/extras/$basearch/
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos-vault/6.10/extras/x86_64/
gpgcheck=1
#gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
gpgkey=https://mirrors.tuna.tsinghua.edu.cn/centos-vault/6.10/os/x86_64/RPM-GPG-KEY-CentOS-6
[centosplus]
#baseurl=http://mirror.centos.org/centos/$releasever/centosplus/$basearch/
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos-vault/6.10/centosplus/x86_64/
gpgcheck=1
enabled=0
#gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
gpgkey=https://mirrors.tuna.tsinghua.edu.cn/centos-vault/6.10/os/x86_64/RPM-GPG-KEY-CentOS-6
[contrib]
#baseurl=http://mirror.centos.org/centos/$releasever/contrib/$basearch/
baseurl=https://mirrors.tuna.tsinghua.edu.cn/centos-vault/6.10/contrib/x86_64/
gpgcheck=1
enabled=0
#gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-6
gpgkey=https://mirrors.tuna.tsinghua.edu.cn/centos-vault/6.10/os/x86_64/RPM-GPG-KEY-CentOS-6
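If yum keeps reporting the old mirrors after editing the repo file, its cached metadata may be stale; clearing and rebuilding the cache is a standard yum step (not part of the original post):
yum clean all
yum makecache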
# With the repos fixed, install the NTP time-sync service again
yum install ntp -y
# Sync the system clock from an NTP server and write it back to the hardware clock
ntpdate ntp1.aliyun.com
clock -w
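Optionally, instead of relying on a one-off ntpdate, the ntpd service can be kept running so the clocks stay in sync; these are the standard CentOS 6 service commands, not steps from the original post:
service ntpd start
chkconfig ntpd on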
# Unlike the single-node setup, zoo.cfg must now list all three ZooKeeper nodes
cd /usr/zookeeper-3.4.6/
vi conf/zoo.cfg
# Adjust the ZooKeeper configuration as follows (the remaining single-node settings stay as they were)
dataDir=/root/zkdata
#autopurge.purgeInterval=1
# 2888 is the quorum (follower-to-leader) port, 3888 is the leader-election port
server.1=CentOS101:2888:3888
server.2=CentOS102:2888:3888
server.3=CentOS103:2888:3888
# After the config is done, distribute ZooKeeper to CentOS102 and CentOS103 as well
scp -r /usr/zookeeper-3.4.6 CentOS102:/usr/
scp -r /usr/zookeeper-3.4.6 CentOS103:/usr/
# Once copied, create the zkdata data directory on CentOS102 and CentOS103
mkdir /root/zkdata
# Write each node's id into its myid file
# On CentOS101
echo 1 > /root/zkdata/myid
# On CentOS102
echo 2 > /root/zkdata/myid
# On CentOS103
echo 3 > /root/zkdata/myid
# Then start ZooKeeper on each node and check that it is running
/usr/zookeeper-3.4.6/bin/zkServer.sh start zoo.cfg
/usr/zookeeper-3.4.6/bin/zkServer.sh status zoo.cfg
jps
# If startup fails, check that each myid matches its server.N entry and that the firewall is off
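When the ensemble is healthy, zkServer.sh status should report Mode: leader on one node and Mode: follower on the other two. As an extra check, the ZooKeeper CLI that ships with 3.4.6 can connect to any node; CentOS101 below is just an example address:
/usr/zookeeper-3.4.6/bin/zkCli.sh -server CentOS101:2181
# inside the CLI shell:
ls /
quit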
# Change the single-node Kafka config so it points at the ZooKeeper cluster instead of a single node
cd /usr/kafka_2.11-2.2.0/
vi config/server.properties
# Point zookeeper.connect at all three ZooKeeper nodes
zookeeper.connect=CentOS101:2181,CentOS102:2181,CentOS103:2181
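For reference, the other broker settings on CentOS101 carry over from the single-node post; broker.id=0 and the listener address are assumptions consistent with the ids given to CentOS102 and CentOS103 below, and log.dirs matches the /usr/kafka-logs directory inspected later:
broker.id=0
listeners=PLAINTEXT://CentOS101:9092
log.dirs=/usr/kafka-logs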
cd ..
scp -r kafka_2.11-2.2.0 CentOS102:/usr/
scp -r kafka_2.11-2.2.0 CentOS103:/usr/
# Log in to CentOS102 and CentOS103 and adjust the copied config
# On CentOS102, change to:
cd /usr/kafka_2.11-2.2.0/
vi config/server.properties
broker.id=1
listeners=PLAINTEXT://CentOS102:9092
# On CentOS103, change to:
cd /usr/kafka_2.11-2.2.0/
vi config/server.properties
broker.id=2
listeners=PLAINTEXT://CentOS103:9092
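If you would rather not edit the file by hand, the same two settings can be patched with sed; this is only a convenience sketch (shown for CentOS102, adjust the id and hostname for CentOS103), not part of the original steps:
cd /usr/kafka_2.11-2.2.0/
sed -i 's/^broker\.id=.*/broker.id=1/' config/server.properties
sed -i 's|^#\?listeners=.*|listeners=PLAINTEXT://CentOS102:9092|' config/server.properties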
# Test whether Kafka starts. Before starting Kafka on a node, make sure ZooKeeper is already running on that node, otherwise Kafka only appears to start and the process soon exits
cd /usr/kafka_2.11-2.2.0/
./bin/kafka-server-start.sh -daemon config/server.properties
jps
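As a quick sanity check that all three brokers registered with ZooKeeper, the broker ids can be listed from the ZooKeeper CLI; with the configuration above this should show ids 0, 1 and 2 (0 for CentOS101 being the assumption noted earlier):
/usr/zookeeper-3.4.6/bin/zkCli.sh -server CentOS101:2181
ls /brokers/ids
quit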
# Create a topic on the cluster
./bin/kafka-topics.sh --bootstrap-server CentOS101:9092,CentOS102:9092,CentOS103:9092 --create --topic topic01 --partitions 2 --replication-factor 2
# List the topics that exist on the cluster
./bin/kafka-topics.sh --bootstrap-server CentOS101:9092,CentOS102:9092,CentOS103:9092 --list
# Show the details of a topic
./bin/kafka-topics.sh --bootstrap-server CentOS101:9092,CentOS102:9092,CentOS103:9092 --describe --topic topic01
# Change the number of partitions for a topic; note that the partition count can only be increased, never decreased
./bin/kafka-topics.sh --bootstrap-server CentOS101:9092,CentOS102:9092,CentOS103:9092 --alter --topic topic01 --partitions 3
# Delete a topic
./bin/kafka-topics.sh --bootstrap-server CentOS101:9092,CentOS102:9092,CentOS103:9092 --delete --topic topic01
# Looking at the Kafka log directory shows the topic's log directories are only marked for deletion; Kafka removes them asynchronously in the background
ls /usr/kafka-logs/
# Recreate the topic and start a consumer waiting for messages
./bin/kafka-topics.sh --bootstrap-server CentOS101:9092,CentOS102:9092,CentOS103:9092 --create --topic topic01 --partitions 3 --replication-factor 3
./bin/kafka-console-consumer.sh --bootstrap-server CentOS101:9092,CentOS102:9092,CentOS103:9092 --topic topic01 --group g1 --property print.key=true --property print.value=true --property key.separator=,
# The consumer command joins consumer group g1, consumes topic01 and prints each record's key and value separated by a comma
# Start a console producer
./bin/kafka-console-producer.sh --broker-list CentOS101:9092,CentOS102:9092,CentOS103:9092 --topic topic01
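Since the consumer above prints keys, it can be useful to send keyed records as well; the console producer supports this through its parse.key and key.separator properties (the k1,hello line is just an illustrative message to type):
./bin/kafka-console-producer.sh --broker-list CentOS101:9092,CentOS102:9092,CentOS103:9092 --topic topic01 --property parse.key=true --property key.separator=,
# then type messages in key,value form, e.g.
k1,hello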
# List the consumer groups
./bin/kafka-consumer-groups.sh --bootstrap-server CentOS101:9092,CentOS102:9092,CentOS103:9092 --list
# Show the details of a specific consumer group
./bin/kafka-consumer-groups.sh --bootstrap-server CentOS101:9092,CentOS102:9092,CentOS103:9092 --describe --group g1
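The describe output includes each partition's committed offset and lag. If the group ever needs to replay the topic from the beginning, its offsets can be reset while its consumers are stopped; the command below uses standard kafka-consumer-groups.sh options and is only an optional extra, not part of the original steps:
./bin/kafka-consumer-groups.sh --bootstrap-server CentOS101:9092,CentOS102:9092,CentOS103:9092 --group g1 --topic topic01 --reset-offsets --to-earliest --execute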