OS: CentOS 7.4
Kafka version: kafka_2.11-2.4.0
ZooKeeper version: zookeeper-3.5.7
JDK version: JDK 1.8
Servers:
192.168.1.101
192.168.1.102
192.168.1.103
Deployment directories:
/webapps/kafka
/webapps/zookeeper
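For reference, a minimal sketch of laying out these directories on each server (the download URLs and archive names are assumptions based on the versions above; verify them against your Apache mirror):
cd /tmp
wget https://archive.apache.org/dist/kafka/2.4.0/kafka_2.11-2.4.0.tgz
wget https://archive.apache.org/dist/zookeeper/zookeeper-3.5.7/apache-zookeeper-3.5.7-bin.tar.gz
mkdir -p /webapps
tar -xzf kafka_2.11-2.4.0.tgz -C /webapps && mv /webapps/kafka_2.11-2.4.0 /webapps/kafka
tar -xzf apache-zookeeper-3.5.7-bin.tar.gz -C /webapps && mv /webapps/apache-zookeeper-3.5.7-bin /webapps/zookeeper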
ZooKeeper cluster: clustering is simply a matter of listing every ensemble member in each node's configuration file (each node writes its own entry as 0.0.0.0).
1. Edit the configuration file
# Configuration on 192.168.1.101
vi /webapps/zookeeper/conf/zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/webapps/zookeeper_data/
clientPort=12181
server.1=0.0.0.0:12888:13888
server.2=192.168.1.102:12888:13888
server.3=192.168.1.103:12888:13888
## SASL authentication ----
authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
jaasLoginRenew=3600000
##------
# Configuration on 192.168.1.102
vi /webapps/zookeeper/conf/zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/webapps/zookeeper_data/
clientPort=12181
server.1=192.168.1.101:12888:13888
server.2=0.0.0.0:12888:13888
server.3=192.168.1.103:12888:13888
authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
jaasLoginRenew=3600000
# Configuration on 192.168.1.103
vi /webapps/zookeeper/conf/zoo.cfg
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/webapps/zookeeper_data/
clientPort=12181
server.1=192.168.1.101:12888:13888
server.2=192.168.1.102:12888:13888
server.3=0.0.0.0:12888:13888
authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
jaasLoginRenew=3600000
2. Create a unique myid file on each server
The file lives in the dataDir configured above: dataDir=/webapps/zookeeper_data/ (a quick check follows below)
# On 192.168.1.101
cd /webapps/zookeeper_data
echo "1" > myid
# On 192.168.1.102
cd /webapps/zookeeper_data
echo "2" > myid
# On 192.168.1.103
cd /webapps/zookeeper_data
echo "3" > myid
3. Add the SASL authentication file zk_server_jaas.conf
Identical on all three servers; the file can live anywhere as long as the path matches step 5. In the Server block, each user_<name>="<password>" entry defines an account that clients may log in with (here: user kafka, password kafka).
vi /webapps/zookeeper/conf/zk_server_jaas.conf
Server {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="admin"
password="admin"
user_kafka="kafka";
};
4. Copy the required Kafka jars into ZooKeeper
Copy the following jars from kafka/libs to zookeeper/lib (the exact version suffixes depend on the Kafka release you downloaded; see the copy sketch below):
kafka-clients-*.jar
lz4-java-*.jar
slf4j-api-*.jar
slf4j-log4j12-*.jar
snappy-java-*.jar
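A sketch of the copy, run on each server (paths as above):
cd /webapps/kafka/libs
cp kafka-clients-*.jar lz4-java-*.jar slf4j-api-*.jar slf4j-log4j12-*.jar snappy-java-*.jar /webapps/zookeeper/lib/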
5. Edit bin/zkEnv.sh so that ZooKeeper loads zk_server_jaas.conf at startup
vi /webapps/zookeeper/bin/zkEnv.sh
# Add:
export SERVER_JVMFLAGS=" -Djava.security.auth.login.config=/webapps/zookeeper/conf/zk_server_jaas.conf"
6. Start ZooKeeper
bin/zkServer.sh start
Other commands:
Stop: bin/zkServer.sh stop
Restart: bin/zkServer.sh restart
Status: bin/zkServer.sh status
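To verify that the ensemble is up and that SASL login works, you can connect with zkCli.sh using a client-side JAAS file (the file name zk_client_jaas.conf is just an example; it reuses the kafka account defined in zk_server_jaas.conf):
vi /webapps/zookeeper/conf/zk_client_jaas.conf
Client {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="kafka"
password="kafka";
};
# Point the CLI at the JAAS file and connect to any node
export CLIENT_JVMFLAGS="-Djava.security.auth.login.config=/webapps/zookeeper/conf/zk_client_jaas.conf"
bin/zkCli.sh -server 192.168.1.101:12181
# bin/zkServer.sh status on each node should report one leader and two followers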
Kafka cluster
1. Edit the configuration file
# Configuration on 192.168.1.101
vi /webapps/kafka/config/server.properties
# Unique ID within the cluster
broker.id=1
listeners=SASL_PLAINTEXT://0.0.0.0:9092
# SASL listener address advertised to clients
advertised.listeners=SASL_PLAINTEXT://192.168.1.101:9092
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.enabled.mechanisms=PLAIN
sasl.mechanism.inter.broker.protocol=PLAIN
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
allow.everyone.if.no.acl.found=true
# Disable automatic topic creation (default: true)
auto.create.topics.enable=false
# Data log directory
log.dirs=/webapps/kafka/logs
# Default number of partitions
num.partitions=3
# Replication factor for the offsets topic (__consumer_offsets)
offsets.topic.replication.factor=3
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
# Default replication factor (applies only to automatically created topics)
default.replication.factor=3
# ZooKeeper connection string
zookeeper.connect=192.168.1.101:12181,192.168.1.102:12181,192.168.1.103:12181
# Configuration on 192.168.1.102
vi /webapps/kafka/config/server.properties
# Unique ID within the cluster
broker.id=2
listeners=SASL_PLAINTEXT://0.0.0.0:9092
# SASL listener address advertised to clients
advertised.listeners=SASL_PLAINTEXT://192.168.1.102:9092
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.enabled.mechanisms=PLAIN
sasl.mechanism.inter.broker.protocol=PLAIN
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
allow.everyone.if.no.acl.found=true
# Disable automatic topic creation (default: true)
auto.create.topics.enable=false
# Data log directory
log.dirs=/webapps/kafka/logs
# Default number of partitions
num.partitions=3
# Replication factor for the offsets topic (__consumer_offsets)
offsets.topic.replication.factor=3
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
# Default replication factor (applies only to automatically created topics)
default.replication.factor=3
# ZooKeeper connection string
zookeeper.connect=192.168.1.101:12181,192.168.1.102:12181,192.168.1.103:12181
# Configuration on 192.168.1.103
vi /webapps/kafka/config/server.properties
# Unique ID within the cluster
broker.id=3
listeners=SASL_PLAINTEXT://0.0.0.0:9092
# SASL listener address advertised to clients ----
advertised.listeners=SASL_PLAINTEXT://192.168.1.103:9092
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.enabled.mechanisms=PLAIN
sasl.mechanism.inter.broker.protocol=PLAIN
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
allow.everyone.if.no.acl.found=true
#----
# Disable automatic topic creation (default: true)
auto.create.topics.enable=false
# Data log directory
log.dirs=/webapps/kafka/logs
# Default number of partitions
num.partitions=3
# Replication factor for the offsets topic (__consumer_offsets)
offsets.topic.replication.factor=3
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1
# Default replication factor (applies only to automatically created topics)
default.replication.factor=3
# ZooKeeper connection string
zookeeper.connect=192.168.1.101:12181,192.168.1.102:12181,192.168.1.103:12181
2. Add the SASL authentication file kafka_server_jaas.conf
Identical on all three servers.
The KafkaServer block holds the broker-side credentials: username/password are what the brokers use for inter-broker connections, and each user_<name>="<password>" entry is an account clients may authenticate with (note that user_admin must also be listed so the brokers' own admin credentials are accepted).
The Client block holds the credentials Kafka uses to connect to ZooKeeper; they must match the kafka account in zk_server_jaas.conf.
vi /webapps/kafka/config/kafka_server_jaas.conf
KafkaServer {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="admin"
password="admin"
user_admin="admin"
user_kafka="kafka";
};
Client {
org.apache.kafka.common.security.plain.PlainLoginModule required
username="kafka"
password="kafka";
};
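Client applications and the Kafka CLI tools need matching SASL settings. A minimal sketch, assuming a file named client.properties and the kafka/kafka account defined above (adjust names and credentials to your setup):
vi /webapps/kafka/config/client.properties
security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="kafka" password="kafka";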
3. Configure the Kafka startup environment
vi /webapps/kafka/bin/kafka-run-class.sh
# Add:
export KAFKA_HEAP_OPTS="-Xmx2G -Xms2G -Djava.security.auth.login.config=/webapps/kafka/config/kafka_server_jaas.conf"
4. Start Kafka
bin/kafka-server-start.sh -daemon config/server.properties
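To verify the cluster, you can create a topic and push a few messages through it with the CLI tools, reusing the client.properties sketch above (the topic name test is just an example):
bin/kafka-topics.sh --bootstrap-server 192.168.1.101:9092 --command-config config/client.properties --create --topic test --partitions 3 --replication-factor 3
bin/kafka-console-producer.sh --broker-list 192.168.1.101:9092 --producer.config config/client.properties --topic test
bin/kafka-console-consumer.sh --bootstrap-server 192.168.1.102:9092 --consumer.config config/client.properties --topic test --from-beginning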