Kerberos可以将认证的密钥在集群部署时事先放到可靠的节点上。集群运行时,集群内的节点使用密钥得到认证,认证通过后的节点才能提供服务。企图冒充的节点由于没有事先得到的密钥信息,无法与集群内部的节点通信。这样就防止了恶意地使用或篡改Hadoop集群的问题,确保了Hadoop集群的可靠性、安全性。
官网下载地址 https://archive.apache.org/dist/kafka/2.6.0/kafka_2.12-2.6.0.tgz
#内容如下
// "KafkaServer" entry: credentials the broker uses for SASL/GSSAPI
// (Kerberos) authentication, including inter-broker communication.
// Fix: in a JAAS file the ';' terminates the whole login entry, so it must
// follow the LAST option. The original put it after "principal", leaving
// serviceName outside the entry — the JAAS parser rejects that.
KafkaServer {
  com.sun.security.auth.module.Krb5LoginModule required
  useKeyTab=true
  storeKey=true
  keyTab="/etc/security/keytab/kafka.keytab"
  principal="kafka/[email protected]"
  serviceName="kafka";
};
// "KafkaClient" entry: used by command-line clients (console producer /
// consumer, admin tools) when talking to the Kerberized brokers.
// Fix: moved the entry-terminating ';' from the "principal" option to the
// last option ("serviceName"); with the original placement the JAAS parser
// fails because serviceName appears after the entry terminator.
KafkaClient {
  com.sun.security.auth.module.Krb5LoginModule required
  useKeyTab=true
  useTicketCache=true
  keyTab="/etc/security/keytab/kafka.keytab"
  principal="kafka/[email protected]"
  serviceName="kafka";
};
// "Client" entry: used by the broker's embedded ZooKeeper client when it
// connects to a Kerberized ZooKeeper ensemble; serviceName must match the
// service part of ZooKeeper's principal.
// NOTE(review): this entry uses realm HADOOP.COM while the Kafka entries
// above use EXAMPLE.COM — confirm both realms are really intended.
Client {
com.sun.security.auth.module.Krb5LoginModule required
serviceName="zookeeper"
useKeyTab=true
useTicketCache=true
keyTab="/etc/security/keytab/zk.service.keytab"
principal="zookeeper/[email protected]";
};
KafkaServer配置代表了kafka集群中Server彼此间通信所使用的通信认证方式。
Client配置代表了kafka集群中Server与Zookeeper通信时的认证方式。
KafkaClient配置代表了命令行客户端与kafka集群通信时的认证方式。
# server.properties additions for Kerberos (SASL/GSSAPI) + ACLs.
# Fix: in Java .properties files '#' starts a comment ONLY at the beginning
# of a line. The original inline comments ("... 9092 #对应主机名称") would be
# parsed as part of the listener value and break the broker. Comments are
# now on their own lines (and translated to English).
# Replace hadoop-001 with this broker's own hostname.
advertised.listeners=SASL_PLAINTEXT://hadoop-001:9092
listeners=SASL_PLAINTEXT://hadoop-001:9092
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.mechanism.inter.broker.protocol=GSSAPI
sasl.enabled.mechanisms=GSSAPI
# Must match the service part of the broker principal (kafka/host@REALM).
sasl.kerberos.service.name=kafka
# ACL settings: deny by default; "kafka" is the super user.
allow.everyone.if.no.acl.found=false
auto.create.topics.enable=false
delete.topic.enable=true
super.users=User:kafka
# SimpleAclAuthorizer is correct for Kafka 2.6 (removed in Kafka 3.x,
# where kafka.security.authorizer.AclAuthorizer replaces it).
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
zookeeper.connect=hadoop-001:11001,hadoop-002:11001,hadoop-003:11001
if [ -z "$KAFKAJVMPERFORMANCEOPTS" ]; then KAFKAJVMPERFORMANCEOPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true -Djava.security.krb5.conf=/etc/krb5.conf -Djava.security.auth.login.config=/bdspace/kafka/config/kafka-jaas.conf " fi
# producer.properties — SASL/Kerberos client settings for the console producer.
security.protocol = SASL_PLAINTEXT
sasl.mechanism = GSSAPI
sasl.kerberos.service.name =kafka
# consumer.properties — identical settings for the console consumer.
security.protocol = SASL_PLAINTEXT
sasl.mechanism = GSSAPI
sasl.kerberos.service.name=kafka
# Enable Kerberos for the Kafka service: point the JVM at krb5.conf and at
# the JAAS login configuration (the KafkaServer/KafkaClient/Client entries).
export KAFKA_OPTS="-Djava.security.krb5.conf=/etc/krb5.conf -Djava.security.auth.login.config=/bdspace/kafka/config/kafka-jaas.conf"
# Console-consumer wrapper: always pass the SASL_PLAINTEXT security protocol
# so the client authenticates via Kerberos
# (--consumer-property security.protocol=SASL_PLAINTEXT).
# Fix: quoted $(dirname "$0") so the wrapper works from paths containing
# spaces; also fixed the "kakfa" typo in the original comment.
exec "$(dirname "$0")"/kafka-run-class.sh kafka.tools.ConsoleConsumer --consumer-property security.protocol=SASL_PLAINTEXT "$@"
# kafka-topics wrapper: load the Kerberos/JAAS config before delegating to
# kafka-run-class.sh so topic administration works against a secured cluster.
# Fix: quoted $(dirname "$0") so the wrapper works from paths containing spaces.
export KAFKA_OPTS="-Djava.security.krb5.conf=/etc/krb5.conf -Djava.security.auth.login.config=/bdspace/kafka/config/kafka-jaas.conf"
exec "$(dirname "$0")"/kafka-run-class.sh kafka.admin.TopicCommand "$@"
# Start the broker with the Kerberos-enabled server.properties.
kafka/bin/kafka-server-start.sh config/server.properties
# Console producer/consumer must load the SASL client properties files
# (security.protocol, sasl.mechanism, sasl.kerberos.service.name).
kafka/bin/kafka-console-producer.sh --broker-list hadoop-001:9092 --topic test --producer.config config/producer.properties
kafka/bin/kafka-console-consumer.sh --bootstrap-server hadoop-001:9092 --topic test --consumer.config config/consumer.properties
在配置好Kerberos后,启动Zookeeper集群和Kafka集群之后,就可以使用kafka-acls.sh脚本来操作ACL机制。
# List the ACLs currently stored in ZooKeeper.
kafka-acls.sh --list --authorizer-properties zookeeper.connect=zk1:2181,zk2:2181,zk3:2181
# Create the topic that the ACL examples below grant permissions on.
# Fix: the original dropped the "--topic" option name ("--kafka-acl-topic"
# is not a valid kafka-topics.sh flag); the topic name must be passed as
# "--topic kafka-acl-topic" to match the ACL commands that reference it.
kafka-topics.sh --create --zookeeper zk1:2181,zk2:2181,zk3:2181 --replication-factor 3 --partitions 3 --topic kafka-acl-topic
# Grant produce (Write) permission on the topic.
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=zk1:2181,zk2:2181,zk3:2181 --add --allow-principal User:username --operation Write --topic kafka-acl-topic
# Grant consume (Read) permission on the topic.
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=zk1:2181,zk2:2181,zk3:2181 --add --allow-principal User:username --operation Read --topic kafka-acl-topic
# Grant Read permission on the consumer group (required to consume as a group member).
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=zk1:2181,zk2:2181,zk3:2181 --add --allow-principal User:username --operation Read --group user-group
# Deny produce (Write) on the topic for this principal (deny rules override allows).
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=zk1:2181,zk2:2181,zk3:2181 --add --deny-principal User:username --operation Write --topic kafka-acl-topic
# Revoke previously added allow/deny rules with --remove.
kafka-acls.sh --authorizer-properties zookeeper.connect=zk1:2181,zk2:2181,zk3:2181 --remove --allow-principal User:username --operation Write --topic kafka-acl-topic
kafka-acls.sh --authorizer-properties zookeeper.connect=zk1:2181,zk2:2181,zk3:2181 --remove --deny-principal User:username --operation Write --topic kafka-acl-topic
# Run as the kafka (super) user: create the topic.
kafka-topics.sh --create --zookeeper zk1:2181,zk2:2181,zk3:2181 --replication-factor 3 --partitions 3 --topic kafka-user
# Run as the kafka (super) user: grant Write on the topic and Read on the group.
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=zk1:2181,zk2:2181,zk3:2181 --add --allow-principal User:username --operation Write --topic kafka-user
kafka-acls.sh --authorizer kafka.security.auth.SimpleAclAuthorizer --authorizer-properties zookeeper.connect=zk1:2181,zk2:2181,zk3:2181 --add --allow-principal User:username --operation Read --group user-group
# Produce (as the granted user).
kafka/bin/kafka-console-producer.sh --broker-list hadoop-001:9092 --topic test --producer.config config/producer.properties
# Consume (as the granted user).
kafka/bin/kafka-console-consumer.sh --bootstrap-server hadoop-001:9092 --topic test --consumer.config config/consumer.properties