Configuring SASL Authentication for Kafka and ZooKeeper

1: Configure ZooKeeper

Add the following settings to zoo.cfg (zoo.cfg is parsed as a Java properties file, so comments must go on their own lines, not after a value):

authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
requireClientAuthScheme=sasl
jaasLoginRenew=3600000
# Enable SASL for quorum communication (off by default)
quorum.auth.enableSasl=true
# When acting as a learner, this node sends authentication credentials
quorum.auth.learnerRequireSasl=true
# When true, connecting learners must authenticate, otherwise they are rejected
quorum.auth.serverRequireSasl=true
# Context names from the JAAS config
quorum.auth.learner.loginContext=QuorumLearner
quorum.auth.server.loginContext=QuorumServer
# Recommended: twice the number of ZK nodes
quorum.cnxn.threads.size=20

Create a zk_server_jaas.conf file:

// QuorumServer and QuorumLearner configure authentication between the ZooKeeper
// nodes themselves (server-to-server authentication) and do not affect Kafka's
// connections. The Server section holds the accounts that clients such as Kafka
// use to connect (client-to-server authentication).
// Note: JAAS files use // comments; # is not valid here.
Server {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    username="admin"
    password="admin"
    user_kafka="admin"        // account for the Kafka brokers: user kafka, password admin
    user_producer="admin";    // add users as needed; here a user producer with password admin
};
QuorumServer {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    user_zookeeper="zookeeper@password";    // user zookeeper, password zookeeper@password
};
QuorumLearner {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    username="zookeeper"
    password="zookeeper@password";
};

Put zk_server_jaas.conf in the same directory as zoo.cfg, then start ZooKeeper:

docker run -d -p 2181:2181 -p 2888:2888 -p 3888:3888 --name zookeeper --restart always \
-v /path/to/zookeeper/conf:/opt/zookeeper-3.4.13/conf \
-v /data/zookeeper/data:/data \
-v /data/zookeeper/datalog:/datalog \
-e "TZ=Asia/Shanghai" \
-e "SERVER_JVMFLAGS=-Djava.security.auth.login.config=/opt/zookeeper-3.4.13/conf/zk_server_jaas.conf" \
wurstmeister/zookeeper
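
With the container running, you can verify the setup. A quick liveness probe over the four-letter-word interface (enabled by default on 3.4.x):

echo ruok | nc localhost 2181    # prints "imok" if the server is up

To test the client-to-server credentials, you can point zkCli at a client-side JAAS file; zk_client_jaas.conf is a hypothetical name here, reusing the user_kafka account defined above:

Client {
    org.apache.zookeeper.server.auth.DigestLoginModule required
    username="kafka"
    password="admin";
};

zkEnv.sh honors CLIENT_JVMFLAGS, so a sketch of the invocation is:

export CLIENT_JVMFLAGS="-Djava.security.auth.login.config=/path/to/zk_client_jaas.conf"
bin/zkCli.sh -server localhost:2181
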
2: Configure Kafka

Create kafka_server_jaas.conf:

KafkaServer {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    // username/password are the credentials this broker uses when it
    // connects to other brokers
    username="admin"
    password="admin"
    // user_<name>="<password>" entries define every account allowed to connect
    // to this broker, including other brokers and ordinary clients
    user_admin="admin"
    user_alice="alice";    // adds a user alice with password alice
};
Client {
    // Credentials the broker uses to connect to ZooKeeper; they must match a
    // user_<name> entry in the Server section of zk_server_jaas.conf
    // (user_kafka="admin" above), not a user in the KafkaServer section.
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="kafka"
    password="admin";
};
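
For client applications, the same PLAIN credentials can be supplied inline through the sasl.jaas.config property (available since Kafka 0.10.2) instead of a separate JAAS file. A minimal client.properties sketch for the alice account defined above:

security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="alice" password="alice";

This file is used again below to smoke-test the broker with the console tools.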

Create server.properties under /path/to/kafka/config with the following content:

zookeeper.set.acl=true

This single line is all you need here: zookeeper.set.acl=true makes the broker create its ZooKeeper nodes with ACLs bound to its SASL identity, and at startup the container writes the configuration passed via environment variables into this file.

Start Kafka with the following docker-compose.yml:

version: '2'
services:
  kafka1:
    image: wurstmeister/kafka
    container_name: kafka1
    hostname: broker1
    network_mode: host
    environment:
        KAFKA_BROKER_ID: 1 
        KAFKA_ZOOKEEPER_CONNECT: zk0:2181,zk1:2181,zk2:2181
        KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
        KAFKA_LISTENERS: SASL_PLAINTEXT://yourHostIP:19092
        KAFKA_ADVERTISED_LISTENERS: SASL_PLAINTEXT://yourHostIP:19092
        KAFKA_SECURITY_INTER_BROKER_PROTOCOL: SASL_PLAINTEXT
        KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
        KAFKA_SASL_ENABLED_MECHANISMS: PLAIN
        KAFKA_OPTS: -Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf
        KAFKA_PORT: 19092
    volumes:
        - /path/to/kafka/config:/opt/kafka/config
        - /data/kafka-sasl/logs:/kafka/logs
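
For reference, the image's startup script maps each KAFKA_* environment variable to the matching broker property (KAFKA_SASL_ENABLED_MECHANISMS becomes sasl.enabled.mechanisms, and so on), so after startup server.properties should contain roughly the following, with the values taken from the compose file above:

zookeeper.set.acl=true
broker.id=1
zookeeper.connect=zk0:2181,zk1:2181,zk2:2181
offsets.topic.replication.factor=1
listeners=SASL_PLAINTEXT://yourHostIP:19092
advertised.listeners=SASL_PLAINTEXT://yourHostIP:19092
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.mechanism.inter.broker.protocol=PLAIN
sasl.enabled.mechanisms=PLAIN
port=19092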

Because the mounted /path/to/kafka/config directory shadows the image's config directory and contains no log4j.properties (Kafka will not create one at startup), copy a log4j.properties into it from somewhere else, e.g. from a Kafka distribution.

[root@my-centos kafka]# ls config/
kafka_server_jaas.conf  log4j.properties  server.properties

Start Kafka:

[root@my-centos kafka]# docker-compose up -d
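
Before wiring up application code, you can smoke-test authentication with the console tools shipped in the image. A sketch, assuming the client.properties from step 2 was saved into the mounted /path/to/kafka/config directory, topic auto-creation is enabled (the default), and a hypothetical topic name test:

[root@my-centos kafka]# docker exec -it kafka1 bash
# produce a few messages as alice
/opt/kafka/bin/kafka-console-producer.sh --broker-list yourHostIP:19092 \
    --topic test --producer.config /opt/kafka/config/client.properties
# read them back
/opt/kafka/bin/kafka-console-consumer.sh --bootstrap-server yourHostIP:19092 \
    --topic test --from-beginning --consumer.config /opt/kafka/config/client.properties

With wrong credentials the broker refuses the connection.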
3: Connect to Kafka from Go

package main

import (
    "fmt"
    "github.com/Shopify/sarama"
    "time"
)

func connKafka(nodes []string) {
    config := sarama.NewConfig()
    // Authenticate over SASL/PLAIN with one of the user_<name> accounts
    // defined in kafka_server_jaas.conf (here admin/admin).
    config.Net.SASL.Enable = true
    config.Net.SASL.User = "admin"
    config.Net.SASL.Password = "admin"
    // Fail fast so a bad address or rejected credentials surface quickly.
    config.Net.DialTimeout = 2 * time.Second
    config.Metadata.Retry.Max = 1
    now := time.Now()
    client, err := sarama.NewClient(nodes, config)
    if err != nil {
        fmt.Println("connect time: ", time.Since(now).Seconds())
        panic(err)
    }
    defer client.Close()
    //fmt.Println("client: ", client)
    topics, err := client.Topics()
    if err != nil {
        panic(err)
    }
    for _, topic := range topics {
       fmt.Println("topic: ", topic)
    }
}

func main() {
    nodes := []string{"kafka0:19092", "kafka1:19092", "kafka2:19092", "kafka3:19092", "kafka4:19092"}
    connKafka(nodes)
}
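
Once listing topics works, producing uses the same SASL settings. A minimal sketch with sarama's SyncProducer, assuming the alice account from the JAAS file above and a hypothetical topic test (the broker address is illustrative):

package main

import (
    "fmt"

    "github.com/Shopify/sarama"
)

func main() {
    config := sarama.NewConfig()
    config.Net.SASL.Enable = true
    config.Net.SASL.User = "alice"
    config.Net.SASL.Password = "alice"
    config.Producer.Return.Successes = true // required by SyncProducer

    producer, err := sarama.NewSyncProducer([]string{"kafka1:19092"}, config)
    if err != nil {
        panic(err)
    }
    defer producer.Close()

    // Send one message and report where it landed.
    partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
        Topic: "test",
        Value: sarama.StringEncoder("hello from alice"),
    })
    if err != nil {
        panic(err)
    }
    fmt.Printf("delivered to partition %d at offset %d\n", partition, offset)
}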

References:
http://ohmycat.me/2019/05/08/kafka-with-zookeeper-authentication.html
https://access.redhat.com/documentation/en-us/red_hat_amq/7.2/html/using_amq_streams_on_red_hat_enterprise_linux_rhel/configuring_zookeeper
