Kafka 4.0 Cluster Setup with SCRAM Authentication

Servers

hostname    ip
test01      192.168.12.101
test02      192.168.12.102
test03      192.168.12.103

Software Versions

software    version
linux       Rocky Linux 8.6 (Green Obsidian)
java        openjdk-17.0.1
kafka       2.13-4.0.0 (Kafka 4.0.0, Scala 2.13 build)

Software Download
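
A minimal download sketch: the Kafka URL below assumes the standard Apache release archive layout for kafka_2.13-4.0.0.tgz and should be verified before use; the OpenJDK 17.0.1 linux-x64 tarball comes from whichever JDK vendor you prefer.

# Run on all three machines
mkdir -p /opt/program && cd /opt/program
# Kafka 4.0.0, Scala 2.13 build (verify the URL and checksum)
wget https://archive.apache.org/dist/kafka/4.0.0/kafka_2.13-4.0.0.tgz
# OpenJDK 17.0.1: download openjdk-17.0.1_linux-x64_bin.tar.gz from your JDK vendor into /opt/program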

Software Installation

Both Java and Kafka only need to be extracted; after extraction, rename the directories as listed in the table below (a command sketch follows the table).

software                    directory
java                        /opt/program/jdk
kafka (Controller/KRaft)    /opt/program/kafkac
kafka (Broker)              /opt/program/kafkab
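
A sketch of the extraction and renaming, assuming both archives sit in /opt/program and that the OpenJDK tarball unpacks to a directory named jdk-17.0.1 (adjust to your actual archive and directory names):

# Run on all three machines
cd /opt/program
tar -xzf openjdk-17.0.1_linux-x64_bin.tar.gz
mv jdk-17.0.1 jdk
tar -xzf kafka_2.13-4.0.0.tgz
cp -r kafka_2.13-4.0.0 kafkac    # Controller (KRaft) install
mv kafka_2.13-4.0.0 kafkab       # Broker install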

Software Configuration

System Configuration

# Run on all three machines
 
echo "192.168.12.101     test01" >> /etc/hosts
echo "192.168.12.102     test02" >> /etc/hosts
echo "192.168.12.103     test03" >> /etc/hosts

Java Configuration

# Run on all three machines
 
echo "export JAVA_HOME=/opt/program/jdk" >> /etc/profile
echo "export PATH=$PATH:$JAVA_HOME/bin" >> /etc/profile
 
source /etc/profile
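
A quick check that the JDK is picked up:

java -version
# Expect output reporting openjdk 17.0.1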

Kafka Controller Configuration

On test01, configure /opt/program/kafkac/config/server-plain.properties:

node.id=1
process.roles=controller
controller.quorum.voters=1@192.168.12.101:9093,2@192.168.12.102:9093,3@192.168.12.103:9093

listeners=CONTROLLER://:9093
advertised.listeners=CONTROLLER://192.168.12.101:9093

listener.security.protocol.map=CONTROLLER:SASL_PLAINTEXT
controller.listener.names=CONTROLLER
 
sasl.enabled.mechanisms=PLAIN
sasl.mechanism.controller.protocol=PLAIN
 
super.users=User:admin
allow.everyone.if.no.acl.found=true
authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer

log.dirs=/opt/program/kafkac/data

On test02, configure /opt/program/kafkac/config/server-plain.properties:

node.id=2
process.roles=controller
controller.quorum.voters=1@192.168.12.101:9093,2@192.168.12.102:9093,3@192.168.12.103:9093

listeners=CONTROLLER://:9093
advertised.listeners=CONTROLLER://192.168.12.102:9093

listener.security.protocol.map=CONTROLLER:SASL_PLAINTEXT
controller.listener.names=CONTROLLER
 
sasl.enabled.mechanisms=PLAIN
sasl.mechanism.controller.protocol=PLAIN
 
super.users=User:admin
allow.everyone.if.no.acl.found=true
authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer

log.dirs=/opt/program/kafkac/data

On test03, configure /opt/program/kafkac/config/server-plain.properties:

node.id=3
process.roles=controller
controller.quorum.voters=1@192.168.12.101:9093,2@192.168.12.102:9093,3@192.168.12.103:9093

listeners=CONTROLLER://:9093
advertised.listeners=CONTROLLER://192.168.12.103:9093

listener.security.protocol.map=CONTROLLER:SASL_PLAINTEXT
controller.listener.names=CONTROLLER
 
sasl.enabled.mechanisms=PLAIN
sasl.mechanism.controller.protocol=PLAIN
 
super.users=User:admin
allow.everyone.if.no.acl.found=true
authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer

log.dirs=/opt/program/kafkac/data

On test01, test02, and test03, configure /opt/program/kafkac/config/jaas-plain.conf:

KafkaServer {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    serviceName="kafka"
    username="admin"
    password="abc123456"
    user_admin="abc123456";
};
 
KafkaClient {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="admin"
    password="abc123456"
    user_admin="abc123456";
};

On test01, test02, and test03, create /opt/program/kafkac/bin/kafka-server-start-plain.sh:

cd /opt/program/kafkac
rm -f bin/kafka-server-start-plain.sh
cp bin/kafka-server-start.sh bin/kafka-server-start-plain.sh
sed -i '16i export KAFKA_OPTS="-Djava.security.auth.login.config=/opt/program/kafkac/config/jaas-plain.conf $KAFKA_OPTS"' bin/kafka-server-start-plain.sh

Kafka Broker Configuration

On test01, configure /opt/program/kafkab/config/server-plain.properties (a temporary PLAIN-authenticated broker, used later only to bootstrap the SCRAM admin account):

node.id=4
process.roles=broker
controller.quorum.voters=1@192.168.12.101:9093,2@192.168.12.102:9093,3@192.168.12.103:9093

listeners=BROKER://:9092
advertised.listeners=BROKER://192.168.12.101:9092
 
listener.security.protocol.map=BROKER:SASL_PLAINTEXT,CONTROLLER:SASL_PLAINTEXT
inter.broker.listener.name=BROKER
controller.listener.names=CONTROLLER
 
sasl.enabled.mechanisms=PLAIN
sasl.mechanism.inter.broker.protocol=PLAIN
sasl.mechanism.controller.protocol=PLAIN
 
super.users=User:admin
allow.everyone.if.no.acl.found=true
authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer
 
log.dirs=/opt/program/kafkab/data

On test01, configure /opt/program/kafkab/config/jaas-plain.conf:

KafkaServer {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    serviceName="kafka"
    username="admin"
    password="abc123456"
    user_admin="abc123456";
};
 
KafkaClient {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="admin"
    password="abc123456"
    user_admin="abc123456";
};

On test01, configure /opt/program/kafkab/config/sasl-plain.conf (client config for the admin CLI tools, PLAIN mechanism):

security.protocol=SASL_PLAINTEXT
sasl.mechanism=PLAIN
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="admin" password="abc123456";

On test01, create /opt/program/kafkab/bin/kafka-server-start-plain.sh:

cd /opt/program/kafkab
rm -f bin/kafka-server-start-plain.sh
cp bin/kafka-server-start.sh bin/kafka-server-start-plain.sh
sed -i '16i export KAFKA_OPTS="-Djava.security.auth.login.config=/opt/program/kafkab/config/jaas-plain.conf $KAFKA_OPTS"' bin/kafka-server-start-plain.sh

On test01, configure /opt/program/kafkab/config/server-scram.properties:

node.id=4
process.roles=broker
controller.quorum.voters=1@192.168.12.101:9093,2@192.168.12.102:9093,3@192.168.12.103:9093

listeners=BROKER://:9092
advertised.listeners=BROKER://192.168.12.101:9092
 
listener.security.protocol.map=BROKER:SASL_PLAINTEXT,CONTROLLER:SASL_PLAINTEXT
inter.broker.listener.name=BROKER
controller.listener.names=CONTROLLER
 
sasl.enabled.mechanisms=SCRAM-SHA-512
sasl.mechanism.inter.broker.protocol=SCRAM-SHA-512
sasl.mechanism.controller.protocol=PLAIN
 
super.users=User:admin
allow.everyone.if.no.acl.found=true
authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer
 
log.dirs=/opt/program/kafkab/data

On test02, configure /opt/program/kafkab/config/server-scram.properties:

node.id=5
process.roles=broker
controller.quorum.voters=1@192.168.12.101:9093,2@192.168.12.102:9093,3@192.168.12.103:9093

listeners=BROKER://:9092
advertised.listeners=BROKER://192.168.12.102:9092
 
listener.security.protocol.map=BROKER:SASL_PLAINTEXT,CONTROLLER:SASL_PLAINTEXT
inter.broker.listener.name=BROKER
controller.listener.names=CONTROLLER
 
sasl.enabled.mechanisms=SCRAM-SHA-512
sasl.mechanism.inter.broker.protocol=SCRAM-SHA-512
sasl.mechanism.controller.protocol=PLAIN
 
super.users=User:admin
allow.everyone.if.no.acl.found=true
authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer
 
log.dirs=/opt/program/kafkab/data

On test03, configure /opt/program/kafkab/config/server-scram.properties:

node.id=6
process.roles=broker
controller.quorum.voters=1@192.168.12.101:9093,2@192.168.12.102:9093,3@192.168.12.103:9093

listeners=BROKER://:9092
advertised.listeners=BROKER://192.168.12.103:9092
 
listener.security.protocol.map=BROKER:SASL_PLAINTEXT,CONTROLLER:SASL_PLAINTEXT
inter.broker.listener.name=BROKER
controller.listener.names=CONTROLLER
 
sasl.enabled.mechanisms=SCRAM-SHA-512
sasl.mechanism.inter.broker.protocol=SCRAM-SHA-512
sasl.mechanism.controller.protocol=PLAIN
 
super.users=User:admin
allow.everyone.if.no.acl.found=true
authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer
 
log.dirs=/opt/program/kafkab/data

On test01, test02, and test03, configure /opt/program/kafkab/config/jaas-scram.conf:

KafkaServer {
    org.apache.kafka.common.security.scram.ScramLoginModule required
    serviceName="kafka"
    username="admin"
    password="abc123456"
    user_admin="abc123456";
};
 
KafkaClient {
    org.apache.kafka.common.security.plain.PlainLoginModule required
    username="admin"
    password="abc123456"
    user_admin="abc123456";
};

On test01, test02, and test03, create /opt/program/kafkab/bin/kafka-server-start-scram.sh:

cd /opt/program/kafkab
rm -f bin/kafka-server-start-scram.sh
cp bin/kafka-server-start.sh bin/kafka-server-start-scram.sh
sed -i '16i export KAFKA_OPTS="-Djava.security.auth.login.config=/opt/program/kafkab/config/jaas-scram.conf $KAFKA_OPTS"' bin/kafka-server-start-scram.sh

On test01, configure /opt/program/kafkab/config/sasl-scram.conf (client config for the admin CLI tools, SCRAM mechanism):

security.protocol=SASL_PLAINTEXT
sasl.mechanism=SCRAM-SHA-512
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="admin" password="abc123456";

Startup

Format the Data Directories

# Run in the Controller directory on all three servers
cd /opt/program/kafkac
# Format the KRaft metadata directory; the cluster ID ("test" here) must be identical on every node
bin/kafka-storage.sh format -t test -c config/server-plain.properties
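
If the storage tool rejects a plain string such as test, generate a proper cluster ID once and reuse it on every node; a sketch:

# On test01, generate the cluster ID once
CLUSTER_ID=$(bin/kafka-storage.sh random-uuid)
echo "$CLUSTER_ID"    # copy this value to test02 and test03
# On every controller node, format with the same ID
bin/kafka-storage.sh format -t "$CLUSTER_ID" -c config/server-plain.properties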

Start the Controllers

# Run in the Controller directory on all three servers
cd /opt/program/kafkac
# Start the Controller
bin/kafka-server-start-plain.sh -daemon config/server-plain.properties
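
To confirm the controller quorum formed, the metadata quorum tool can be pointed at a controller. A sketch, run from /opt/program/kafkab on test01 so it can reuse the sasl-plain.conf client config shown in the broker section (adjust the path if you keep a copy elsewhere):

cd /opt/program/kafkab
bin/kafka-metadata-quorum.sh --bootstrap-controller 192.168.12.101:9093 \
--command-config config/sasl-plain.conf describe --status
# Expect three voters (node ids 1-3) and an elected leader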

Create the SCRAM Admin Account

# Run on test01
cd /opt/program/kafkab
# Format the data directory
bin/kafka-storage.sh format -t test -c config/server-plain.properties
# Start the temporary PLAIN broker
bin/kafka-server-start-plain.sh -daemon config/server-plain.properties

# Check the server log
tail -10 logs/server.log
# The last lines should report that the Kafka Server has started

# Once the broker is up, create the admin user's SCRAM credentials
bin/kafka-configs.sh --bootstrap-server 192.168.12.101:9092 --command-config config/sasl-plain.conf \
--alter --entity-type users \
--entity-name admin --add-config 'SCRAM-SHA-512=[password=abc123456]'

# List the users that have been created
bin/kafka-configs.sh --bootstrap-server 192.168.12.101:9092 --command-config config/sasl-plain.conf \
--describe --entity-type users

# Stop this temporary broker
ps -ef |grep java |grep kafkab |awk '{print "kill -9 "$2}' | sh

Start the Brokers

# Run in the Broker directory on all three servers
cd /opt/program/kafkab

# Run on test02 and test03 (test01 was already formatted above)
# Format the KRaft data directory; use the same cluster ID ("test") that the Controllers were formatted with
bin/kafka-storage.sh format -t test -c config/server-scram.properties

# Run on all three servers to start the Broker
bin/kafka-server-start-scram.sh -daemon config/server-scram.properties

# Watch the log
tail -100f logs/server.log
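
As an optional check that all three brokers joined the cluster, the quorum description can be run against a broker (on test01, from /opt/program/kafkab); brokers show up as observers of the metadata log:

bin/kafka-metadata-quorum.sh --bootstrap-server 192.168.12.101:9092 \
--command-config config/sasl-scram.conf describe --replication
# Expect voters 1-3 (controllers) and observers 4-6 (brokers)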

Cluster Verification

Create a Topic

# Run on test01 (from /opt/program/kafkab)
 
# Create the topic
bin/kafka-topics.sh --bootstrap-server 192.168.12.101:9092 --command-config config/sasl-scram.conf --create --partitions 3 --replication-factor 3 --topic test1
 
# Describe the topic
bin/kafka-topics.sh --bootstrap-server 192.168.12.101:9092 --command-config config/sasl-scram.conf --describe --topic test1

Create Accounts

# Run on test01
 
# Create the producer user
bin/kafka-configs.sh --bootstrap-server 192.168.12.101:9092 --command-config config/sasl-scram.conf \
--alter --entity-type users \
--entity-name producer --add-config 'SCRAM-SHA-512=[password=abc123456]'
 
# Create the consumer user
bin/kafka-configs.sh --bootstrap-server 192.168.12.101:9092 --command-config config/sasl-scram.conf \
--alter --entity-type users \
--entity-name consumer --add-config 'SCRAM-SHA-512=[password=abc123456]'

Grant Permissions

# Run on test01
 
# Grant produce permissions to the producer user
bin/kafka-acls.sh --bootstrap-server 192.168.12.101:9092 --command-config config/sasl-scram.conf \
--add --allow-principal User:producer --producer --topic test1
 
# Grant consume permissions to the consumer user (topic test1, consumer group test1)
bin/kafka-acls.sh --bootstrap-server 192.168.12.101:9092 --command-config config/sasl-scram.conf \
--add --allow-principal User:consumer --consumer --topic test1 --group test1
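
Optionally list the ACLs that were just added to confirm they took effect:

# List ACLs on topic test1
bin/kafka-acls.sh --bootstrap-server 192.168.12.101:9092 --command-config config/sasl-scram.conf \
--list --topic test1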

Produce Data

# Run on test01
 
# Create the producer's authentication config
cat > config/sasl-scram-producer.conf << EOF
security.protocol=SASL_PLAINTEXT
sasl.mechanism=SCRAM-SHA-512
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="producer" password="abc123456";
EOF
 
# Produce messages
bin/kafka-console-producer.sh --bootstrap-server 192.168.12.101:9092 --producer.config config/sasl-scram-producer.conf --topic test1

Consume Data

# Run on test01
 
# Create the consumer's authentication config
cat > config/sasl-scram-consumer.conf << EOF
security.protocol=SASL_PLAINTEXT
sasl.mechanism=SCRAM-SHA-512
sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username="consumer" password="abc123456";
EOF
 
# Consume messages
bin/kafka-console-consumer.sh --bootstrap-server 192.168.12.101:9092 --consumer.config config/sasl-scram-consumer.conf --topic test1 --group test1 --from-beginning

Type anything in the producer window; if it is printed in the consumer window, the cluster is working correctly.
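
As an optional negative test, producing with the consumer account should be rejected, because ACLs now exist on test1 and only User:producer has write access:

# Type a message after the prompt appears; the send should fail with a topic authorization error
bin/kafka-console-producer.sh --bootstrap-server 192.168.12.101:9092 \
--producer.config config/sasl-scram-consumer.conf --topic test1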
