命令行生产消息到Kerberos认证的kafka

#zookeeper 192.168.0.187:2181,192.168.0.162:2181,192.168.0.71:2181/kafka
#kafka 192.168.0.140:21007,192.168.0.71:21007


# Create topic "kafka-taos": 3 partitions, replication factor 1.
kafka-topics.sh --create --zookeeper 192.168.0.187:2181,192.168.0.162:2181,192.168.0.71:2181/kafka --replication-factor 1 --partitions 3 --topic kafka-taos
# List all topics. (Fixed: the last ZooKeeper host had a duplicated ":2181"
# port suffix — "192.168.0.71:2181:2181/kafka" — which is an invalid connect string.)
kafka-topics.sh  --list --zookeeper 192.168.0.187:2181,192.168.0.162:2181,192.168.0.71:2181/kafka

# Run these from the Kafka installation directory (the producer/consumer
# .properties paths below are relative to it).
# Produce messages to the topic over the Kerberos-secured listener (port 21007),
# using the SASL client settings in config/producer.properties.
kafka-console-producer.sh --broker-list 192.168.0.140:21007,192.168.0.71:21007 --topic kafka-taos --producer.config  config/producer.properties
# Consume the topic from the beginning with the matching Kerberos client config.
kafka-console-consumer.sh --topic kafka-taos  --bootstrap-server 192.168.0.140:21007,192.168.0.71:21007 --from-beginning --consumer.config config/consumer.properties




# producer.properties — Kerberos (SASL_PLAINTEXT) client settings for the console producer.
security.protocol = SASL_PLAINTEXT
# Kerberos domain of the cluster — presumably must match the cluster's KDC/realm setup; verify against the broker config.
kerberos.domain.name = hadoop.d6688299_d292_4408_9287_eeb18a4a43e6.com
# Brokers' Kerberos-secured listener endpoints (port 21007).
bootstrap.servers = 192.168.0.140:21007,192.168.0.71:21007
# Service principal name the Kafka brokers authenticate as.
sasl.kerberos.service.name = kafka

# consumer.properties — Kerberos (SASL_PLAINTEXT) client settings for the console consumer.
security.protocol = SASL_PLAINTEXT
# Kerberos domain of the cluster — presumably must match the cluster's KDC/realm setup; verify against the broker config.
kerberos.domain.name = hadoop.d6688299_d292_4408_9287_eeb18a4a43e6.com
# Consumer group id used for offset tracking.
group.id = example-group1
# Auto-commit offsets every 60 seconds.
auto.commit.interval.ms = 60000
# Service principal name the Kafka brokers authenticate as.
sasl.kerberos.service.name = kafka

你可能感兴趣的:(开发随笔,kafka)