Demo: Kerberos authentication for a Kafka cluster

The Kafka version here is 0.10.0, which is a bit old, but the approach is much the same for newer versions.

Once Kerberos authentication is enabled on a Kafka cluster, how do you produce and consume data with the Java API? Essentially, you add the JAAS, keytab, and other authentication-related settings to the producer and consumer code. Let's go straight to the code:

1. Connecting to a Kerberos-secured cluster is actually simple; you need the following three files:

1) The Kerberos server configuration file krb5.conf, which tells the program which KDC to authenticate against:

[libdefaults]
  udp_preference_limit = 1
  renew_lifetime = 3650d
  forwardable = true
  default_realm = CHINAUNICOM
  ticket_lifetime = 3650d
  dns_lookup_realm = false
  dns_lookup_kdc = false
  default_ccache_name = /tmp/krb5cc_%{uid}
  #default_tgs_enctypes = aes des3-cbc-sha1 rc4 des-cbc-md5
  #default_tkt_enctypes = aes des3-cbc-sha1 rc4 des-cbc-md5

[domain_realm]
  .CHINAUNICOM = CHINAUNICOM

[logging]
  default = FILE:/var/log/krb5kdc.log
  admin_server = FILE:/var/log/kadmind.log
  kdc = FILE:/var/log/krb5kdc.log

[realms]
  CHINAUNICOM = {
    admin_server = master98.hadoop.ljs
    kdc = master98.hadoop.ljs
  }

2) A JAAS configuration file (jaas.conf) that specifies the login module; one usually ships in the cluster's conf directory:

KafkaClient {
    com.sun.security.auth.module.Krb5LoginModule required
    useKeyTab=true
    keyTab="D:\\kafkaSSL\\kafka.service.keytab"
    storeKey=true
    useTicketCache=false
    principal="kafka/salver32.hadoop.unicom@CHINAUNICOM"
    serviceName=kafka;
};

3) The user's login ticket and keytab file; the ticket and keytab themselves are not pasted here. A quick way to verify them is shown below.
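Before wiring anything into Kafka, it helps to confirm the keytab can actually obtain a ticket. Below is a minimal standalone check (the class name is illustrative; the paths and the KafkaClient entry name match the files above): it performs a plain JAAS login with the same configuration the Kafka client will use.

package com.fline.kafka.security;

import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;

/** Illustrative helper: verifies the keytab/principal in jaas.conf can log in. */
public class KerberosLoginCheck {

    public static void main(String[] args) throws LoginException {
        System.setProperty("java.security.krb5.conf", "D:\\kafkaSSL\\krb5.conf");
        System.setProperty("java.security.auth.login.config", "D:\\kafkaSSL\\kafka_client_jaas.conf");
        // "KafkaClient" must match the entry name in jaas.conf
        LoginContext lc = new LoginContext("KafkaClient");
        lc.login();  // throws LoginException if krb5.conf, the keytab, or the principal is wrong
        System.out.println("Login OK, principals: " + lc.getSubject().getPrincipals());
        lc.logout();
    }
}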

2. pom.xml dependencies



<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
	<modelVersion>4.0.0</modelVersion>
	<parent>
		<groupId>org.springframework.boot</groupId>
		<artifactId>spring-boot-starter-parent</artifactId>
		<version>2.3.12.RELEASE</version>
	</parent>

	<groupId>org.fline</groupId>
	<artifactId>ppp</artifactId>
	<version>1.0-SNAPSHOT</version>

	<name>ppp</name>
	<description>ppp project</description>

	<properties>
		<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
		<maven.compiler.source>1.8</maven.compiler.source>
		<maven.compiler.target>1.8</maven.compiler.target>
	</properties>

	<dependencies>
		<dependency>
			<groupId>org.springframework.boot</groupId>
			<artifactId>spring-boot-starter-web</artifactId>
			<exclusions>
				<exclusion>
					<artifactId>log4j-to-slf4j</artifactId>
					<groupId>org.apache.logging.log4j</groupId>
				</exclusion>
				<exclusion>
					<artifactId>logback-classic</artifactId>
					<groupId>ch.qos.logback</groupId>
				</exclusion>
			</exclusions>
		</dependency>

		<!-- swagger2 -->
		<dependency>
			<groupId>io.springfox</groupId>
			<artifactId>springfox-swagger2</artifactId>
			<version>2.9.2</version>
		</dependency>

		<!-- swagger-ui -->
		<dependency>
			<groupId>io.springfox</groupId>
			<artifactId>springfox-swagger-ui</artifactId>
			<version>2.9.2</version>
		</dependency>

		<!-- kafka -->
		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka-clients</artifactId>
			<version>2.7.0</version>
		</dependency>
		<dependency>
			<groupId>org.apache.kafka</groupId>
			<artifactId>kafka-streams</artifactId>
		</dependency>
		<dependency>
			<groupId>org.springframework.kafka</groupId>
			<artifactId>spring-kafka</artifactId>
		</dependency>
		<dependency>
			<groupId>cn.hutool</groupId>
			<artifactId>hutool-all</artifactId>
			<version>4.6.2</version>
		</dependency>
	</dependencies>

	<build>
		<plugins>
			<!-- the plugin details were lost in extraction; spring-boot-maven-plugin is the usual entry here -->
			<plugin>
				<groupId>org.springframework.boot</groupId>
				<artifactId>spring-boot-maven-plugin</artifactId>
			</plugin>
		</plugins>
	</build>
</project>

3. Java producer sending messages, example code:

package com.fline.kafka.customer;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.util.Date;
import java.util.Properties;

/**
 * @Author panrenqing
 * @Date 2022/11/4 16:49
 * @Version 1.0
 */
public class KafkaKerberosProducer3 {

    public static final String krb5Conf = "D:\\kafkaSSL\\krb5.conf";
    public static final String kafkaJaasConf = "D:\\kafkaSSL\\kafka_client_jaas.conf";
    public static final String bootstrapServers = "47.92.170.121:9092,47.92.166.91:9092,47.92.162.206:9092";
    public static final String topic = "topic1";
    private static long count = 5;

    public static void main(String[] args) {
        // Required for Kerberos: point the JVM at the JAAS and krb5 configs
        // (absolute paths work as well, e.g. the krb5Conf/kafkaJaasConf constants above)
        System.setProperty("java.security.auth.login.config", ".\\src\\main\\resources\\kerberos\\kafka-client-jaas.conf");
        System.setProperty("java.security.krb5.conf", ".\\src\\main\\resources\\kerberos\\krb5.conf");

        Properties props = new Properties();
        props.put("bootstrap.servers", bootstrapServers);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        // Required for Kerberos: the following three properties
        props.put("security.protocol", "SASL_PLAINTEXT");
        props.put("sasl.kerberos.service.name", "kafka");
        props.put("sasl.mechanism", "GSSAPI");

        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(props);
        int i = 1;
        while (true) {
            String message = "{\"id\":" + i + ",\"ip\":\"192.168.0." + i + "\",\"date\":\"" + new Date() + "\"}";
            System.out.println(message);
            // Asynchronous send; the callback reports success or failure per record
            kafkaProducer.send(new ProducerRecord<>(topic, message), new Callback() {
                @Override
                public void onCompletion(RecordMetadata metadata, Exception exception) {
                    if (exception != null) {
                        exception.printStackTrace();
                    }
                }
            });
            try {
                Thread.sleep(200);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
            if (i++ > count) {
                break;
            }
        }
        // Flush buffered records and release resources; without this,
        // messages still in the client-side buffer may be lost when the JVM exits
        kafkaProducer.close();
    }
}
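Note that send() is asynchronous: records are collected in a client-side buffer and shipped in batches, and the callback fires only once the broker acknowledges (or rejects) each record. That is also why the final close() matters: it flushes anything still buffered before the JVM exits.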

4. Java consumer receiving messages, example code:

package com.hadoop.ljs.kafka010.security;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;

/**
 * @author: Created By lujisen
 * @company ChinaUnicom Software JiNan
 * @date: 2020-02-28 15:04
 * @version: v1.0
 * @description: com.hadoop.ljs.kafka010.security
 */
public class KafkaKerberosConsumer {
    public static final String krb5Conf = "D:\\kafkaSSL\\krb5.conf";
    public static final String kafkaJaasConf = "D:\\kafkaSSL\\kafka_client_jaas.conf";
    public static final String bootstrapServers = "salver31.hadoop.ljs:6667,salver32.hadoop.ljs:6667";
    public static final String topic = "topic1";
    public static final String consumerGroup = "group_topic1";

    public static void main(String[] args) {
        /* Required for Kerberos: the following two lines */
        System.setProperty("java.security.krb5.conf", krb5Conf);
        System.setProperty("java.security.auth.login.config", kafkaJaasConf);

        Properties props = new Properties();
        props.put("bootstrap.servers", bootstrapServers);
        props.put("group.id", consumerGroup);
        props.put("enable.auto.commit", "false");
        props.put("auto.offset.reset", "earliest");
        props.put("session.timeout.ms", "30000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        /* Required for Kerberos: the following three properties */
        props.put("security.protocol", "SASL_PLAINTEXT");
        props.put("sasl.mechanism", "GSSAPI");
        props.put("sasl.kerberos.service.name", "kafka");

        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(props);
        kafkaConsumer.subscribe(Arrays.asList(topic));
        while (true) {
            ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println("Partition: " + record.partition() + " Offset: " + record.offset()
                        + " Value: " + record.value() + " ThreadID: " + Thread.currentThread().getId());
            }
            // enable.auto.commit is false, so commit offsets manually;
            // otherwise the group re-reads from "earliest" on every restart
            kafkaConsumer.commitSync();
        }
    }
}
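Both examples load the JAAS section from an external file via java.security.auth.login.config. With kafka-clients 0.10.2 or later (the pom above pulls 2.7.0), the same section can instead be passed inline through the sasl.jaas.config client property, avoiding a separate jaas.conf. A minimal sketch, reusing the keytab and principal from above and dropping into the props of either example:

// Inline JAAS alternative (kafka-clients 0.10.2+): no external jaas.conf needed.
// java.security.krb5.conf must still point at a valid krb5.conf.
props.put("sasl.jaas.config",
        "com.sun.security.auth.module.Krb5LoginModule required "
        + "useKeyTab=true storeKey=true useTicketCache=false "
        + "keyTab=\"D:/kafkaSSL/kafka.service.keytab\" "
        + "principal=\"kafka/salver32.hadoop.unicom@CHINAUNICOM\";");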
