Using Kafka

pom.xml dependencies

		<dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>3.3.1</version>
        </dependency>

        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>
        </dependency>

        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-api</artifactId>
            <version>1.7.19</version>
        </dependency>

        <dependency>
            <groupId>org.slf4j</groupId>
            <artifactId>slf4j-log4j12</artifactId>
            <version>1.7.25</version>
        </dependency>

        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-lang3</artifactId>
            <version>3.9</version>
        </dependency>

Kafka admin client code

/**
 * Kafka admin client
 * @author
 * @create 2023-05-31 22:06
 */
public class KafkaTopicDML {
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties properties = new Properties();
        properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,"127.0.0.1:9092");
//        properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,"127.0.0.1:9092,127.0.0.1:9093,127.0.0.1:9094");
        KafkaAdminClient client = (KafkaAdminClient) KafkaAdminClient.create(properties);

        //create a topic
        /*
         * topic name, number of partitions, replication factor
         * note: a replication factor of 3 requires at least 3 brokers; use (short) 1 on a single-node setup
         */
        CreateTopicsResult createTopicsResult = client.createTopics(Arrays.asList(new NewTopic("topic02", 3, (short) 3)));
        createTopicsResult.all().get();//block until the asynchronous create completes

        /**
         * Print topic information
         */
        //list all topics
        ListTopicsResult topicsResult = client.listTopics();
        Set<String> names = topicsResult.names().get();
        for (String name : names) {
            System.out.println(name);
        }

        //delete a topic
        DeleteTopicsResult deleteTopics = client.deleteTopics(Arrays.asList("topic02"));
        deleteTopics.all().get();//block until the asynchronous delete completes

        //describe topic details
//        DescribeTopicsResult describeTopicsResult = client.describeTopics(Arrays.asList("topic02"));
//        Map<String, TopicDescription> topicDescriptionMap = describeTopicsResult.all().get();//block until the result is available
//        Set<Map.Entry<String, TopicDescription>> entries = topicDescriptionMap.entrySet();
//        for (Map.Entry<String, TopicDescription> entry : entries) {
//            System.out.println(entry.getKey());
//            System.out.println(entry.getValue());
//            System.out.println("=============");
//        }

        //close the admin client
        client.close();
    }
}

Producer

public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"localhost:9092");
        prop.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        prop.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class);

        KafkaProducer<String, String> producer = new KafkaProducer<>(prop);

        try {
            //build the message
            ProducerRecord<String, String> record = new ProducerRecord<>("my-topic","my-key","yuyang");
//            ProducerRecord<String, String> record = new ProducerRecord<>("my-topic",1,"my-key","yuyang");//specify the partition explicitly

            //send the message
            producer.send(record);

            log.info("message is sent");
//            System.out.println("message is sent");
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            producer.close();
        }
    }

Consumer

public static void main(String[] args) {
        Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"localhost:9092");
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class.getName());
        prop.put(ConsumerConfig.GROUP_ID_CONFIG,"group01");//consumer group

        //2. create the consumer
        KafkaConsumer<String,String> consumer=new KafkaConsumer<String, String>(prop);

        //3. subscribe to the topic and let the group coordinator assign partitions
        consumer.subscribe(Collections.singletonList("my-topic"));
        //or subscribe to every topic whose name starts with "topic"
//        consumer.subscribe(Pattern.compile("^topic.*$"));

        //alternatively, assign partitions manually instead of subscribing
//        List<TopicPartition> partitions = Arrays.asList(new TopicPartition("topic", 0));
//        consumer.assign(partitions);
//        consumer.seekToBeginning(partitions);//consume from the beginning
//        consumer.seek(new TopicPartition("my-topic",0),1);//seek partition 0 to offset 1 (only valid once the partition is assigned to this consumer)

        while (true) {
            //poll for records, waiting up to 1 second
            ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
            if (!consumerRecords.isEmpty()) {
                Iterator<ConsumerRecord<String, String>> iterator = consumerRecords.iterator();
                while (iterator.hasNext()) {
                    //read one record
                    ConsumerRecord<String, String> record = iterator.next();
                    String topic = record.topic();
                    System.out.println("topic = " + topic);
                    int partition = record.partition();
                    System.out.println("partition = " + partition);
                    long offset = record.offset();
                    System.out.println("offset = " + offset);

                    String key = record.key();
                    System.out.println("key = " + key);
                    String value = record.value();
                    System.out.println("value = " + value);
                    long timestamp = record.timestamp();
                    System.out.println("timestamp = " + timestamp);
                }
            }
        }
    }

Consumer with manually assigned partitions

        KafkaConsumer<String,String> consumer=new KafkaConsumer<String, String>(prop);

        List<TopicPartition> partitions = Arrays.asList(new TopicPartition("topic", 0));
        consumer.assign(partitions);
        //position the consumer within the assigned partitions
        consumer.seekToBeginning(partitions);//consume from the beginning
        
        //or seek to a specific offset within a partition
//        consumer.seek(new TopicPartition("topic01",0),1);//start consuming from offset 1

Producer partitioning strategy

        //register a custom partitioner
        prop.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, UserDefinePartitioner.class);

Custom partitioner class

public class UserDefinePartitioner implements Partitioner {
    private AtomicInteger counter = new AtomicInteger(0);
    /**
     * Return the partition number for a record
     * @param topic
     * @param key
     * @param keybytes
     * @param value
     * @param valuebytes
     * @param cluster
     * @return the partition the record should be written to
     */
    @Override
    public int partition(String topic, Object key, byte[] keybytes,
                         Object value, byte[] valuebytes, Cluster cluster) {
        //get all partitions of the topic
        List<PartitionInfo> partitionInfos = cluster.partitionsForTopic(topic);
        int numPartitions = partitionInfos.size();
        if (keybytes == null) {
            int addIncrement = counter.getAndIncrement();
            return (addIncrement&Integer.MAX_VALUE)%numPartitions;
        }else {
            return Utils.toPositive(Utils.murmur2(keybytes))%numPartitions;
        }
    }

    @Override
    public void close() {
        System.out.println("close");
    }

    @Override
    public void configure(Map<String, ?> map) {
        System.out.println("configure");
    }
}

Producer custom interceptor

        prop.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, UserDefineInterceptor.class.getName());

Custom interceptor class

public class UserDefineInterceptor implements ProducerInterceptor<String, String> {

    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> producerRecord) {
        return new ProducerRecord<>(producerRecord.topic(),producerRecord.key(),producerRecord.value()+":===");
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        System.out.println("metadata = " + metadata);
        System.out.println("exception = " + exception);
    }

    @Override
    public void close() {
        System.out.println("close");
    }

    @Override
    public void configure(Map<String, ?> map) {
        System.out.println("configure");
    }
}

What interceptors are for:
When the producer sends a record, an interceptor can decorate or enrich the data, observe the record's lifecycle, and access the record's metadata.

Advanced API

Automatic offset control

prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"earliest");//(最早)自动将偏移量重置为最早偏移量
        
prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"laster");//(最近)自动将偏移量重置为最新的偏移量
        
prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"none");//如果未找到消费者组的先前偏移量,则向消费者抛出异常

By default the Kafka consumer periodically commits the offsets it has consumed, which guarantees that every message is consumed at least once. This behaviour is controlled by the following two parameters:

enable.auto.commit=true (default) //enable automatic offset commits
auto.commit.interval.ms=5000 (default)

If you want to manage offsets yourself, disable automatic commits and commit them manually. The committed offset must always be the offset of the last consumed record plus 1, because the committed offset is the position from which the consumer will fetch next.

        //commit offsets automatically, once every 5 seconds
        prop.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG,5000);
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,true);

Manual commit

 Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"localhost:9092");
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class.getName());
        prop.put(ConsumerConfig.GROUP_ID_CONFIG,"group01");

        //./bin/kafka-topics.sh --bootstrap-server localhost:9092 --create --topic topic01 --partitions 3 --replication-factor 3
        prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG,"earliest");//start from the earliest offset

        //disable automatic offset commits
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,false);

        //2. create the consumer
        KafkaConsumer<String,String> consumer=new KafkaConsumer<String, String>(prop);
        //3. subscribe to the topic
        consumer.subscribe(Arrays.asList("my-topic"));

        while (true) {
            ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
            if (!consumerRecords.isEmpty()) {
                Iterator<ConsumerRecord<String, String>> iterator = consumerRecords.iterator();

                //track the offset metadata for each consumed partition
                Map<TopicPartition, OffsetAndMetadata> offsetMap = new HashMap<>();

                while (iterator.hasNext()) {
                    //read one record
                    ConsumerRecord<String, String> record = iterator.next();
                    String topic = record.topic();
                    System.out.println("topic = " + topic);
                    int partition = record.partition();
                    System.out.println("partition = " + partition);
                    long offset = record.offset();
                    System.out.println("offset = " + offset);

                    String key = record.key();
                    System.out.println("key = " + key);
                    String value = record.value();
                    System.out.println("value = " + value);
                    long timestamp = record.timestamp();
                    System.out.println("timestamp = " + timestamp);

                    //record the offset metadata for the consumed partition: committed offset = consumed offset + 1
                    offsetMap.put(new TopicPartition(topic,partition),new OffsetAndMetadata(offset + 1));
                    //commit the consumer offsets synchronously
//                    consumer.commitSync(offsetMap);
                    //commit the consumer offsets asynchronously
                    consumer.commitAsync(offsetMap, new OffsetCommitCallback() {
                        @Override
                        public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsetMap, Exception e) {
                            System.out.println("offsetMap = " + offsetMap + " ,Exception" + e);
                        }
                    });
                }
            }
        }

ACKS & Retries
After sending a record, the Kafka producer expects the broker to acknowledge (ACK) it within a configured time. If no acknowledgement arrives in time, the producer will retry sending the record up to n times.

acks defaults to 1.
acks=1: the leader writes the record to its local log and responds without waiting for all followers to acknowledge it. If the leader fails immediately after acknowledging the record but before the followers have replicated it, the record is lost.
acks=0: the producer does not wait for any acknowledgement from the server; the record is added to the socket buffer and considered sent. In this case there is no guarantee that the server has received the record (i.e. applied it to its log).
acks=-1: the leader waits for the full set of in-sync replicas to acknowledge the record. This guarantees that the record is not lost as long as at least one in-sync replica remains alive, and it is the strongest available guarantee. It is equivalent to acks=all.

If the producer does not receive the leader's acknowledgement within the configured time, Kafka's retry mechanism kicks in.
request.timeout.ms = 30000 (default)
retries = 2147483647 (default)

        prop.put(ProducerConfig.ACKS_CONFIG,"-1");
        //number of retries, not counting the first send
        prop.put(ProducerConfig.RETRIES_CONFIG,3);
        //set the request timeout to 1ms (deliberately low, to force timeouts for testing)
        prop.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG,1);

Idempotent writes
The idempotence guarantee in one line: no more, no less; nothing lost, nothing duplicated.
What is idempotence: issuing a request once or many times has the same effect on the resource itself (network timeouts and the like aside). In other words, any number of executions affects the resource exactly as a single execution would. Idempotence is also referred to as exactly-once.

Kafka added support for idempotence in version 0.11.0.0. Idempotence is a producer-side feature: it guarantees that records sent by the producer are neither lost nor duplicated. The key to implementing it is that the server can tell whether a request is a duplicate and filter duplicates out. Two things are needed for that:

A unique identifier: to tell requests apart, each request must carry a unique identifier. In a payment request, for example, the order number serves as that identifier.

A record of processed identifiers: a unique identifier alone is not enough; the server must also remember which requests it has already processed, so that when a new request arrives it can compare its identifier against the processed set and reject duplicates.

To avoid processing a message more than once, it must be persisted to the Kafka topic exactly once. During initialization, Kafka assigns the producer a unique ID called the Producer ID, or PID.

The PID and a sequence number are bundled with every message sent to the broker. Because sequence numbers start at zero and increase monotonically, the broker accepts a message only if its sequence number is exactly one greater than that of the last committed message for the same PID / TopicPartition pair. Otherwise the broker concludes that the producer is re-sending the message.
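
To make that acceptance rule concrete, here is a toy sketch of a per-PID/partition sequence check. This is not Kafka's actual broker code; the class and method names are made up purely for illustration.

import java.util.HashMap;
import java.util.Map;

public class SequenceCheckSketch {
    //lastSequence tracks the last accepted sequence number per (PID, partition) pair
    private final Map<String, Integer> lastSequence = new HashMap<>();

    //accept the message only if its sequence number is exactly one greater than the last accepted one
    public boolean accept(long pid, int partition, int sequence) {
        String key = pid + "-" + partition;
        int expected = lastSequence.getOrDefault(key, -1) + 1;
        if (sequence == expected) {
            lastSequence.put(key, sequence); //remember the new sequence number
            return true;
        }
        return false; //duplicate or out-of-order: treated as a re-send and rejected
    }

    public static void main(String[] args) {
        SequenceCheckSketch broker = new SequenceCheckSketch();
        System.out.println(broker.accept(1L, 0, 0)); //true  - first message for this PID/partition
        System.out.println(broker.accept(1L, 0, 1)); //true  - next in sequence
        System.out.println(broker.accept(1L, 0, 1)); //false - duplicate, filtered out
    }
}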

enable.idempotence = false (default in clients before 3.0; it defaults to true from Kafka 3.0 onwards)
Note: to use idempotence, retries must be enabled (retries > 0) and acks must be set to all.

		//set acks and retries
        prop.put(ProducerConfig.ACKS_CONFIG,"-1");
        //not counting the first send; if all 3 retries fail, the send is given up
        prop.put(ProducerConfig.RETRIES_CONFIG,3);
        //set the request timeout to 1ms (deliberately low, to force timeouts for testing)
        prop.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG,1);

        //enable idempotence
        prop.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG,true);
        //keep records strictly ordered:
        //with more than n unacknowledged in-flight requests the client blocks
        prop.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION,1);

In short: one part is the unique identifier (the PID plus the sequence number), the other is switching Kafka's idempotence feature on.

Kafka transactions
Kafka's idempotence only guarantees atomicity for a single record written to a single partition. To guarantee integrity across multiple records (and multiple partitions), Kafka transactions are needed.

Besides idempotence, Kafka 0.11.0.0 also introduced transactions. Kafka transactions are usually used in two ways: producer-only transactions, and consumer & producer transactions. By default consumers read at the read_uncommitted level, which can expose data from failed transactions, so once producer transactions are enabled the consumer's isolation level must be set accordingly.

isolation.level = read_uncommitted (default)
This option accepts two values, read_committed and read_uncommitted. With transaction control enabled, the consumer must set the isolation level to read_committed.

To enable producer transactions, it is enough to set the transactional.id property; once transactions are enabled, idempotence is enabled automatically. The transactional.id must be unique: only one producer with a given transactional.id can be active at a time, and any other producer using the same id will be fenced off.

Producer-only transactions
Producer side

		//1. connection parameters
        Properties props=new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"CentOSA:9092,CentOSB:9092,CentOSC:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class.getName());

        //transactional.id is required and must be unique
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG,"transaction-id"+ UUID.randomUUID().toString());
        //batch size
        props.put(ProducerConfig.BATCH_SIZE_CONFIG,1024);
        //wait up to 5ms; send even if the batch has not reached 1024 bytes
        props.put(ProducerConfig.LINGER_MS_CONFIG,5);
        //enable retries and idempotence
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG,true);
        props.put(ProducerConfig.ACKS_CONFIG,"-1");
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG,10000);//request timeout

        //2. create the producer
        KafkaProducer<String,String> producer=new KafkaProducer<String, String>(props);
        producer.initTransactions();//initialize transactions

        try{
            producer.beginTransaction();//begin the transaction
            //3. build and send the records
            for(int i=0;i< 10;i++){
                Thread.sleep(10000);
                ProducerRecord<String, String> record = new ProducerRecord<>("topic01", "key" + i, "value" + i);
                producer.send(record);
                producer.flush();
            }
            producer.commitTransaction();//commit the transaction
        }catch (Exception e){
            producer.abortTransaction();//abort the transaction
        }
        producer.close();

Consumer side

        prop.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG,"read_committed");

Consumer & producer transactions

public static void main(String[] args) {
        KafkaProducer<String, String> producer = buildProducer();
        KafkaConsumer<String, String> consumer = buildConsumer("group01");

        //initialize transactions
        producer.initTransactions();

        consumer.subscribe(Collections.singletonList("my-topic"));

        while (true) {
            ConsumerRecords<String, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
            if (!consumerRecords.isEmpty()) {

                Iterator<ConsumerRecord<String, String>> iterator = consumerRecords.iterator();

                Map<TopicPartition, OffsetAndMetadata> offsetAndMetadataMap = new HashMap<>();
                //begin the transaction
                producer.beginTransaction();
                try {
                    //iterate over the records and do the business processing
                    while (iterator.hasNext()) {
                        //read one record
                        ConsumerRecord<String, String> record = iterator.next();
                        String topic = record.topic();
                        System.out.println("topic = " + topic);
                        int partition = record.partition();
                        System.out.println("partition = " + partition);
                        long offset = record.offset();
                        System.out.println("offset = " + offset);

                        String key = record.key();
                        System.out.println("key = " + key);
                        String value = record.value();
                        System.out.println("value = " + value);
                        long timestamp = record.timestamp();
                        System.out.println("timestamp = " + timestamp);

                        offsetAndMetadataMap.put(new TopicPartition(topic,partition),new OffsetAndMetadata(offset+1));

                        ProducerRecord<String, String> producerRecord = new ProducerRecord<>("my-topic", record.key() , record.value()+"yuyang");

                        producer.send(producerRecord);
                    }

                    //commit: first send the consumer offsets to the transaction
                    producer.sendOffsetsToTransaction(offsetAndMetadataMap,"group01");
                    //then commit the transaction
                    producer.commitTransaction();
                }catch (Exception e){
                    System.err.println("Transaction failed: "+e);
                    producer.abortTransaction();
                }
                }


            }
        }
    }

    public static KafkaProducer<String,String> buildProducer(){
        //1. connection parameters
        Properties props=new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class);

        //transactional.id is required and must be unique
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG,"transaction-id"+UUID.randomUUID().toString());
        //batch size
        props.put(ProducerConfig.BATCH_SIZE_CONFIG,1024);
        //wait up to 5ms; send even if the batch has not reached 1024 bytes
        props.put(ProducerConfig.LINGER_MS_CONFIG,5);
        //enable retries and idempotence
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG,true);
        props.put(ProducerConfig.ACKS_CONFIG,"-1");
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG,10000);//request timeout

        //2. create the producer
        return new KafkaProducer<String, String>(props);
    }

    public static KafkaConsumer<String,String> buildConsumer(String group){
        Properties prop = new Properties();
        prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,"localhost:9092");
        prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,StringDeserializer.class);
        prop.put(ConsumerConfig.GROUP_ID_CONFIG,group);//consumer group
        //set the consumer's transaction isolation level
        prop.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG,"read_committed");
        //automatic offset commits must be disabled
        prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG,false);

        //2. create the consumer
        return new KafkaConsumer<String, String>(prop);
    }

Kafka data replication

A Kafka topic is divided into multiple partitions, and each partition is stored on disk as a sequence of segment files. A partition log is an ordered sequence of records, and Kafka guarantees ordering of events within a partition. The leader handles reads and writes for its partition while the followers replicate the leader's data. Before version 0.11, Kafka relied on the high-watermark mechanism to keep replicas in sync, but high-watermark-based synchronization could lead to data inconsistency or reordering. Kafka replication involves the following concepts.

LEO (log end offset): the position after the last message in a partition; every replica of a partition maintains its own LEO.

HW (high watermark): all data below the HW is considered safely replicated; once all replicas have successfully replicated up to a point, the leader advances the high watermark.

ISR (in-sync replicas): the leader maintains the set of replicas that are in sync with it. A replica is removed from the ISR if it has not sent a fetch request within replica.lag.time.max.ms, or if it keeps sending requests but fails to catch up with the leader within that time. Kafka 0.9.0 removed the replica.lag.max.messages criterion (lag measured as a message count) because it caused brokers to join and leave the ISR too frequently.
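
To inspect the leader, replicas and ISR of a topic from code, a minimal sketch using the AdminClient might look like the following (it assumes a broker at localhost:9092 and an existing topic named topic01; the class name is made up for illustration):

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.TopicPartitionInfo;

public class DescribeIsr {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        try (AdminClient client = AdminClient.create(props)) {
            //describe the topic and block until the result is available
            TopicDescription description = client.describeTopics(Collections.singletonList("topic01"))
                    .all().get().get("topic01");

            //print the leader, replicas and in-sync replicas of every partition
            for (TopicPartitionInfo partition : description.partitions()) {
                System.out.println("partition " + partition.partition()
                        + " leader=" + partition.leader()
                        + " replicas=" + partition.replicas()
                        + " isr=" + partition.isr());
            }
        }
    }
}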

Kafka with Spring Boot

		<dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>

        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-streams</artifactId>
            <version>2.0.1</version>
        </dependency>

        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
            <scope>test</scope>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka-test</artifactId>
            <scope>test</scope>
        </dependency>

Configuration file

spring.kafka.bootstrap-servers=localhost:9092

#number of retries
spring.kafka.producer.retries=5
#acks=all is equivalent to acks=-1
spring.kafka.producer.acks=all
spring.kafka.producer.batch-size=16384
spring.kafka.producer.buffer-memory=33554432
#enable transactions (the prefix must make the transactional id unique)
#spring.kafka.producer.transaction-id-prefix=transaction-id-
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
#idempotence
spring.kafka.producer.properties.enable.idempotence=true


spring.kafka.consumer.group-id=group1
#offset reset policy
spring.kafka.consumer.auto-offset-reset=earliest
#automatic offset commits
spring.kafka.consumer.enable-auto-commit=true
spring.kafka.consumer.auto-commit-interval=100
#transaction isolation level
spring.kafka.consumer.properties.isolation.level=read_committed
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer

Consumer listeners

@KafkaListeners(
            value = {@KafkaListener(topics = {"topic01"})}
    )
    public void receive01(ConsumerRecord<String,String> record){
        System.out.println("record = " + record);
    }


    @KafkaListeners(
            value = {@KafkaListener(topics = {"topic02"})}
    )
    @SendTo("topic03")//接收到请求后向某个分区发送 //可以对消息进行处理
    public String recevice02(ConsumerRecord<String,String> record){
        return record.value()+"\t"+"yuyang";
    }

Producer

public interface MessageSender {
    public void sendMessage(String topic,String key,String message);
}
@Service
//@Transactional   //enable transaction support (see the transactional variant below)
public class MessageSenderImpl implements MessageSender {
    @Autowired
    private KafkaTemplate<String,String> kafkaTemplate;

    @Override
    public void sendMessage(String topic, String key, String message) {
        kafkaTemplate.send(new ProducerRecord<>(topic,key,message));
    }
}

Sending with Kafka transactions

public void testSendMessageInTransaction(String topic, String key, String message){
        kafkaTemplate.executeInTransaction(new KafkaOperations.OperationsCallback<String, String, Object>() {
            @Override
            public Object doInOperations(KafkaOperations<String, String> kafkaOperations) {
                //everything sent inside this callback is part of one Kafka transaction
                return kafkaOperations.send(new ProducerRecord<>(topic, key, message));
            }
        });
    }

@Service
@Transactional   //enable transaction support
public class MessageSenderImpl implements MessageSender {
    @Autowired
    private KafkaTemplate<String,String> kafkaTemplate;

    @Override
    public void sendMessage(String topic, String key, String message) {
        kafkaTemplate.send(new ProducerRecord<>(topic,key,message));
    }
}
