Spring Boot Integration with Kafka

YAML Configuration

If downstream processing cannot keep up, you can raise the number of records fetched per poll. When you do, set not only the max-poll-records property (the maximum number of records per batch) but also set the listener type to batch (the default is single, one record at a time; batch enables batch consumption); otherwise the listener fails with a type-conversion error.

  kafka:
    # Kafka connection address (note: the address of the Kafka brokers themselves, not ZooKeeper)
    bootstrap-servers: localhost:9097, localhost:9098, localhost:9099
    producer:
      # Number of send retries; must not be 0 when transactions are enabled (or simply leave it unset)
      retries: 3
      # Batch size in bytes: a batch is sent once it grows to this size
      batch-size: 16384
      # Total buffer memory for the producer; it holds multiple batches, and once full,
      # new records block until buffered ones have been sent to Kafka and freed space
      buffer-memory: 33554432
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      # If a batch never fills up to batch.size, the sender flushes it after linger.ms anyway.
      # Spring Boot has no dedicated producer property for this, so it goes under properties
      properties:
        linger.ms: 5
      # Acknowledgement level; must be -1 or all when transactions are enabled
      acks: all
      # Enables transactions; the prefix can be any string
      transaction-id-prefix: kafka-transaction-
      # Note: min.insync.replicas is a broker/topic-level setting, not a producer
      # property, so configure it on the broker or the topic rather than here

    consumer:
      # Auto-commit interval; only takes effect when auto-commit is enabled.
      # Must use the duration format: 1S, 1M, 1H, 2D (seconds, minutes, hours, days)
      auto-commit-interval: 1S
      # Strategy when the consumer has no offset for a partition or the offset is invalid:
      # earliest reads from the beginning, latest reads only new records
      auto-offset-reset: earliest
      # Maximum number of records per poll (batch)
      max-poll-records: 500
      # Whether to auto-commit offsets; usually false. When false, auto-commit-interval above has no effect
      enable-auto-commit: false
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    listener:
      # Manual commit: the offset is committed as soon as ack is called
      ack-mode: manual_immediate
      # Number of consumer threads the container runs
      concurrency: 4
      # Batch consumption (default is single; max-poll-records above controls the batch size)
      type: batch
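
For reference, here is a minimal sketch of the listener container factory these settings correspond to (a hypothetical bean: with spring.kafka.listener.type set to batch, Spring Boot auto-configures an equivalent kafkaListenerContainerFactory, so you normally do not declare it yourself):

import org.springframework.context.annotation.Bean;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.listener.ContainerProperties;

@Bean
public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory(
        ConsumerFactory<String, String> consumerFactory) {
    ConcurrentKafkaListenerContainerFactory<String, String> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory);
    // listener.type: batch -> the listener method receives a List of records per poll
    factory.setBatchListener(true);
    // listener.concurrency: 4 -> four consumer threads per container
    factory.setConcurrency(4);
    // listener.ack-mode: manual_immediate -> commit as soon as ack.acknowledge() is called
    factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
    return factory;
}

This is the factory the listeners below reference via containerFactory = "kafkaListenerContainerFactory".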

Configuration Class

When Kafka consumption capacity falls short, increase the topic's partition count and, at the same time, increase the number of consumers in the consumer group, so that the number of consumers equals the number of partitions.

import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
public class KafkaConfig {

    @Bean
    public AdminClient initAdminClient() {
        Properties properties = new Properties();
        // Key: the constant resolves to the string "bootstrap.servers" (writing the literal also works)
        // Value: ip:port of the Kafka brokers, multiple nodes separated by "," (must match
        // spring.kafka.bootstrap-servers in application.yml, since they mean the same thing)
        properties.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9097, localhost:9098, localhost:9099");
        return AdminClient.create(properties);
    }

    /**
     * Creates topic jf0 with 3 partitions and a replication factor of 1,
     * declared as a bean (bean name: initialTopic).
     */
    @Bean
    public NewTopic initialTopic() {
        return new NewTopic("jf0", 3, (short) 1);
    }


    /**
     * Declaring a NewTopic bean with the same name as an existing topic updates that topic.
     * Note that the partition count can only be increased, never decreased.
     */
    @Bean
    public NewTopic initialTopic2() {
        return new NewTopic("jf1", 3, (short) 2);
    }

    /**
     * Same as above: a NewTopic bean reusing an existing topic's name updates it,
     * and partitions can only grow.
     */
    @Bean
    public NewTopic initialTopic3() {
        return new NewTopic("jf2", 3, (short) 2);
    }
}
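
To grow an existing topic at runtime, the AdminClient bean above can be used directly. A minimal sketch (the method name and the target count of 6 are hypothetical; Kafka only allows the partition count to increase):

import java.util.Collections;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewPartitions;

public void increasePartitions(AdminClient adminClient) throws Exception {
    // NewPartitions.increaseTo() takes the new TOTAL partition count, not a delta
    adminClient.createPartitions(
            Collections.singletonMap("jf0", NewPartitions.increaseTo(6)))
        .all()
        .get(); // block until the controller has applied the change
}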

Producer Code

The send method must be annotated with @Transactional; without it an error is thrown, because once transaction-id-prefix is configured the KafkaTemplate only allows sends inside a transaction. The example below sends 10,000 records in a loop and updates each record's status in the send callback.

    // This method lives in a @RestController with a KafkaTemplate<String, String>
    // and a KafkaTestMapper (MyBatis-Plus mapper) injected.
    @GetMapping("/sendKafka0")
    @Transactional
    public void sendKafka0() {
        for (int i = 0; i < 10000; i++) {
            String s = UUID.randomUUID().toString();
            KafkaTest kafkaTest = new KafkaTest();
            kafkaTest.setContent(s);
            kafkaTest.setUpdateTime(new Date());
            kafkaTestMapper.insert(kafkaTest);
            ListenableFuture<SendResult<String, String>> send = kafkaTemplate.send("jf0", JSON.toJSONString(kafkaTest));

            send.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {

                @Override
                public void onSuccess(SendResult<String, String> result) {
                    kafkaTest.setProducerStatus(1);
                    kafkaTest.setProducerOffset(result.getRecordMetadata().offset());
                    kafkaTestMapper.updateById(kafkaTest);
                    log.warn("Producer - message sent successfully: " + result);
                }

                @Override
                public void onFailure(Throwable ex) {
                    kafkaTest.setProducerStatus(-1);
                    log.error("Producer - message send failed: " + ex.getMessage());
                    kafkaTestMapper.updateById(kafkaTest);
                }
            });
        }
    }
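
The KafkaTest entity and its mapper are not shown in the original; here is a minimal sketch inferred from the getters and setters used above (assuming MyBatis-Plus with an auto-increment primary key; the table name is a guess):

import java.util.Date;
import com.baomidou.mybatisplus.annotation.IdType;
import com.baomidou.mybatisplus.annotation.TableId;
import com.baomidou.mybatisplus.annotation.TableName;
import lombok.Data;

@Data
@TableName("kafka_test")  // hypothetical table name
public class KafkaTest {
    @TableId(type = IdType.AUTO)
    private Long id;
    private String content;
    private Date updateTime;
    private Integer producerStatus;   // 1 = sent, -1 = send failed
    private Long producerOffset;      // offset reported by the send callback
    private String consumerId;
    private String groupId;
    private String topic;
    private Integer consumerStatus;   // 1 = consumed, -1 = consume failed
    private Integer partitionId;
    private Long consumerOffset;      // offset of the consumed record
}

// In its own file: the mapper only needs the MyBatis-Plus base CRUD methods used above
public interface KafkaTestMapper extends BaseMapper<KafkaTest> {
}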

Consumer Code

The topic has 3 partitions, and three consumers are configured, one pinned to each partition.

import java.util.Date;
import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

import com.alibaba.fastjson.JSON;

import lombok.extern.slf4j.Slf4j;

@Component
@Slf4j
public class KafkaConsumerTest {

    @Autowired
    private KafkaTestMapper kafkaTestMapper;

    // Simple consumer; the groupId can be any name. Note that topics and topicPartitions
    // are mutually exclusive on @KafkaListener: the topic is already named inside
    // @TopicPartition, so it must not be repeated in the topics attribute.
    @KafkaListener(id = "Consumer0", groupId = "jf0-group", topicPartitions = {
            @TopicPartition(topic = "jf0", partitions = {"0"}),
    }, containerFactory = "kafkaListenerContainerFactory")
    public void consumer0(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        this.consumer(records, ack, "Consumer0", "jf0-group");
    }

    @KafkaListener(id = "Consumer1", groupId = "jf1-group", topicPartitions = {
            @TopicPartition(topic = "jf0", partitions = {"1"}),
    }, containerFactory = "kafkaListenerContainerFactory")
    public void consumer1(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        this.consumer(records, ack, "Consumer1", "jf1-group");
    }

    @KafkaListener(id = "Consumer2", groupId = "jf2-group", topicPartitions = {
            @TopicPartition(topic = "jf0", partitions = {"2"}),
    }, containerFactory = "kafkaListenerContainerFactory")
    public void consumer2(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        this.consumer(records, ack, "Consumer2", "jf2-group");
    }

    public void consumer(List<ConsumerRecord<String, String>> records, Acknowledgment ack, String consumerId, String groupId) {
        log.warn("Records fetched in this poll: " + records.size());
        for (ConsumerRecord<String, String> record : records) {
            String value = record.value();
            KafkaTest test = JSON.parseObject(value, KafkaTest.class);
            log.warn("Consumed record: " + test);
            test = kafkaTestMapper.selectById(test.getId());
            if (null != test) {
                try {
                    test.setConsumerId(consumerId);
                    test.setGroupId(groupId);
                    test.setTopic(record.topic());
                    test.setConsumerStatus(1);
                    test.setUpdateTime(new Date());
                    test.setPartitionId(record.partition());
                    test.setConsumerOffset(record.offset());
                    Thread.sleep(3); // simulate per-record processing time
                } catch (Exception e) {
                    log.error(e.getMessage());
                    test.setConsumerStatus(-1);
                } finally {
                    kafkaTestMapper.updateById(test);
                }
            }
        }
        // manual_immediate: this single call commits the offsets of the whole batch
        ack.acknowledge();
    }
}
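
A hypothetical alternative worth noting: instead of pinning one partition per listener, a single batch listener whose concurrency matches the partition count lets the group protocol spread the three partitions across three threads automatically (the concurrency attribute requires Spring Kafka 2.2+):

@KafkaListener(id = "ConsumerAll", groupId = "jf-group", topics = "jf0",
        containerFactory = "kafkaListenerContainerFactory", concurrency = "3")
public void consumeAll(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
    // same processing as consumer(...) above; each of the 3 threads owns one partition
}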
