Notes on using Kafka with Spring Boot

  1. Add the spring-kafka dependency


```xml
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
```
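If the project inherits from the Spring Boot parent POM (or imports the `spring-boot-dependencies` BOM), the `<version>` tag can be omitted; the spring-kafka version is then managed to match the Boot release.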
  2. Nacos configuration

```yaml
spring:
  kafka:
    bootstrap-servers: ${kafka.kafkaServers}
    producer:
      # Number of retries after a send error.
      retries: 0
      # When several records are sent to the same partition, the producer puts them in
      # one batch. This sets the maximum memory used per batch, in bytes.
      batch-size: 16384
      # Size of the producer's memory buffer, in bytes.
      buffer-memory: 33554432
      # Key serializer
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      # Value serializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
      # acks=0   : the producer does not wait for any response from the broker before considering the write successful.
      # acks=1   : the producer gets a success response once the partition leader has received the record.
      # acks=all : the producer gets a success response only after all replicating nodes have received the record.
      acks: 1
    consumer:
      group-id: task_mdb_job_status_group
      # What the consumer does when a partition has no committed offset, or the offset is
      # invalid: latest = newest record, earliest = start of the partition.
      auto-offset-reset: latest
      # Whether to commit offsets automatically; the default is true.
      enable-auto-commit: true
      # Auto-commit interval. In Spring Boot 2.x this property is bound as a Duration,
      # so values such as 1s, 1m, 2h, 5d are also accepted.
      auto-commit-interval: 1000
      # Maximum number of records returned by a single poll() call
      max-poll-records: 10
      # Raise the poll interval, or add consumer instances, to increase concurrency.
      # Note: the three dotted keys below are not standard Spring Boot properties; they are
      # read directly via @Value in the KafkaConfig class in step 3. The standard form would
      # be spring.kafka.consumer.properties.max.poll.interval.ms and so on.
      max.poll.interval.ms: 600000
      # Consumer session timeout (if no heartbeat arrives within this window, a rebalance is triggered)
      session.timeout.ms: 600000
      # Consumer request timeout
      request.timeout.ms: 90000
      # Key deserializer
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # Value deserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    listener:
      # Number of threads running in the listener container.
      concurrency: 1
      # The listener container commits the offset after each record is processed
      # (this only takes effect when enable-auto-commit is false).
      ack-mode: RECORD
      missing-topics-fatal: false
    # Custom key, read via @Value in KafkaConfig.
    autoStartup: true
kafka:
  # bootstrap.servers takes host:port pairs; no http:// scheme.
  kafkaServers: dev-kafka01.test.cloud:9092
  check_result_kafka_topic: hd-check-task-status
  consumer_docker_job_status:
    topic: hd-docker-job-status
    groupId: check_docker_job_status_group
```
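The producer keys above are picked up by Spring Boot's auto-configuration, which builds the default `KafkaTemplate`; only the consumer side is wired by hand in step 3. If you ever need to build the producer side manually as well (for example, to run two templates with different serializers), a minimal sketch along the same lines might look like this. The `ProducerKafkaConfig` class and bean names are illustrative, not part of the original project:

```java
package com.xxx.check.config;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

// Hypothetical manual producer wiring, mirroring the YAML producer keys above.
@Configuration
public class ProducerKafkaConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String servers;

    @Bean
    public ProducerFactory<String, String> producerFactory() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ProducerConfig.RETRIES_CONFIG, 0);                // retries
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);         // batch-size
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432L);  // buffer-memory
        props.put(ProducerConfig.ACKS_CONFIG, "1");                 // acks
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        return new DefaultKafkaProducerFactory<>(props);
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
```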

3. The shared Kafka configuration class KafkaConfig

```java
package com.xxx.check.config;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

import java.util.HashMap;
import java.util.Map;

/**
 * Shared Kafka configuration.
 *
 * Copyright © 2020 ECARX Co., Ltd. All Rights Reserved.
 * Company: xxx Technology Co., Ltd.
 *
 * @author smartone
 * @date 2021/3/2 19:30
 */
@Configuration
@EnableKafka
@Slf4j
public class KafkaConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String servers;
    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private boolean enableAutoCommit;
    @Value("${spring.kafka.consumer.session.timeout.ms}")
    private String sessionTimeout;
    @Value("${spring.kafka.consumer.request.timeout.ms}")
    private String requestTimeout;
    @Value("${spring.kafka.consumer.auto-commit-interval}")
    private String autoCommitInterval;
    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;
    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;
    @Value("${spring.kafka.listener.concurrency}")
    private int concurrency;
    @Value("${spring.kafka.autoStartup}")
    private Boolean autoStartup;
    @Value("${spring.kafka.consumer.max.poll.interval.ms}")
    private int maxPollInterval;
    @Value("${spring.kafka.consumer.max-poll-records}")
    private int maxPollRecords;

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        factory.setConcurrency(concurrency);
        // Do not fail on startup if a subscribed topic does not exist yet.
        factory.setMissingTopicsFatal(false);
        factory.getContainerProperties().setPollTimeout(1500);
        factory.setBatchListener(false);
        // Whether the listener containers start automatically.
        factory.setAutoStartup(autoStartup);
        return factory;
    }

    /**
     * Build the consumer factory by hand instead of using the
     * DefaultKafkaConsumerFactory that Spring Boot auto-configures.
     */
    public ConsumerFactory<String, String> consumerFactory() {
        log.info("init kafka Factory====");
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> propsMap = new HashMap<>();
        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
        propsMap.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeout);
        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollInterval);
        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        return propsMap;
    }
}
```
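Because the bean above is named `kafkaListenerContainerFactory`, it is the factory that `@KafkaListener` methods use by default, so the listener in step 4 needs no explicit `containerFactory` attribute; you would only set one if several container factories coexisted in the application.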

4. Kafka message listener ListenerAfterBuild

```java
package com.xxx.check.excutor.workflow.checkafterbuild;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.util.Optional;

/**
 * Topic listener.
 *
 * @author wangzy
 * @date 2019/8/29
 */
@Slf4j
@Component
public class ListenerAfterBuild {

    /**
     * Consumes one record at a time from the configured topic.
     * The placeholders must match keys defined in the step-2 config.
     *
     * @param record the record delivered by the listener container
     * @throws IOException if message processing fails
     */
    @KafkaListener(topics = {"${kafka.consumer_docker_job_status.topic}"},
                   groupId = "${kafka.consumer_docker_job_status.groupId}")
    public void listener(ConsumerRecord<String, String> record) throws IOException {
        Optional<String> message = Optional.ofNullable(record.value());
        if (message.isPresent()) {
            log.info("consumer topic:{} message:{}", record.topic(), record.value());
        }
    }
}
```
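With `enable-auto-commit: true` as configured in step 2, offsets are committed by the Kafka client itself on the auto-commit interval. If you would rather commit per record from the listener, a common variation (an assumption here, not part of the original project) is to set `enable-auto-commit: false`, change the listener `ack-mode` to `MANUAL_IMMEDIATE`, and accept an `Acknowledgment` parameter:

```java
// Sketch of a manually acknowledged listener; requires
// enable-auto-commit: false and ack-mode: MANUAL_IMMEDIATE.
@KafkaListener(topics = {"${kafka.consumer_docker_job_status.topic}"},
               groupId = "${kafka.consumer_docker_job_status.groupId}")
public void listenerWithAck(ConsumerRecord<String, String> record,
                            org.springframework.kafka.support.Acknowledgment ack) {
    try {
        log.info("consumer topic:{} message:{}", record.topic(), record.value());
    } finally {
        // Commit the offset of this record immediately.
        ack.acknowledge();
    }
}
```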

5. Kafka message producer KafkaSendService

```java
package com.xxx.check.kafka;

import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Service;
import org.springframework.util.concurrent.ListenableFutureCallback;

/**
 * Generic send service.
 *
 * @author xxxx
 * @since 2023/1/13 17:47
 */
@Slf4j
@Service
public class KafkaSendService {

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    @Value("${kafka.check_result_kafka_topic}")
    private String topic;

    public void sendMsg(String msg, Long jobId) {
        // The producer is configured with StringSerializer, so send the message
        // as a String rather than as raw bytes.
        kafkaTemplate.send(topic, msg).addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
            @Override
            public void onFailure(Throwable ex) {
                log.error("Failed to send message to the workbench, jobId:{}", jobId, ex);
            }

            @Override
            public void onSuccess(SendResult<String, String> result) {
                log.info("Message sent to the workbench, jobId:{}, msg:{}", jobId, msg);
            }
        });
    }
}
```
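Typical usage is to inject the service and call it wherever a check result needs to be reported; the JSON payload below is illustrative, the real message format depends on the consumer:

```java
@Autowired
private KafkaSendService kafkaSendService;

public void reportStatus(Long jobId) {
    // Hypothetical payload for demonstration purposes.
    String msg = "{\"jobId\":" + jobId + ",\"status\":\"SUCCESS\"}";
    kafkaSendService.sendMsg(msg, jobId);
}
```

Note that the `ListenableFuture`/`addCallback` style above applies to Spring Kafka 2.x; in Spring Kafka 3.x, `KafkaTemplate.send` returns a `CompletableFuture`, so the callback would be attached with `whenComplete` instead.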
