<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
</dependency>
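In a Spring Boot project the <version> element can usually be omitted, because the Boot dependency-management BOM pins a compatible spring-kafka version.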
kafka:
  producer:
    bootstrap-servers: 127.0.0.1:9002
    batch-size: 32768 # maximum bytes per batch (32 KB)
    retries: 3 # number of retries after a failed send
    buffer-memory: 33554432 # 32 MB send buffer
    linger: 5 # delay in ms; if batch-size is not reached, the batch is sent once this time elapses
  consumer:
    bootstrap-servers: 127.0.0.1:9002
    auto-offset-reset: latest # where a new consumer group starts reading; latest: newest offset, earliest: oldest offset
    max-poll-records: 2000 # maximum number of records returned by a single poll()
    enable-auto-commit: false # whether offsets are committed automatically
    auto-commit-interval: 1000 # auto-commit interval in ms; only effective when auto-commit is enabled
    session-timeout: 20000 # consumer group session timeout in ms; the broker evicts the consumer if no heartbeat arrives within this window
    max-poll-interval: 15000 # maximum delay in ms between two poll() calls; if exceeded, the consumer is considered dead and the group rebalances
    max-partition-fetch-bytes: 10485760 # maximum bytes fetched per partition (10 MB)
    group-id: mpa-1 # consumer group id
  listener:
    batch-listener: true # whether to consume in batches; true enables batch consumption
    concurrencys: 5 # number of consumer threads
    poll-timeout: 1500 # poll timeout in ms (how long poll() blocks waiting for records)
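Note that everything above sits under a custom kafka.* prefix rather than Spring Boot's standard spring.kafka.* prefix, so Boot's Kafka auto-configuration does not pick these values up; the two configuration classes below bind them explicitly with @Value and build the producer and consumer beans by hand.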
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

@Configuration
@EnableKafka
public class KafkaProducerConfig {

    @Value("${kafka.producer.bootstrap-servers}")
    private String bootstrapServers;
    @Value("${kafka.producer.retries}")
    private Integer retries;
    @Value("${kafka.producer.batch-size}")
    private Integer batchSize;
    @Value("${kafka.producer.buffer-memory}")
    private Integer bufferMemory;
    @Value("${kafka.producer.linger}")
    private Integer linger;

    private Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>(16);
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        props.put(ProducerConfig.LINGER_MS_CONFIG, linger);
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    private ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
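With these settings the producer buffers records for up to 5 ms (linger) or until a 32 KB batch fills, whichever comes first, retries a failed send up to three times, and can hold up to 32 MB of unsent records in memory.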
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.kafka.listener.ContainerProperties;

@Configuration
@EnableKafka
public class KafkaConsumerConfig {

    @Value("${kafka.consumer.bootstrap-servers}")
    private String bootstrapServers;
    @Value("${kafka.consumer.enable-auto-commit}")
    private Boolean autoCommit;
    @Value("${kafka.consumer.auto-commit-interval}")
    private Integer autoCommitInterval;
    @Value("${kafka.consumer.max-poll-records}")
    private Integer maxPollRecords;
    @Value("${kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;
    @Value("${kafka.listener.concurrencys}")
    private Integer concurrency;
    @Value("${kafka.listener.poll-timeout}")
    private Long pollTimeout;
    @Value("${kafka.consumer.session-timeout}")
    private Integer sessionTimeout;
    @Value("${kafka.listener.batch-listener}")
    private Boolean batchListener;
    @Value("${kafka.consumer.max-poll-interval}")
    private Integer maxPollInterval;
    @Value("${kafka.consumer.max-partition-fetch-bytes}")
    private Integer maxPartitionFetchBytes;
    @Value("${kafka.consumer.group-id}")
    private String groupId;

    private Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>(20);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeout);
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollInterval);
        props.put(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, maxPartitionFetchBytes);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        return props;
    }

    /**
     * Batch listener container factory; the number of consumer threads is
     * controlled by kafka.listener.concurrencys.
     * @return the container factory referenced by @KafkaListener
     */
    @Bean
    @ConditionalOnMissingBean(name = "kafkaBatchListener")
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaBatchListener() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = kafkaListenerContainerFactory();
        factory.setConcurrency(concurrency);
        return factory;
    }

    private ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        // Enable batch consumption
        factory.setBatchListener(batchListener);
        // If no records are available, poll() waits up to pollTimeout ms before returning;
        // if records are available they are returned immediately, up to max.poll.records per poll.
        factory.getContainerProperties().setPollTimeout(pollTimeout);
        // Offset commit mode: MANUAL_IMMEDIATE commits as soon as acknowledge() is called;
        // MANUAL queues the acks and commits them together after the batch.
        // On older spring-kafka versions, use AbstractMessageListenerContainer.AckMode.MANUAL instead.
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
        return factory;
    }

    private ConsumerFactory<String, String> consumerFactory() {
        return new DefaultKafkaConsumerFactory<>(consumerConfigs());
    }
}
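Because the ack mode is MANUAL, every listener wired to this factory must accept an Acknowledgment parameter and call acknowledge() itself; otherwise the offset is never committed and the records are re-read after a restart or rebalance. The listener examples below follow this pattern.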
import org.apache.kafka.clients.producer.ProducerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Component;
import org.springframework.util.concurrent.ListenableFuture;

import lombok.extern.slf4j.Slf4j;

@Component
@Slf4j
public class KafkaSender {

    private final KafkaTemplate<String, String> kafkaTemplate;

    @Autowired
    public KafkaSender(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    public void sendMessage(String topic, String message) {
        log.info("Send msg:{}", message);
        ListenableFuture<SendResult<String, String>> sender =
                kafkaTemplate.send(new ProducerRecord<>(topic, message));
        sender.addCallback(
                result -> log.info("Send success:offset({}),partition({}),topic({})",
                        result.getRecordMetadata().offset(),
                        result.getRecordMetadata().partition(),
                        result.getRecordMetadata().topic()),
                ex -> log.error("Send fail:{}", ex.getMessage()));
    }
}
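To see the producer and consumer interact end to end, the sender can be driven from application startup. The following is a minimal sketch, not part of the original project: the KafkaSenderDemo class name and the message loop are illustrative, while the "hello" topic matches the listeners below.

import org.springframework.boot.CommandLineRunner;
import org.springframework.stereotype.Component;

// Hypothetical demo component: sends a few test messages to the "hello"
// topic on startup, which the batch listeners below will pick up.
@Component
public class KafkaSenderDemo implements CommandLineRunner {

    private final KafkaSender kafkaSender;

    public KafkaSenderDemo(KafkaSender kafkaSender) {
        this.kafkaSender = kafkaSender;
    }

    @Override
    public void run(String... args) {
        for (int i = 0; i < 10; i++) {
            kafkaSender.sendMessage("hello", "message-" + i);
        }
    }
}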
import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

import lombok.extern.slf4j.Slf4j;

/**
 * Message consumer.
 */
@Component
@Slf4j
public class KafkaConsumer {

    /**
     * containerFactory: the batch listener factory defined above; the number of
     * consumer threads is controlled by kafka.listener.concurrencys.
     * topics: the topic(s) to consume from.
     * @param records the batch of records returned by one poll
     * @param ack handle for manually committing the offset
     */
    @KafkaListener(containerFactory = "kafkaBatchListener", topics = {"hello"})
    public void batchListener1(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        try {
            records.forEach(record -> {
                // TODO - process the message
                log.info("receive {} msg:{}", record.topic(), record.value());
            });
        } catch (Exception e) {
            // TODO - handle processing failures
            log.error("kafka listen error:{}", e.getMessage());
        } finally {
            // Manually commit the offset
            ack.acknowledge();
        }
    }

    /**
     * Same as batchListener1: a second listener on the same topic. Both use
     * the same group-id, so the topic's partitions are distributed between them.
     * @param records the batch of records returned by one poll
     * @param ack handle for manually committing the offset
     */
    @KafkaListener(containerFactory = "kafkaBatchListener", topics = {"hello"})
    public void batchListener2(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        try {
            records.forEach(record -> {
                // TODO - process the message
                log.info("receive {} msg:{}", record.topic(), record.value());
            });
        } catch (Exception e) {
            // TODO - handle processing failures
            log.error("kafka listen error:{}", e.getMessage());
        } finally {
            // Manually commit the offset
            ack.acknowledge();
        }
    }
}
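One caveat with the listeners above: because acknowledge() sits in the finally block, the offset is committed even when processing throws, so a failed batch is effectively skipped. If redelivery is preferred, acknowledge only on success. A sketch follows; batchListener3 is a hypothetical method name, and note that unacknowledged records are re-read only after a rebalance or restart, not immediately.

@KafkaListener(containerFactory = "kafkaBatchListener", topics = {"hello"})
public void batchListener3(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
    try {
        records.forEach(record -> log.info("receive {} msg:{}", record.topic(), record.value()));
        // Commit only after the whole batch was processed successfully.
        ack.acknowledge();
    } catch (Exception e) {
        // No acknowledge here: the offset stays uncommitted, so the batch
        // is re-read after a rebalance or restart.
        log.error("kafka listen error:{}", e.getMessage(), e);
    }
}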
Full project source: https://gitee.com/xn-mg/netty_kafka