<dependency>
    <groupId>org.apache.kafka</groupId>
    <artifactId>kafka-clients</artifactId>
    <version>1.0.1</version>
</dependency>
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>1.3.5.RELEASE</version>
    <exclusions>
        <exclusion>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
        </exclusion>
    </exclusions>
</dependency>
# broker cluster
kafka.producer.bootstrap.servers = xxxx:9092
kafka.producer.acks = all
# number of retries after a failed send
kafka.producer.retries = 3
kafka.producer.linger.ms = 10
# total memory (bytes) the producer may use to buffer records waiting to be sent
kafka.producer.buffer.memory = 40960
# batch size (bytes): when multiple records are sent to the same partition, the producer
# batches them into fewer requests, which improves both client and server performance
kafka.producer.batch.size = 4096
kafka.producer.defaultTopic = test
kafka.producer.key.serializer = org.apache.kafka.common.serialization.StringSerializer
kafka.producer.value.serializer = org.apache.kafka.common.serialization.StringSerializer
################# kafka consumer ##################
kafka.consumer.bootstrap.servers = xxxx:9092
# if true, the consumer's offsets are committed periodically in the background
kafka.consumer.enable.auto.commit = true
# when auto commit is enabled (enable.auto.commit=true), this sets the commit interval
kafka.consumer.auto.commit.interval.ms = 100
# consumer group ID; in the publish-subscribe model, each consuming application defines its
# own group, and within one group only a single consumer receives any given message
kafka.consumer.group.id = xxxA
# timeout used to detect consumer failures when using Kafka's group management
kafka.consumer.session.timeout.ms = 30000
kafka.consumer.key.deserializer = org.apache.kafka.common.serialization.StringDeserializer
kafka.consumer.value.deserializer = org.apache.kafka.common.serialization.StringDeserializer
# maximum number of records returned by a single poll (batch size)
kafka.consumer.maxPollRecords = 10000
# multiple topics, separated by commas
topics = xx,xx,xx,xx
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;
import org.springframework.stereotype.Component;
/**
 * Kafka consumer configuration class.
 *
 * @author panboyang
 * @date 2019/11/8 17:23
 */
@Component
@EnableKafka
public class KafkaConsumerConfig {
private static final Logger log = LoggerFactory.getLogger(KafkaConsumerConfig.class);
@Value("${kafka.consumer.group.id}")
private String groupId;
@Value("${kafka.producer.bootstrap.servers}")
private String list;
@Value("${kafka.consumer.enable.auto.commit}")
private String commit;
@Value("${kafka.consumer.auto.commit.interval.ms}")
private String commitMs;
@Value("${kafka.consumer.session.timeout.ms}")
private String timeoutMs;
@Value("${kafka.consumer.key.deserializer}")
private String keyDeserializer;
@Value("${kafka.consumer.key.deserializer}")
private String valueDeserializer;
@Value("${kafka.consumer.maxPollRecords}")
private String maxPollRecords;
@Bean
KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, String> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory());
    // number of concurrent consumer threads; should not exceed the topic's partition count
    factory.setConcurrency(3);
    factory.getContainerProperties().setPollTimeout(3000);
    // enable batch consumption: the listener receives a List of records per poll
    factory.setBatchListener(true);
    return factory;
}
@Bean
public ConsumerFactory<String, String> consumerFactory() {
    return new DefaultKafkaConsumerFactory<>(consumerProperties());
}
@Bean
public Map<String, Object> consumerProperties() {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, commit);
    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, commitMs);
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, timeoutMs);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
    // batch parameter: maximum number of records returned per poll
    props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
    return props;
}
@Bean
public KafkaConsumerListenser kafkaConsumerListener() {
    return new KafkaConsumerListenser();
}
}
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
/**
 * Kafka producer configuration class.
 *
 * @author panboyang
 * @date 2019/11/8 17:23
 */
@Configuration
@EnableKafka
public class KafkaProducerConfig {
@Value("${kafka.producer.bootstrap.servers}")
private String servers;
@Value("${kafka.producer.retries}")
private String retries;
@Value("${kafka.producer.batch.size}")
private String size;
@Value("${kafka.producer.linger.ms}")
private String ms;
@Value("${kafka.producer.buffer.memory}")
private String memory;
@Value("${kafka.producer.key.serializer}")
private String keySerializer;
@Value("${kafka.producer.value.serializer}")
private String valueSerializer;
@Value("${kafka.producer.acks}")
private String acks;
@Value("${kafka.producer.defaultTopic}")
private String defaultTopic;
public KafkaProducerConfig() {
    System.out.println("Kafka producer configuration loaded");
}
@Bean
public ProducerFactory<String, String> producerFactory() {
    return new DefaultKafkaProducerFactory<>(producerProperties());
}
@Bean
public Map<String, Object> producerProperties() {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializer);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializer);
    props.put(ProducerConfig.RETRIES_CONFIG, retries);
    props.put(ProducerConfig.BATCH_SIZE_CONFIG, size);
    props.put(ProducerConfig.LINGER_MS_CONFIG, ms);
    props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, memory);
    props.put(ProducerConfig.ACKS_CONFIG, acks);
    return props;
}
@Bean
public KafkaTemplate<String, String> kafkaTemplate() {
    // autoFlush = true: flush after every send
    KafkaTemplate<String, String> kafkaTemplate = new KafkaTemplate<>(producerFactory(), true);
    kafkaTemplate.setDefaultTopic(defaultTopic);
    return kafkaTemplate;
}
}
package com.comtop.smart.ipdp.dataIntegration.boLianDataPlatform.service;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.annotation.KafkaListener;
import com.alibaba.fastjson.JSONArray;
import com.comtop.smart.ipdp.dataIntegration.boLianDataPlatform.model.BlUpdateVO;
import com.comtop.smart.ipdp.dataIntegration.boLianDataPlatform.model.FileInfoVO;
import com.comtop.smart.ipdp.dataIntegration.boLianDataPlatform.util.KafkaConsumerUtil;
import com.comtop.smart.ipdp.dataIntegration.importTask.mapper.DataImportTaskMapper;
import com.comtop.smart.ipdp.dataIntegration.importTask.model.DataImportTaskVO;
/**
 * Kafka listener class.
 *
 * @author panboyang
 * @date 2019/11/8 17:23
 */
public class KafkaConsumerListenser {
    /* logger */
    protected final Logger LOGGER = LoggerFactory.getLogger(KafkaConsumerListenser.class);
@Autowired
private DataImportTaskMapper dataImportTaskMapper;
@Autowired
private DataFileParseService dataFileParseService;
/**
 * Kafka listener method; containerFactory points at the batch-enabled factory above,
 * so each invocation receives a List of records from one poll.
 *
 * @author panboyang
 * @date 2019/11/11 17:23
 */
@KafkaListener(groupId = "groupA", topics = "#{'${topics}'.split(',')}", containerFactory = "kafkaListenerContainerFactory")
void listener(List<ConsumerRecord<String, String>> records) {
    // iterate the polled batch; the business logic (file parsing, task updates) goes here
    for (ConsumerRecord<String, String> record : records) {
        LOGGER.info("topic={}, partition={}, offset={}, value={}",
                record.topic(), record.partition(), record.offset(), record.value());
    }
}
}
The offsets here can also be committed manually; if you see anything worth improving, please leave a comment.