Development notes on batch consumption of messages with a Kafka consumer.
The project uses spring-kafka 2.1.11.RELEASE:
<dependency>
    <groupId>org.springframework.kafka</groupId>
    <artifactId>spring-kafka</artifactId>
    <version>2.1.11.RELEASE</version>
</dependency>
Exclude the Kafka auto-configuration in the Spring Boot startup class:
@SpringBootApplication(scanBasePackages = {"com.pengyingjun"}, exclude = {KafkaAutoConfiguration.class})
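For reference, a minimal sketch of the complete startup class (the class name Application is an assumption):

import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration;

// Exclude KafkaAutoConfiguration so the custom consumer/producer beans defined below are used instead
@SpringBootApplication(scanBasePackages = {"com.pengyingjun"}, exclude = {KafkaAutoConfiguration.class})
public class Application {
    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }
}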
Add the Kafka-related configuration properties:
kafka.bootstrap-servers = kafka.*.*.com:9092
kafka.consumer.auto-commit-interval = 1000
kafka.consumer.max-poll-records = 1000
kafka.consumer.enable-auto-commit = true
kafka.consumer.concurrency = 5
kafka.consumer.group-id = pengyingjun_log
kafka.consumer.auto-offset-reset = earliest
kafka.consumer.log_topic = pengyingjun
Add the Kafka consumer configuration class:
import java.util.HashMap;
import java.util.Map;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.listener.ConcurrentMessageListenerContainer;

@Configuration
@EnableKafka
@Slf4j
public class KafkaConsumerConfig {

    /** Comma-separated list of host:port pairs used to establish the initial connection to the Kafka cluster */
    @Value("${kafka.bootstrap-servers}")
    private String servers;

    /** If true, the consumer's offsets are periodically committed in the background; default is true */
    @Value("${kafka.consumer.enable-auto-commit}")
    private boolean enableAutoCommit;

    /** How often, in milliseconds, the consumer's offsets are auto-committed when enable.auto.commit is true; default is 5000 */
    @Value("${kafka.consumer.auto-commit-interval}")
    private String autoCommitInterval;

    /** What to do when there is no initial offset in Kafka, or the current offset no longer exists on the server; default is latest (reset to the latest offset). Valid values: latest, earliest, none */
    @Value("${kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;

    /** Number of consumer threads to run in the listener container */
    @Value("${kafka.consumer.concurrency}")
    private int concurrency;

    /** Maximum number of records returned by a single call to poll(); default is 500 */
    @Value("${kafka.consumer.max-poll-records}")
    private int maxPollRecords;

    /** A unique string identifying the consumer group this consumer belongs to */
    @Value("${kafka.consumer.group-id}")
    private String groupId;

    /**
     * Batch consumer listener container factory
     */
    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
        // Number of consumers created concurrently
        factory.setConcurrency(concurrency);
        // Enable batch consumption; the batch size per poll is capped by ConsumerConfig.MAX_POLL_RECORDS_CONFIG
        factory.setBatchListener(true);
        factory.getContainerProperties().setPollTimeout(1500);
        return factory;
    }

    /**
     * Consumer configuration properties
     */
    private Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>(16);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        // Raise fetch and socket receive buffer sizes to 10 MB to support large batches
        props.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, 10485760);
        props.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, 10485760);
        return props;
    }
}
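One design note: with enable-auto-commit = true, offsets are committed on a timer whether or not a batch has finished processing. If at-least-once handling is needed instead, here is a hedged sketch of the manual-commit alternative (it assumes enable.auto.commit is switched to false; in spring-kafka 2.1.x the AckMode enum is nested in AbstractMessageListenerContainer):

// In kafkaListenerContainerFactory(), after factory.setBatchListener(true):
factory.getContainerProperties()
        .setAckMode(AbstractMessageListenerContainer.AckMode.MANUAL_IMMEDIATE);

// Listener variant that commits offsets only after the whole batch is processed:
@KafkaListener(topics = "pengyingjun", containerFactory = "kafkaListenerContainerFactory")
public void handleBatch(List<ConsumerRecord<?, ?>> records, Acknowledgment ack) {
    // ... process the batch ...
    ack.acknowledge(); // commit the offsets for this batch
}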
Add the Kafka producer configuration class:
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;

@Configuration
@EnableKafka
public class KafkaProducerConfig {

    @Value("${kafka.bootstrap-servers}")
    private String servers;

    private Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>(8);
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        // Do not retry failed sends
        props.put(ProducerConfig.RETRIES_CONFIG, 0);
        // Batch size in bytes (producer default is 16384)
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, 1000);
        // Wait up to 1 ms for more records before sending a batch
        props.put(ProducerConfig.LINGER_MS_CONFIG, 1);
        // Total bytes of memory for buffering records waiting to be sent (producer default is 33554432)
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 1000);
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        return props;
    }

    private ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
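Since kafkaTemplate.send() is asynchronous, a minimal sketch of checking the send result through a callback (the enclosing class with an injected kafkaTemplate and an Slf4j log field is assumed; in this spring-kafka version send() returns a ListenableFuture):

import org.springframework.kafka.support.SendResult;
import org.springframework.util.concurrent.ListenableFuture;

public void sendWithCallback(String message) {
    ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send("pengyingjun", message);
    future.addCallback(
            result -> log.info("sent to partition {} at offset {}",
                    result.getRecordMetadata().partition(), result.getRecordMetadata().offset()),
            ex -> log.error("send failed", ex));
}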
The consumer listener logic:
@KafkaListener(topics = "pengyingjun", groupId = "pengyingjun_log", containerFactory = "kafkaListenerContainerFactory")
public void handleHotValue(List<ConsumerRecord<?, ?>> records) {
    List<String> messages = new ArrayList<>();
    for (ConsumerRecord<?, ?> record : records) {
        Optional<?> kafkaMessage = Optional.ofNullable(record.value());
        kafkaMessage.ifPresent(o -> messages.add(o.toString()));
    }
    if (!messages.isEmpty()) {
        // business processing logic
    }
}
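To verify that batching takes effect, a hedged variant of the listener that just logs each delivered batch (each invocation receives one poll()'s worth of records, capped at max-poll-records; the method name is illustrative):

@KafkaListener(topics = "pengyingjun", groupId = "pengyingjun_log", containerFactory = "kafkaListenerContainerFactory")
public void handleWithLogging(List<ConsumerRecord<?, ?>> records) {
    // With the test below pushing 10,000 messages, batch sizes should approach max-poll-records (1000)
    log.info("received batch of {} records, first offset = {}", records.size(), records.get(0).offset());
}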
Test code to simulate a large volume of messages:
@Test
public void testSendKafka() throws InterruptedException {
    // 10,000 messages sent from at most 200 concurrent threads
    int clientTotal = 10000;
    int threadTotal = 200;
    ExecutorService executorService = Executors.newCachedThreadPool();
    final Semaphore semaphore = new Semaphore(threadTotal);
    final CountDownLatch countDownLatch = new CountDownLatch(clientTotal);
    for (int i = 0; i < clientTotal; i++) {
        executorService.execute(() -> {
            try {
                semaphore.acquire();
                String log = "223.104.63.101 - - [1594828915] \"GET /click/track?s0=WxAppStart&sm0=&sk0=&sRemarks0=&t0=GoodsDetailPage&tm0=&tk0=ABCDEFG&ts0=1594828915186 HTTP/1.1\" 200 \"Mozilla/5.0 (iPhone; CPU iPhone OS 9_2_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13D15 M\n" +
                        "icroMessenger/7.0.9(0x17000929) NetType/4G Language/zh_CN\" \"223.104.63.101\" \"click.dalingheart.com\" \"-\" \"wxapp\" \"0000070800011202756008308\" \"2\" \"-\" \"-\" \"\" \"-\" \"-\"";
                // Send to the topic the batch listener above is consuming
                kafkaTemplate.send("pengyingjun", log);
                semaphore.release();
            } catch (Exception e) {
                log.error("exception >>> ", e);
            }
            countDownLatch.countDown();
        });
    }
    // Block until every task has run, then shut the pool down
    countDownLatch.await();
    executorService.shutdown();
}
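The Semaphore caps in-flight send tasks at 200 while the CountDownLatch blocks the test thread until all 10,000 tasks have run, producing a burst sustained enough for the consumer to fill its batches.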
That completes the Kafka batch-consumption requirement.