Kafka: A First Look

Introduction

I'd heard of Kafka for a long time but had never used it; until now my hands-on experience was with RabbitMQ and Alibaba Cloud's DataHub work queue, and this project finally gave me the chance. The requirement: dual-write order detail objects to MySQL and Elasticsearch, with ES serving external queries and the database acting as the data backup.
I immediately came up with two strategies:

  • Push the order data into Kafka and have consumer groups listen, implementing the dual write to MySQL and ES
  • Use a plugin that tails the MySQL binlog and performs the data sync (one of our
    company's projects currently uses this approach)

I ultimately chose the first option: after comparing the configuration burden of the various plugins, and given that the project already had Kafka in place, bringing in a plugin for data sync felt redundant (some plugin-based sync solutions themselves rely on a message queue for asynchronous replication).

Problems Introduced

Kafka offers asynchronous processing, peak shaving, and decoupling, which makes it look like a great addition; in actual use, though, there are other problems to think about:

  • How do we handle duplicate message consumption?

Consider the dual write. On the database side, the message object carries an order number generated by the snowflake algorithm, and we also use that order number as the MySQL primary key. With the sharding strategy configured in ShardingJDBC, a full day's data goes into a single table, so the primary-key constraint lets the database itself absorb duplicate consumption.
For ES, we write through RestHighLevelClient with the order number as the document ID: if a document with that ID already exists it is updated, and if not it is inserted. So duplicate consumption is easy to handle in this project; a sketch follows.
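
A minimal sketch of that idempotent ES write, assuming an index named order_detail and a getOrderNo() accessor on TOrderDetail (both names are my assumptions, not taken from the project code):

import com.alibaba.fastjson.JSON;
import com.xtm.platform.sharding.generator.entity.TOrderDetail;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;

public class OrderEsWriter {

    // Sketch: idempotent ES write keyed by the snowflake order number.
    // "order_detail" and getOrderNo() are assumed names.
    public void writeToEs(RestHighLevelClient client, TOrderDetail detail) throws IOException {
        IndexRequest request = new IndexRequest("order_detail")
                .id(String.valueOf(detail.getOrderNo()))  // fixed document id = order number
                .source(JSON.toJSONString(detail), XContentType.JSON);
        // With a fixed id, re-consuming the same message overwrites the same
        // document instead of creating a duplicate.
        client.index(request, RequestOptions.DEFAULT);
    }
}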

  • What do we do when a message can't be consumed and blocks consumption?

Kafka consumers consume in offset order: until the current offset is acknowledged, the next one is not consumed, which is quite similar to how DataHub consumption works. Under the default configuration, after a record fails its first consumption attempt it is retried 9 more times; if all 10 consecutive attempts fail, the record is discarded and consumption moves on to the next offset. That design seems fine as far as it goes, but a discarded message is a message nobody handles, which would leave MySQL and ES inconsistent in our project, and that is a big problem! Fortunately the defaults can be overridden. My strategy: retry a message 5 times, delaying for a while after each failure; if all 5 attempts fail, push it into a dead-letter queue and notify operations staff so they can handle the failed message manually. The container-factory code below implements this.

Let's go straight to some of the code.

Add the dependency


        
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>
        

添加配置文件

spring:
  kafka:
    bootstrap-servers: 192.168.10.237:9092,192.168.10.238:9092,192.168.10.239:9092
    template:
      default-topic: kfpt-dev
    listener:
      ack-mode: MANUAL_IMMEDIATE
    # custom property (not a standard Spring Kafka key); read via @Value in the code below
    death-topic: ${spring.kafka.template.default-topic}.DLX

Note that listener.ack-mode only affects the auto-configured container factory; the custom factories below set the ack mode programmatically, so the two should be kept in sync.

Producer code


import com.alibaba.fastjson.JSON;
import com.xtm.platform.sharding.generator.entity.TOrderDetail;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Component;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

import javax.annotation.Resource;

@Component
@Slf4j
public class KafkaProducer {

    @Resource
    private KafkaTemplate<String, String> kafkaTemplate;

    @Value("${spring.kafka.template.default-topic}")
    private String topicUser;

    /**
     * Send an order-detail message.
     *
     * @param tOrderDetail the order detail to publish
     */
    public void sendMessage(TOrderDetail tOrderDetail) {
        ListenableFuture<SendResult<String, String>> listenableFuture = kafkaTemplate.send(topicUser, JSON.toJSONString(tOrderDetail));
        listenableFuture.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {

            @Override
            public void onSuccess(SendResult<String, String> result) {
                RecordMetadata metadata = result.getRecordMetadata();
                log.info("message sent to " + metadata.topic() + ", partition " + metadata.partition() + ", offset " + metadata.offset());
            }

            @Override
            public void onFailure(Throwable ex) {
                log.error("send message failed with " + ex.getMessage(), ex);
            }

        });
    }
}
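
For context, a hypothetical caller, e.g. an order service right after the order is persisted (the class and method names here are illustrative, not from the project):

import com.xtm.platform.sharding.generator.entity.TOrderDetail;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;

// Illustrative caller: publish the freshly created order so the MySQL and
// ES consumer groups can each process it independently.
@Component
public class OrderPublisher {

    @Resource
    private KafkaProducer kafkaProducer;

    public void onOrderCreated(TOrderDetail detail) {
        kafkaProducer.sendMessage(detail);
    }
}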

Consumer code

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.xtm.platform.sharding.generator.entity.TOrderDetail;
import com.xtm.platform.sharding.service.OrderDetailService;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.SeekToCurrentErrorHandler;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;
import org.springframework.util.backoff.FixedBackOff;

import javax.annotation.Resource;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;

/**
 * Consumes order messages and writes them to MySQL.
 *
 * @version : 1.0
 * @author: tianwen
 * @create: 2021/5/11 17:18
 **/
@Component
@Slf4j
public class KafkaSqlConsumer {

    @Autowired
    private OrderDetailService orderDetailService;

    @Value("${spring.kafka.bootstrap-servers}")
    private String servers;

    @Value("${spring.kafka.death-topic}")
    private String deathTopic;

    @Value("${spring.kafka.template.default-topic}")
    private String topic;

    private final String groups = "cs";

    @Resource
    private KafkaTemplate<String, String> kafkaTemplate;

    @Bean("sqlKafkaListenerContainerFactory")
    public ConcurrentKafkaListenerContainerFactory<String, String> containerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
        // commit offsets manually, immediately after acknowledge()
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        // retry 5 times with a 15s back-off; once retries are exhausted,
        // route the record to the dead-letter topic
        SeekToCurrentErrorHandler seekToCurrentErrorHandler = new SeekToCurrentErrorHandler((consumerRecord, e) -> {
            log.error("retries exhausted, giving up on this record: {}", consumerRecord.toString(), e);
            kafkaTemplate.send(deathTopic, JSON.toJSONString(DeathMessage.builder().topic(topic).message(consumerRecord.value().toString()).desc("sync sql").createAt(new Date()).build()));
        }, new FixedBackOff(15000L, 5L));
        factory.setErrorHandler(seekToCurrentErrorHandler);
        return factory;
    }


    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>(10);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groups);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // max number of records per poll
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 100); // ignored: auto-commit is disabled
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 120000);
        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 180000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }


    @KafkaListener(topics = {"${spring.kafka.template.default-topic}"}, containerFactory = "sqlKafkaListenerContainerFactory", groupId = groups, concurrency = "3")
    public void consumerMsg(ConsumerRecord<String, String> record, Acknowledgment ack) {
        try {
            log.info("topic: {}, offset: {}, value: {}", record.topic(), record.offset(), record.value());
            TOrderDetail tOrderDetail = JSONObject.parseObject(record.value(), TOrderDetail.class);
            orderDetailService.addOrderDetail(tOrderDetail);
            ack.acknowledge();
        } catch (Exception e) {
            log.error("kafka consume failed, topic: {} group: {} body: {}", record.topic(), groups, record.value(), e);
            // rethrow so the SeekToCurrentErrorHandler can retry and,
            // after retries are exhausted, dead-letter the record
            throw new RuntimeException(e);
        }
    }
}
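
The DeathMessage payload used above isn't shown in the original post. Here's a minimal sketch reconstructed from the builder calls (topic / message / desc / createAt), assuming Lombok; the field types are my guesses:

import lombok.Builder;
import lombok.Data;

import java.util.Date;

// Sketch of the dead-letter payload, reconstructed from the builder calls
// above; field names come from the usage, field types are assumptions.
@Data
@Builder
public class DeathMessage {
    private String topic;    // business topic the failed record came from
    private String message;  // raw record body that could not be consumed
    private String desc;     // short label for the failing stage
    private Date createAt;   // time the record was pushed to the dead-letter topic
}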

Dead-letter queue consumer


import com.alibaba.fastjson.JSON;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.listener.ContainerProperties;
import org.springframework.kafka.listener.SeekToCurrentErrorHandler;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;
import org.springframework.util.backoff.FixedBackOff;

import javax.annotation.Resource;
import java.util.Date;
import java.util.HashMap;
import java.util.Map;

/**
 * Dead-letter queue consumer: notifies operations staff so they can
 * handle failed messages manually.
 *
 * @version : 1.0
 * @author: tianwen
 * @create: 2021/5/11 17:18
 **/
@Component
@Slf4j
public class KafkaDeathConsumer {

    @Value("${spring.kafka.death-topic}")
    private String deathTopic;

    @Value("${spring.kafka.template.default-topic}")
    private String topic;

    @Resource
    private KafkaTemplate<String, String> kafkaTemplate;

    @Value("${spring.kafka.bootstrap-servers}")
    private String servers;

    private final String groups = "death";

    @Bean("deathKafkaListenerContainerFactory")
    public ConcurrentKafkaListenerContainerFactory<String, String> containerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
        // commit offsets manually, immediately after acknowledge()
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        // if even the dead-letter consumer fails, the record is re-published
        // to the same dead-letter topic after the retries are exhausted
        SeekToCurrentErrorHandler seekToCurrentErrorHandler = new SeekToCurrentErrorHandler((consumerRecord, e) -> {
            log.error("dead-letter handling failed, giving up on this record: {}", consumerRecord.toString(), e);
            kafkaTemplate.send(deathTopic, JSON.toJSONString(DeathMessage.builder().topic(topic).message(consumerRecord.value().toString()).desc("dead-letter queue").createAt(new Date()).build()));
        }, new FixedBackOff(15000L, 5L));
        factory.setErrorHandler(seekToCurrentErrorHandler);
        return factory;
    }


    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>(10);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groups);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, servers);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // max number of records per poll
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100);
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 100); // ignored: auto-commit is disabled
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 120000);
        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 180000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    @KafkaListener(topics = {"${spring.kafka.death-topic}"}, containerFactory = "deathKafkaListenerContainerFactory", groupId = groups, concurrency = "1")
    public void consumerMsg(ConsumerRecord<String, String> record, Acknowledgment ack) {
        log.info("topic: {}, offset: {}, value: {}", record.topic(), record.offset(), record.value());
        // notify operations via DingTalk (the message body is elided in the original)
        DingUtil.pushMsgToDing("");
        ack.acknowledge();
    }
}
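
DingUtil is a custom notification helper that isn't shown in the original. A hypothetical sketch, assuming a standard DingTalk custom-robot webhook (the URL is a placeholder you'd replace with your own token):

import java.io.IOException;
import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

// Hypothetical sketch of DingUtil; the project's real class isn't shown.
public final class DingUtil {

    private static final String WEBHOOK_URL =
            "https://oapi.dingtalk.com/robot/send?access_token=<your-token>";

    private DingUtil() {
    }

    public static void pushMsgToDing(String content) {
        // DingTalk text-message payload
        String body = "{\"msgtype\":\"text\",\"text\":{\"content\":\""
                + content.replace("\"", "\\\"") + "\"}}";
        try {
            HttpURLConnection conn = (HttpURLConnection) new URL(WEBHOOK_URL).openConnection();
            conn.setRequestMethod("POST");
            conn.setRequestProperty("Content-Type", "application/json;charset=utf-8");
            conn.setDoOutput(true);
            try (OutputStream out = conn.getOutputStream()) {
                out.write(body.getBytes(StandardCharsets.UTF_8));
            }
            conn.getResponseCode(); // fire-and-forget; the response body is ignored
            conn.disconnect();
        } catch (IOException e) {
            // a failed notification should not break message consumption
        }
    }
}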
