如何实现精准一次性消费

核心思路：将 Kafka 中的数据读出来写入到 MySQL 中，并把业务数据和消费位移（offset）放在同一个 MySQL 事务里提交，从而实现精准一次（Exactly-Once）消费。


```java
package com.doit.day01;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.sql.*;
import java.time.Duration;
import java.util.Arrays;
import java.util.Collection;
import java.util.Properties;

public class KafkaToMysql {
    /**
     * Reads records from Kafka topic "k2m1" and writes them into MySQL with
     * exactly-once semantics: the business-data insert and the consumer offset
     * are committed in the SAME MySQL transaction, so each record is either
     * fully applied (data + offset) or not applied at all.
     *
     * Offsets live in table t_offset keyed by "topic_partition"; on a
     * rebalance the consumer seeks back to the offset persisted in MySQL
     * instead of trusting Kafka's own committed offsets.
     *
     * Expected record value format: three comma-separated fields
     * (int id, then two strings) matching the 3 columns of user_info
     * — TODO confirm schema against the table definition.
     */
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "linux01:9092");
        props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "doit03");
        // Offsets are committed to MySQL, never to Kafka, so auto-commit must be off.
        props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        // try-with-resources: the original leaked the consumer, connection,
        // statements and result set — close them all deterministically.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
             Connection conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/test", "root", "123456")) {
            // Manual transactions so the data insert and the offset upsert commit atomically.
            conn.setAutoCommit(false);

            try (PreparedStatement pps = conn.prepareStatement("insert into user_info  values(?,?,?)");
                 PreparedStatement ppsOff = conn.prepareStatement("insert into  t_offset values(?,?) on DUPLICATE key UPDATE offset = ?");
                 PreparedStatement off = conn.prepareStatement("select offset from t_offset where topic_partition = ?")) {

                consumer.subscribe(Arrays.asList("k2m1"), new ConsumerRebalanceListener() {
                    @Override
                    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                        // Nothing to flush: every processed record was already committed to MySQL.
                    }

                    @Override
                    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                        // After a rebalance, resume each assigned partition from the
                        // offset persisted in MySQL (source of truth for this pipeline).
                        try {
                            for (TopicPartition partition : partitions) {
                                off.setString(1, partition.topic() + "_" + partition.partition());
                                try (ResultSet resultSet = off.executeQuery()) {
                                    while (resultSet.next()) {
                                        long offset = resultSet.getLong(1);
                                        System.out.println("发生了消费者再均衡了,分区啥的都重新分配了,新的方案是:" + partition.topic() + "," + partition.partition());
                                        consumer.seek(partition, offset);
                                    }
                                }
                                // No row found: fall through to auto.offset.reset=earliest.
                            }
                        } catch (SQLException e) {
                            e.printStackTrace();
                        }
                    }
                });

                while (true) {
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMinutes(Integer.MAX_VALUE));
                    // Process per partition so a failure in one partition can be retried
                    // without silently skipping records of other partitions.
                    for (TopicPartition tp : records.partitions()) {
                        for (ConsumerRecord<String, String> record : records.records(tp)) {
                            try {
                                String[] arr = record.value().split(",");
                                pps.setInt(1, Integer.parseInt(arr[0]));
                                pps.setString(2, arr[1]);
                                pps.setString(3, arr[2]);
                                pps.execute();

                                // Persist the NEXT offset to consume (+1), in the same transaction.
                                ppsOff.setString(1, tp.topic() + "_" + tp.partition());
                                ppsOff.setLong(2, record.offset() + 1);
                                ppsOff.setLong(3, record.offset() + 1);
                                ppsOff.execute();

                                // Commit data + offset atomically.
                                conn.commit();
                            } catch (Exception e) {
                                e.printStackTrace();
                                // Undo the half-done transaction.
                                conn.rollback();
                                // BUGFIX: the original rolled back but left the consumer
                                // position advanced, so the failed record was never
                                // redelivered in this session (in-session data loss).
                                // Seek back so the failed record is re-polled.
                                consumer.seek(tp, record.offset());
                                // Abandon the rest of this partition's batch; it will be
                                // re-fetched from the failed offset on the next poll.
                                break;
                            }
                        }
                    }
                }
            }
        }
    }
}

你可能感兴趣的:(java,kafka,mysql)