Manually controlling Kafka offsets in Java

Previously I used Kafka's KafkaStream, giving each consumer a stream bound to its partition to read data from Kafka. When a consumer receives data, Kafka automatically maintains that consumer's offset. Suppose the consumer has fetched a message and is about to write it to the database (but has not yet done so) when it crashes: with automatic offset management, Kafka will consider the message already consumed, and the data is lost.

However, Kafka also lets you commit offsets manually. In the scenario above we commit ourselves: if the consumer crashes, the program never reaches the commit, so another consumer in the same group can consume the message again and no data is lost. First, make the following setting:

// disable auto-commit so that we update the offset manually
properties.put("enable.auto.commit", "false");

Then commit with the following API:

consumer.commitSync();
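
For context, here is a minimal sketch of the resulting poll / process / commit loop. It only illustrates the pattern described above and is not code from the project below; the broker address, group id, topic name and class name are placeholders:

    import java.util.Arrays;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;

    public class ManualCommitSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");   // placeholder broker list
            props.put("group.id", "demo-group");                 // placeholder group
            props.put("enable.auto.commit", "false");            // manual offset management
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

            KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
            consumer.subscribe(Arrays.asList("demo-topic"));     // placeholder topic
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (ConsumerRecord<String, String> record : records) {
                    // your real processing (e.g. writing to the database) goes here;
                    // if it throws, commitSync() below is never reached
                    System.out.println(record.offset() + " " + record.value());
                }
                // committed only after every record in this poll was processed
                consumer.commitSync();
            }
        }
    }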

Note:

I just ran a test. Say I pull 5 records from Kafka, numbered 1, 2, 3, 4 and 5. If the processing of records 1, 2, 3 and 4 fails (so nothing is committed for them), but record 5 is processed successfully and a commit is issued, the offset still moves past record 5, so records 1, 2, 3 and 4 are effectively lost as well.

If the consumer pulls data and all of the processing fails without committing, then after the consumer is restarted the data is read again from where the failure happened.

So the consumer should still implement its own fault tolerance, for example along the lines of the sketch below.
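
The following is a hedged sketch of one such fault-tolerance scheme, not the original author's code; the broker, group, topic and the inlined processing step are placeholders. The idea is to process each partition's records in order, stop at the first failure, and commit only the offsets that were actually processed, so the failed record and everything after it gets delivered again:

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.List;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.common.TopicPartition;

    public class CommitUpToFirstFailure {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");   // placeholder broker list
            props.put("group.id", "demo-group");                 // placeholder group
            props.put("enable.auto.commit", "false");
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

            KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
            consumer.subscribe(Arrays.asList("demo-topic"));      // placeholder topic
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                for (TopicPartition tp : records.partitions()) {
                    List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);
                    long nextOffset = partitionRecords.get(0).offset();   // nothing processed yet
                    for (ConsumerRecord<String, String> record : partitionRecords) {
                        try {
                            // your real processing goes here
                            System.out.println(record.offset() + " " + record.value());
                            nextOffset = record.offset() + 1;             // processed successfully
                        } catch (Exception e) {
                            break;  // stop at the first failure in this partition
                        }
                    }
                    // commit only what was actually processed in this partition ...
                    consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(nextOffset)));
                    // ... and rewind so the failed record is fetched again on the next poll
                    consumer.seek(tp, nextOffset);
                }
            }
        }
    }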

The test project structure is as follows:

(screenshot of the project structure)

The ConsumerThreadNew class:

package com.lijie.kafka;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * @Filename ConsumerThreadNew.java
 *
 * @Description
 *
 * @Version 1.0
 *
 * @Author Lijie
 *
 * @Email [email protected]
 *
 * @History
 * Author: Lijie
 * Date: 2017-03-21
 * Version: 1.0
 * Content: create
 */
public class ConsumerThreadNew implements Runnable {
    private static Logger LOG = LoggerFactory.getLogger(ConsumerThreadNew.class);
    // the KafkaConsumer used by this thread
    private KafkaConsumer<String, String> consumer;
    // name of this consumer
    private String name;
    // topics to consume
    private List<String> topics;

    // constructor
    public ConsumerThreadNew(KafkaConsumer<String, String> consumer, String topic, String name) {
        super();
        this.consumer = consumer;
        this.name = name;
        this.topics = Arrays.asList(topic);
    }

    @Override
    public void run() {
        consumer.subscribe(topics);
        List<ConsumerRecord<String, String>> buffer = new ArrayList<>();
        // number of records to batch before committing
        final int minBatchSize = 1;
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                LOG.info("consumer name: " + name + ", message: " + record.value());
                buffer.add(record);
            }
            if (buffer.size() >= minBatchSize) {
                // processing succeeded, so commit the offset manually here
                consumer.commitSync();
                LOG.info("commit done");
                buffer.clear();
            }
        }
    }
}

The MyConsume class:

    package com.lijie.kafka;
    import java.util.Properties;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    /**
     * @Filename MyConsume.java
     *
     * @Description
     *
     * @Version 1.0
     *
     * @Author Lijie
     *
     * @Email [email protected]
     *
     * @History
     * Author: Lijie
     * Date: 2017-03-21
     * Version: 1.0
     * Content: create
     */
    public class MyConsume {
        private static Logger LOG = LoggerFactory.getLogger(MyConsume.class);

        public MyConsume() {
        }

        public static void main(String[] args) {
            Properties properties = new Properties();
            properties.put("bootstrap.servers", "10.0.4.141:19093,10.0.4.142:19093,10.0.4.143:19093");
            // disable auto-commit; offsets are committed manually
            properties.put("enable.auto.commit", "false");
            properties.put("auto.offset.reset", "latest");
            properties.put("session.timeout.ms", "30000");
            properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            properties.put("group.id", "lijieGroup");
            properties.put("auto.commit.interval.ms", "1000");
            ExecutorService executor = Executors.newFixedThreadPool(5);
            // start the consumer threads
            for (int i = 0; i < 7; i++) {
                executor.execute(new ConsumerThreadNew(new KafkaConsumer<String, String>(properties),
                        "lijietest", "consumer-" + (i + 1)));
            }
        }
    }

The MyProducer class:

    package com.lijie.kafka;
    import java.util.Properties;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    /**
     * @Filename MyProducer.java
     *
     * @Description
     *
     * @Version 1.0
     *
     * @Author Lijie
     *
     * @Email [email protected]
     *
     * @History
     * Author: Lijie
     * Date: 2017-03-21
     * Version: 1.0
     * Content: create
     */
    public class MyProducer {
        private static Properties properties;
        private static KafkaProducer<String, String> pro;

        static {
            // configuration
            properties = new Properties();
            properties.put("bootstrap.servers", "10.0.4.141:19093,10.0.4.142:19093,10.0.4.143:19093");
            // serializer types
            properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            // create the producer
            pro = new KafkaProducer<>(properties);
        }

        public static void main(String[] args) throws Exception {
            produce("lijietest");
        }

        public static void produce(String topic) throws Exception {
            // simulate messages
            // String value = UUID.randomUUID().toString();
            for (int i = 0; i < 10000; i++) {
                // build the message
                ProducerRecord<String, String> pr = new ProducerRecord<String, String>(topic, i + "");
                // send the message
                pro.send(pr);
                Thread.sleep(1000);
            }
        }
    }

The pom.xml is as follows:

    <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
      xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
      <modelVersion>4.0.0</modelVersion>
      <groupId>lijie-kafka-offset</groupId>
      <artifactId>lijie-kafka-offset</artifactId>
      <version>0.0.1-SNAPSHOT</version>
      <dependencies>
        <dependency>
          <groupId>org.apache.kafka</groupId>
          <artifactId>kafka_2.11</artifactId>
          <version>0.10.1.1</version>
        </dependency>
        <dependency>
          <groupId>org.apache.hadoop</groupId>
          <artifactId>hadoop-common</artifactId>
          <version>2.2.0</version>
        </dependency>
        <dependency>
          <groupId>org.apache.hadoop</groupId>
          <artifactId>hadoop-hdfs</artifactId>
          <version>2.2.0</version>
        </dependency>
        <dependency>
          <groupId>org.apache.hadoop</groupId>
          <artifactId>hadoop-client</artifactId>
          <version>2.2.0</version>
        </dependency>
        <dependency>
          <groupId>org.apache.hbase</groupId>
          <artifactId>hbase-client</artifactId>
          <version>1.0.3</version>
        </dependency>
        <dependency>
          <groupId>org.apache.hbase</groupId>
          <artifactId>hbase-server</artifactId>
          <version>1.0.3</version>
        </dependency>
        <dependency>
          <groupId>jdk.tools</groupId>
          <artifactId>jdk.tools</artifactId>
          <version>1.7</version>
          <scope>system</scope>
          <systemPath>${JAVA_HOME}/lib/tools.jar</systemPath>
        </dependency>
        <dependency>
          <groupId>org.apache.httpcomponents</groupId>
          <artifactId>httpclient</artifactId>
          <version>4.3.6</version>
        </dependency>
      </dependencies>
      <build>
        <plugins>
          <plugin>
            <groupId>org.apache.maven.plugins</groupId>
            <artifactId>maven-compiler-plugin</artifactId>
            <configuration>
              <source>1.7</source>
              <target>1.7</target>
            </configuration>
          </plugin>
        </plugins>
      </build>
    </project>

Addendum: manually maintaining offsets with the Kafka Java API

Without further ado, here is the code.

    package com.kafka;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.common.TopicPartition;
    import org.junit.Test;
    import java.util.*;
    public class ConsumerManageOffet {
    //broker addresses:
    //unlike the old Kafka consumer, which stored offsets in ZooKeeper, the new consumer
    //stores offsets in the brokers themselves, so reading data only needs the broker addresses
      private static String ips = "192.168.136.150:9092,192.168.136.151:9092,192.168.136.152:9092";
      public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers",ips);
        props.put("group.id","test02");
        props.put("auto.offset.reset","earliest");
        props.put("max.poll.records","10");
        props.put("key.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList("my-topic"));
        System.out.println("---------------------");
        while(true){
          ConsumerRecords<String, String> records = consumer.poll(10);
          System.out.println("+++++++++++++++++++++++");
          for(ConsumerRecord<String, String> record: records){
            System.out.println("---");
            System.out.printf("offset=%d,key=%s,value=%s%n",record.offset(),
                record.key(),record.value());
          }
        }
      }
      //manually maintain the offsets
      @Test
      public void autoManageOffset2(){
        Properties props = new Properties();
        //broker addresses
        props.put("bootstrap.servers",ips);
        //consumer group
        props.put("group.id","groupPP");
        //where to start when there is no committed offset: resume if this group has consumed
        //before, otherwise start from the earliest record
        props.put("auto.offset.reset","earliest");
        //disable automatic offset commits
        props.put("enable.auto.commit","false");
        //key and value deserializers
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        //create the consumer
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        //subscribe to the topic to consume
        consumer.subscribe(Arrays.asList("my-topic"));
        while(true){
          ConsumerRecords<String, String> records = consumer.poll(1000);
          //find out which partitions the records in this batch belong to
          Set<TopicPartition> partitions = records.partitions();
          for(TopicPartition tp : partitions){
            //take out and consume the records of this specific partition
            List<ConsumerRecord<String, String>> partitionRecords = records.records(tp);
            for(ConsumerRecord<String, String> r : partitionRecords){
              System.out.println(r.offset()  +"   "+r.key()+"   "+r.value());
            }
            //take the offset of the last record in this partition and add 1;
            //that is the position to commit as the next offset to fetch
            long newOffset = partitionRecords.get(partitionRecords.size() - 1).offset() + 1;
            consumer.commitSync(Collections.singletonMap(tp,new OffsetAndMetadata(newOffset)));
          }
        }
      }
    }
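
As a follow-up, here is a hedged sketch (not from the original article) of how a manually committed offset can be read back after a restart and used to resume consumption, using assign/seek instead of subscribe. The broker, topic and partition are placeholders; the group id is assumed to be the one that committed the offsets:

    import java.util.Arrays;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.common.TopicPartition;

    public class ResumeFromCommittedOffset {
      public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.136.150:9092");   // placeholder broker
        props.put("group.id", "groupPP");                          // group that committed the offsets
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        TopicPartition tp = new TopicPartition("my-topic", 0);     // placeholder topic / partition
        consumer.assign(Arrays.asList(tp));                        // manual assignment instead of subscribe
        OffsetAndMetadata committed = consumer.committed(tp);
        if (committed != null) {
          consumer.seek(tp, committed.offset());                   // continue right after the last manual commit
        } else {
          consumer.seekToBeginning(Arrays.asList(tp));             // nothing committed yet: start from the beginning
        }
        ConsumerRecords<String, String> records = consumer.poll(1000);
        for (ConsumerRecord<String, String> r : records) {
          System.out.println(r.offset() + "   " + r.key() + "   " + r.value());
        }
        consumer.close();
      }
    }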
    

The above is based on my own experience; I hope it serves as a useful reference, and I hope everyone will keep supporting 脚本之家. If there are mistakes or things I have not fully considered, corrections are welcome.
