Spark Streaming reading Kafka data with zero loss (Part 2)

Approach 2:
The idea of approach 2 is that, after each Spark Streaming batch has consumed data from Kafka, the consumed Kafka offsets are written back to ZooKeeper. When the application crashes or is upgraded, it can resume from where it left off, giving zero data loss with at-least-once semantics. The checkpoint-based approach, by contrast, can cause duplicate consumption, and the offsets maintained by Spark Streaming can drift out of sync with the offsets kept in ZooKeeper, leading to data loss or reprocessing. So instead we update the offsets when an action is triggered on the DStream, specifically in the output operation; this ensures that only the offsets of data that has actually been consumed are written to ZooKeeper. Enough talk, here is the code.

import kafka.api.{OffsetRequest, PartitionOffsetRequestInfo, TopicMetadataRequest}
import kafka.common.TopicAndPartition
import kafka.consumer.SimpleConsumer
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import kafka.utils.{ZKGroupTopicDirs, ZkUtils}
import org.I0Itec.zkclient.ZkClient
import org.I0Itec.zkclient.exception.ZkMarshallingError
import org.I0Itec.zkclient.serialize.ZkSerializer
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils, OffsetRange}

  // Creates a direct Kafka stream that resumes from the offsets stored in ZooKeeper
  // (if any) and registers an output operation that writes the offsets back after
  // every batch. `logger` is assumed to be defined on the enclosing class/object.
  def start(ssc: StreamingContext,
            brokerList: String,
            zkConnect: String,
            groupId: String,
            topic: String): InputDStream[(String, String)] = {

    // ZkClient with a plain-string serializer so offsets are stored as UTF-8 text
    val zkClient = new ZkClient(zkConnect, 60000, 60000, new ZkSerializer {
      override def serialize(data: Object): Array[Byte] = {
        try {
          data.toString.getBytes("UTF-8")
        } catch {
          case _: ZkMarshallingError => null
        }
      }
      override def deserialize(bytes: Array[Byte]): Object = {
        try {
          new String(bytes, "UTF-8")
        } catch {
          case _: ZkMarshallingError => null
        }
      }
    })
    val kafkaParams = Map(
      "metadata.broker.list" -> brokerList,
      "group.id" -> groupId,
      "zookeeper.connect" -> zkConnect,
      // Only used when no offsets are stored yet: start from the earliest offset
      "auto.offset.reset" -> kafka.api.OffsetRequest.SmallestTimeString)
    val topics = topic.split(",").toSet
    // Host and port of the first broker, used as the seed for metadata lookups
    val brokerSeed = brokerList.split(",")(0).split(":")
    val brokerHost = brokerSeed(0)
    val brokerPort = if (brokerSeed.length > 1) brokerSeed(1).toInt else 9092
    // ZooKeeper directory where this consumer group keeps its offsets for the topic
    val topicDirs = new ZKGroupTopicDirs(groupId, topic)
    val zkTopicPath = s"${topicDirs.consumerOffsetDir}"
    val children = zkClient.countChildren(s"${topicDirs.consumerOffsetDir}")
    var kafkaStream: InputDStream[(String, String)] = null
    var fromOffsets: Map[TopicAndPartition, Long] = Map()
    if (children > 0) {
      // Offsets for this group already exist in ZooKeeper: resume from them after
      // checking that each saved offset is still available in Kafka
      val topicList = List(topic)
      val req = new TopicMetadataRequest(topicList, 0)
      val getLeaderConsumer = new SimpleConsumer(brokerHost, brokerPort, 10000, 10000, "OffsetLookup")
      val res = getLeaderConsumer.send(req)
      val topicMetaOption = res.topicsMetadata.headOption
      // Map each partition id to its leader broker so the per-partition earliest
      // offset can be fetched from the correct broker
      val partitions = topicMetaOption match {
        case Some(tm) =>
          tm.partitionsMetadata.map(pm => (pm.partitionId, (pm.leader.get.host, pm.leader.get.port))).toMap
        case None =>
          Map[Int, (String, Int)]()
      }
      for (i <- 0 until children) {
        val partitionOffset = zkClient.readData[String](s"$zkTopicPath/$i")
        val tp = TopicAndPartition(topic, i)
        // Ask the partition leader for its earliest retained offset; if the offset
        // saved in ZooKeeper has already been removed by log retention, fall back to
        // the earliest offset to avoid an OffsetOutOfRangeException on startup
        val requestMin = OffsetRequest(Map(tp -> PartitionOffsetRequestInfo(OffsetRequest.EarliestTime, 1)))
        val (leaderHost, leaderPort) = partitions(i)
        val consumerMin = new SimpleConsumer(leaderHost, leaderPort, 10000, 10000, "getMinOffset")
        val curOffsets = consumerMin.getOffsetsBefore(requestMin).partitionErrorAndOffsets(tp).offsets
        var nextOffset = partitionOffset.toLong
        if (curOffsets.nonEmpty && nextOffset < curOffsets.head) {
          nextOffset = curOffsets.head
        }
        fromOffsets += (tp -> nextOffset)
        logger.info(tp.topic + ":" + tp.partition + ";partitionOffset:" + partitionOffset + "**********" + "nextOffset:" + nextOffset)
      }
      // Rebuild the direct stream starting from the validated offsets
      val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.topic, mmd.message())
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](
        ssc, kafkaParams, fromOffsets, messageHandler)
    } else {
      // First run, no offsets stored yet: let auto.offset.reset decide where to start
      kafkaStream = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
    }
    // Capture the offset ranges of every batch and, as an output operation, write the
    // starting offset of each partition back to ZooKeeper. Because the committed offset
    // points to the start of the last batch, a restart replays that batch (at-least-once).
    var offsetRanges = Array[OffsetRange]()
    kafkaStream.transform { rdd =>
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    }.foreachRDD { rdd =>
      for (o <- offsetRanges) {
        ZkUtils.updatePersistentPath(zkClient, s"${topicDirs.consumerOffsetDir}/${o.partition}", o.fromOffset.toString)
      }
    }
    kafkaStream
  }
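
For completeness, here is a minimal driver sketch showing how the method above might be wired into an application. The enclosing object name KafkaOffsetStream, the broker and ZooKeeper addresses, and the word-count processing are illustrative assumptions, not part of the original code.

// Minimal driver sketch; names and addresses below are assumptions for illustration.
// It assumes the start() method above is defined on an object called KafkaOffsetStream.
object WordCountApp {
  def main(args: Array[String]): Unit = {
    val conf = new org.apache.spark.SparkConf().setAppName("kafka-zk-offsets")
    val ssc = new org.apache.spark.streaming.StreamingContext(conf, org.apache.spark.streaming.Seconds(10))

    // Build the direct stream, resuming from offsets stored in ZooKeeper if present
    val stream = KafkaOffsetStream.start(
      ssc,
      brokerList = "broker1:9092,broker2:9092",   // assumed broker list
      zkConnect  = "zk1:2181,zk2:2181,zk3:2181",  // assumed ZooKeeper quorum
      groupId    = "demo-group",
      topic      = "demo-topic")

    // Any processing can be attached to the returned stream; the offset update
    // registered inside start() runs as an extra output operation every batch
    stream.map(_._2)
      .flatMap(_.split(" "))
      .map((_, 1L))
      .reduceByKey(_ + _)
      .print()

    ssc.start()
    ssc.awaitTermination()
  }
}

Because ZKGroupTopicDirs is used, the offsets end up under /consumers/<groupId>/offsets/<topic>/<partition> in ZooKeeper, the same layout the old high-level consumer uses, so existing offset-monitoring tools can read them as well.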
