因为CDH集群环境问题,我spark streaming程序的依赖就依照其版本来进行,但这就遇到一个问题,集群spark2支持的kafka版本是0.9.0,而我们程序操作zookeeper的ZkUtils类就不兼容了。
重写KafkaCluster类,兼容集群版本。
原程序单个topic的zk更新offset的方法:
// Build the direct Kafka stream: offsets are restored from the "/kafka" ZooKeeper
// path when present, and persisted back there on every batch (see createCustomDirectKafkaStream).
val stream = createCustomDirectKafkaStream(ssc,kafkaParams,"advertidshadoop161v14taiji.cdn.ifengidc.com","/kafka", topics)
/*
 * Overloaded createDirectStream(): builds a direct Kafka stream whose starting
 * offsets are restored from ZooKeeper when available, and whose batch offsets
 * are written back to ZooKeeper after each micro-batch.
 */
def createCustomDirectKafkaStream(ssc: StreamingContext, kafkaParams: Map[String, String], zkHosts: String
, zkPath: String, topics: Set[String]): InputDStream[(String, String)] = {
  val topic = topics.last //TODO only a single Kafka topic is supported for now
  // NOTE(review): ZkClient uses its default serializer here; kafka's ZkUtils is
  // usually paired with ZKStringSerializer — confirm the znode payload format
  // matches what other tooling reading this path expects.
  val zkClient = new ZkClient(zkHosts, 30000, 30000)
  val restoredOffsets = readOffsets(zkClient, zkHosts, zkPath, topic)
  val kafkaStream = restoredOffsets match {
    case Some(fromOffsets) =>
      // Saved offsets found: resume consumption exactly where we left off.
      val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key, mmd.message)
      KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](ssc, kafkaParams, fromOffsets, messageHandler)
    case None =>
      // Nothing persisted yet: fall back to the latest offsets.
      KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
  }
  // Persist each batch's offsets back to ZooKeeper.
  kafkaStream.foreachRDD(rdd => saveOffsets(zkClient, zkHosts, zkPath, rdd))
  kafkaStream
}
/*
 * Restore previously persisted offsets for `topic` from the ZooKeeper node at
 * `zkPath`. The node stores comma-separated "partition:offset" pairs (the
 * format saveOffsets() writes). Returns None when the node holds no data.
 */
private def readOffsets(zkClient: ZkClient, zkHosts: String, zkPath: String, topic: String):
Option[Map[TopicAndPartition, Long]] = {
  logger.info("Reading offsets from Zookeeper")
  val stopwatch = new Stopwatch()
  val (offsetsRangesStrOpt, _) = ZkUtils.readDataMaybeNull(zkClient, zkPath)
  offsetsRangesStrOpt match {
    case None =>
      logger.info("No offsets found in Zookeeper. Took " + stopwatch)
      None
    case Some(offsetsRangesStr) =>
      logger.info(s"Read offset ranges: ${offsetsRangesStr}")
      // Parse "p1:o1,p2:o2,..." into a TopicAndPartition -> offset map.
      // A malformed pair still fails fast with a MatchError, as before.
      val offsets = offsetsRangesStr.split(",").map { token =>
        val Array(partitionStr, offsetStr) = token.split(":")
        TopicAndPartition(topic, partitionStr.toInt) -> offsetStr.toLong
      }.toMap
      logger.info("Done reading offsets from Zookeeper. Took " + stopwatch)
      Some(offsets)
  }
}
/*
 * Persist the offsets of the given batch RDD to the ZooKeeper node at
 * `zkPath`, encoded as comma-separated "partition:offset" pairs — the format
 * readOffsets() parses on restart.
 *
 * NOTE: fromOffset (the start of the batch) is saved, not untilOffset, so a
 * crash mid-batch replays the in-flight batch on restart: at-least-once
 * delivery rather than data loss.
 */
private def saveOffsets(zkClient: ZkClient, zkHosts: String, zkPath: String, rdd: RDD[_]): Unit = {
  logger.info("Saving offsets to Zookeeper")
  val stopwatch = new Stopwatch()
  // RDDs produced by the direct stream carry their Kafka offset ranges.
  val offsetsRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
  offsetsRanges.foreach(offsetRange => logger.debug(s"Using ${offsetRange}"))
  val offsetsRangesStr = offsetsRanges.map(offsetRange => s"${offsetRange.partition}:${offsetRange.fromOffset}")
    .mkString(",")
  // Fix: the original log line concatenated zkHosts and zkPath without a
  // separator ("zkHosts=hostzkPath=/kafka"); interpolation with proper spacing.
  logger.info(s"Writing offsets to Zookeeper zkClient=$zkClient zkHosts=$zkHosts zkPath=$zkPath offsetsRangesStr:$offsetsRangesStr")
  ZkUtils.updatePersistentPath(zkClient, zkPath, offsetsRangesStr)
  logger.info("Done updating offsets in Zookeeper. Took " + stopwatch)
}
/** Lightweight elapsed-time tracker; toString() reports milliseconds since construction. */
class Stopwatch {
  // Wall-clock timestamp captured when the stopwatch is created.
  private val startedAt = System.currentTimeMillis()
  override def toString() = s"${System.currentTimeMillis() - startedAt} ms"
}
重写方法操作zk:
参考的github项目:https://github.com/xlturing/spark-journey/tree/master/SparkStreamingKafka