2. Spark reading streaming data

package com.sparktest.bigdata.spark

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}
object Driver02 {
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf().setMaster("local[2]").setAppName("stream")
    val sc = new SparkContext(conf)
    val ssc = new StreamingContext(sc, Seconds(5))
    // Seconds(5) is the batch interval: each micro-batch covers 5 seconds of data

    // A checkpoint directory is required to accumulate state across historical batches
    // (e.g. with updateStateByKey); it can be a local path or an HDFS path
    ssc.checkpoint("d:/data/check")
    // Monitor the HDFS directory and read newly added text files as a DStream
    val data = ssc.textFileStream("hdfs://192.168.56.101:9000/stream/")
    //val data2 = sc.textFile("hdfs://192.168.56.101:9000/stream")
    // Split each line into words and map every word to a (word, 1) pair
    val r1 = data.flatMap(_.split(" ")).map((_, 1))
    // Window length and slide interval are both 5 seconds, so each window covers exactly one batch
    val r2 = r1.reduceByKeyAndWindow((a: Int, b: Int) => a + b, Seconds(5), Seconds(5))
    //val r2 = r1.updateStateByKey((seq: Seq[Int], option: Option[Int]) => Some(seq.sum + option.getOrElse(0)))
    r2.print()
    ssc.start()
    ssc.awaitTermination()
  }
}
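
The commented-out updateStateByKey line is what the checkpoint comment actually refers to: unlike the 5-second window above, it keeps a running total of every word seen since the application started. Below is a minimal sketch of that stateful variant; the HDFS path, checkpoint directory, and the object name Driver02Stateful are placeholders carried over from the listing above, so adjust them for your environment.

package com.sparktest.bigdata.spark

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}

// Sketch only: a stateful word count that accumulates counts across all batches.
// Paths and names are taken from the example above and may need to be changed.
object Driver02Stateful {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[2]").setAppName("stateful-stream")
    val sc = new SparkContext(conf)
    val ssc = new StreamingContext(sc, Seconds(5))

    // updateStateByKey requires a checkpoint directory to store the running state
    ssc.checkpoint("d:/data/check")

    val data = ssc.textFileStream("hdfs://192.168.56.101:9000/stream/")
    val pairs = data.flatMap(_.split(" ")).map((_, 1))

    // For each key, add the counts from the current batch (seq) to the
    // previously accumulated total (state); the result becomes the new state
    val totals = pairs.updateStateByKey((seq: Seq[Int], state: Option[Int]) =>
      Some(seq.sum + state.getOrElse(0)))

    totals.print()
    ssc.start()
    ssc.awaitTermination()
  }
}

Note that textFileStream only picks up files that appear in the monitored directory after the job has started, so to test either version you would copy new files into the /stream/ directory (for example with hdfs dfs -put) while the job is running.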
