Spark in practice: querying log data

The Spark version used is 1.5.
The code is as follows:

// Import SogouQ3_utf.txt as an RDD and convert it to a DataFrame
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{StructType,StructField,StringType}
val sqlContext = new org.apache.spark.sql.SQLContext(sc)
val data = sc.textFile("file:/usr/local/test/SogouQ3_utf.txt")
val schemaString = "time user word ranking number url"
val schema = StructType(
  schemaString.split(" ").map(fieldName => StructField(fieldName, StringType, true)))
val rowRDD = data.map(_.split("\t")).map(p => Row(p(0), p(1), p(2), p(3), p(4), p(5)))
val dataDataFrame = sqlContext.createDataFrame(rowRDD, schema)
dataDataFrame.registerTempTable("data")
dataDataFrame.printSchema()
val df = dataDataFrame
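// Optional step (not in the original): df is reused by several aggregations below,
// so caching it avoids re-reading and re-parsing the text file for each one.
df.cache()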
// Top 10 most frequent query words
val grouped1 = df.groupBy("word").count()
val grouped1_sort = grouped1.sort(grouped1("count").desc)
grouped1_sort.registerTempTable("word_top10")
val result1 = sqlContext.sql("select * from word_top10 limit 10")
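// Alternative sketch (not in the original): the same top 10 can be taken with the
// DataFrame API alone, without registering a temp table.
val result1_alt = grouped1_sort.limit(10)
result1_alt.show()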
// Top 10 users by number of queries
val grouped2 = df.groupBy("user").count()
val grouped2_sort = grouped2.sort(grouped2("count").desc)
grouped2_sort.registerTempTable("user_top10")
val result2 = sqlContext.sql("select * from user_top10 limit 10")
// Top 50 most visited websites (by extracted host)
val data_regex = sqlContext.sql("""select time, user, word, ranking, number,
  regexp_extract(url, '(http|https)://([\!\w\.-]+)/', 2) from data""").toDF(
  "time", "user", "word", "ranking", "number", "url_regex")
val grouped3 = data_regex.groupBy("url_regex").count()
val grouped3_sort = grouped3.sort(grouped3("count").desc)
grouped3_sort.registerTempTable("site_top50")
val result3 = sqlContext.sql("select * from site_top50 limit 50")
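// Alternative sketch (not in the original): the host can also be extracted with
// regexp_extract from org.apache.spark.sql.functions (available since Spark 1.5),
// staying entirely in the DataFrame API; the pattern below drops the superfluous \! escape.
import org.apache.spark.sql.functions.regexp_extract
val data_regex_alt = df.withColumn("url_regex",
  regexp_extract(df("url"), """(http|https)://([\w\.-]+)/""", 2))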
// Save the results to HDFS as CSV
// The spark-csv package (com.databricks:spark-csv) must be on the classpath, e.g. added
// with the --packages flag when starting spark-shell; no import is needed because the
// data source is referenced by name in write.format(...)
val saveOptions1 = Map("header" -> "true", "path" -> "/output1")
result1.write.format("com.databricks.spark.csv").options(saveOptions1).save()
val saveOptions2 = Map("header" -> "true", "path" -> "/output2")
result2.write.format("com.databricks.spark.csv").options(saveOptions2).save()
val saveOptions3 = Map("header" -> "true", "path" -> "/output3")
result3.write.format("com.databricks.spark.csv").options(saveOptions3).save()
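// Verification sketch (not in the original): read one of the saved results back with
// the same spark-csv data source, assuming the /output1 path used above.
val check = sqlContext.read.format("com.databricks.spark.csv").option("header", "true").load("/output1")
check.show()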
