// XGBoost test (hotel order-volume forecasting)
// Invocation:
// /opt/app/spark-1.6.1/bin/spark-shell --master yarn-client --conf spark.executor.extraJavaOptions='-XX:PermSize=1024M' --driver-memory 6g --num-executors 80
//(续上) --executor-memory 6g --executor-cores 1 --jars /opt/app/spark-1.6.1/lib/xgboost4j-spark-0.5-jar-with-dependencies.jar
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.RandomForest
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.{SparkConf, SparkContext}
import ml.dmlc.xgboost4j.scala.spark.XGBoost
// Load the training table: column 0 is the label (non-cancelled order volume),
// the remaining 46 columns are candidate features.
val df = sqlContext.sql("select * from databasename.tmp_ym_hotel_multiple_features_table_new_train")
val trainColumns = Seq(
  "order_cii_notcancelcii", "city", "order_cii_ahead_1day", "order_cii_ahead_3days_avg",
  "order_cii_ahead_7days_avg", "order_cii_30days_avg", "order_cii_ahead_sameoneweek", "order_cii_ahead_sametwoweeks_avg",
  "star", "goldstar", "level", "ratingservice", "novoters", "week_day", "working_day", "cii_ahead_sameoneweek",
  "cii_ahead_sametwoweeks_avg", "cii_ahead_samethreeweeks_avg", "cii_ahead_samefourweeks_avg", "simple_estimate_constant",
  "cii_ahead_1day_avg", "cii_ahead_3days_avg", "cii_ahead_7days_avg", "order_ahead_lt_1days", "order_ahead_lt_2days",
  "order_ahead_lt_3days", "order_ahead_lt_7days", "order_ahead_lt_14days", "order_alldays", "click_ahead_1day",
  "click_ahead_2days", "click_ahead_3days", "click_ahead_7days", "click_ahead_14days", "browse_0day_uv",
  "browse_1day_uv", "browse_2day_uv", "browse_3day_uv", "browse_4day_uv", "browse_5day_uv",
  "browse_6day_uv", "browse_7_14day_uv", "browse_14daymore_uv", "order_cii_14days_avg", "order_cii_21days_avg",
  "order_cii_ahead_samethreeweeks_avg", "order_cii_ahead_samefourweeks_avg")
val data = df.select(trainColumns.map(df(_)): _*)
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
// Convert each row into a LabeledPoint: column 0 is the regression target,
// columns 1..16 become the dense feature vector.
val trainData = data.map { row =>
  val target = row(0).toString.toDouble
  val features = Array.tabulate(16)(j => row(j + 1).toString.toDouble)
  LabeledPoint(target, Vectors.dense(features))
}
// --- Training ---
// Number of boosting rounds.
val numRound = 800
val paramMap = Map(
  "eta" -> 0.1f, // learning rate
  "max_depth" -> 6, // maximum tree depth; default 6, valid range [1, inf)
  "silent" -> 0, // 0 prints runtime messages, 1 runs silently; default 0
  "objective" -> "reg:linear", // learning task and objective
  "eval_metric" -> "rmse", // evaluation metric for validation data
  "nthread" -> 1 // threads per worker; defaults to max available on the system
)
// Distributed training across 80 workers, keeping data in memory.
val model = XGBoost.train(trainData, paramMap, numRound, nWorkers = 80, useExternalMemory = false)
// Test set covering a 7-day window (Oct 31 - Nov 6).
// Column 0 is masterhotel (an id, not a label); the remaining 46 columns
// mirror the training feature columns in the same order.
val sql_test = "select * from databasename.tmp_ym_hotel_multiple_features_table_test_7days"
val df1 = sqlContext.sql(sql_test)
val testColumns = Seq(
  "masterhotel", "city", "order_cii_ahead_1day", "order_cii_ahead_3days_avg", "order_cii_ahead_7days_avg",
  "order_cii_30days_avg", "order_cii_ahead_sameoneweek", "order_cii_ahead_sametwoweeks_avg", "star", "goldstar",
  "level", "ratingservice", "novoters", "week_day", "working_day", "cii_ahead_sameoneweek", "cii_ahead_sametwoweeks_avg",
  "cii_ahead_samethreeweeks_avg", "cii_ahead_samefourweeks_avg", "simple_estimate_constant", "cii_ahead_1day_avg",
  "cii_ahead_3days_avg", "cii_ahead_7days_avg", "order_ahead_lt_1days", "order_ahead_lt_2days",
  "order_ahead_lt_3days", "order_ahead_lt_7days", "order_ahead_lt_14days", "order_alldays",
  "click_ahead_1day", "click_ahead_2days", "click_ahead_3days", "click_ahead_7days",
  "click_ahead_14days", "browse_0day_uv", "browse_1day_uv", "browse_2day_uv", "browse_3day_uv",
  "browse_4day_uv", "browse_5day_uv", "browse_6day_uv", "browse_7_14day_uv", "browse_14daymore_uv",
  "order_cii_14days_avg", "order_cii_21days_avg", "order_cii_ahead_samethreeweeks_avg", "order_cii_ahead_samefourweeks_avg")
val data1 = df1.select(testColumns.map(df1(_)): _*)
// Build the prediction feature vectors from columns 1..16 (column 0 is the
// masterhotel id and is intentionally excluded from the features).
// Fix: the original bound column 0 to an unused local `label` via
// `line(0).toString.toDouble` — a dead conversion that could also throw on a
// non-numeric id; it is removed here.
// NOTE(review): only features 1..16 are used although 46 feature columns are
// selected above — confirm whether the remaining 30 were meant to be included
// (training uses the same 1..16 slice, so train/test are at least consistent).
val testData = data1.map { line =>
  val value0 = (1 to 16).map(i => line(i).toString.toDouble)
  Vectors.dense(value0.toArray)
}
// Run distributed prediction on the test feature vectors.
// NOTE(review): `collect()(0)` below takes only the FIRST element of the
// collected prediction RDD — this appears to assume xgboost4j-spark 0.5 returns
// per-partition arrays of row predictions and that all rows land in one
// partition; verify, since rows in later partitions would be silently dropped.
val predTrain = model.predict(testData)
val s=predTrain.collect()(0)
//s.length
// Ground truth for evaluation: hotel id, actual order volume, rank, and order date.
val data2 = df1.select(Seq("masterhotel", "order_cii_notcancelcii", "rank1", "orderdate").map(df1(_)): _*)
val actual_frame = data2.toDF() // no-arg toDF() on a DataFrame is effectively an identity; kept for parity
// Row schema for the evaluation result DataFrame.
// NOTE(review): Scala convention would name this `ResultSet` (UpperCamelCase);
// rename deferred because the name is referenced when building `output0` below.
case class resultset(masterhotel:Int, // master hotel ID
quantity:Double, // actual order volume
rank:Int, // ranking
date:String, // date
frcst_cii:Double // forecast order volume
)
// Pair each actual row with its prediction by positional index and build the
// result DataFrame.
// NOTE(review): this assumes `actual_frame` rows and the prediction array have
// the same length and the same ordering as `testData` — confirm; DataFrame
// collect order is not guaranteed across runs without an explicit sort.
val ac_1=actual_frame.collect()
// First collected prediction element: presumably an array of per-row
// single-output predictions, hence pr_1(i)(0) — TODO confirm against
// xgboost4j-spark 0.5's predict return shape.
val pr_1=predTrain.collect()(0)
val output0=(0 until ac_1.length).map( i =>resultset(ac_1(i)(0).toString.toInt,
ac_1(i)(1).toString.toDouble,
ac_1(i)(2).toString.toInt,
ac_1(i)(3).toString,
pr_1(i)(0).toString.toDouble
)).toDF()
// Append the absolute error between actual and forecast volume.
val output = output0.withColumn("diff", abs($"quantity" - $"frcst_cii"))
// Mean absolute error per date: MAE@100 restricts to the top-100 ranked hotels,
// MAE@500 averages over all rows.
val MAE100 = output.filter($"rank" <= 100).groupBy("date").avg("diff")
val MAE500 = output.groupBy("date").avg("diff")
val mae100 = MAE100.sort("date").collect()
val mae500 = MAE500.sort("date").collect()
// Print the evaluation results, one row per date.
for (row <- mae100) println("MAE100", row)
for (row <- mae500) println("MAE500", row)