SparkML in Practice, Part 1: Linear Regression

package class8

import org.apache.log4j.{Logger, Level}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.{LinearRegressionWithSGD, LabeledPoint}

/** Created by root on 16-1-12. */
object LinearRegression {
  def main(args: Array[String]) {
    // Suppress unnecessary log output to the terminal
    Logger.getLogger("org.apache.spark").setLevel(Level.ERROR)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)
    // Set up the runtime environment
    val conf = new SparkConf().setAppName("LinearRegression").setMaster("local[4]")
    val sc = new SparkContext(conf)
    // Load and parse the data. Each line of lpsa.data is "label,feature1 feature2 ...", e.g.:
    /*
      -0.4307829,-1.63735562648104 -2.00621178480549 -1.86242597251066 -1.02470580167082 -0.522940888712441 -0.863171185425945 -1.04215728919298 -0.864466507337306
      -0.1625189,-1.98898046126935 -0.722008756122123 -0.787896192088153 -1.02470580167082 -0.522940888712441 -0.863171185425945 -1.04215728919298 -0.864466507337306
    */
    val data = sc.textFile("/usr/local/spark/spark-data/data/class8/lpsa.data")
    val parsedData = data.map { line =>
      val parts = line.split(',')
      // Pack the label and the space-separated features into a LabeledPoint (dense vector)
      LabeledPoint(parts(0).toDouble, Vectors.dense(parts(1).split(' ').map(_.toDouble)))
    }.cache() // cached because the RDD is reused for training and evaluation below
    // Build the model
    val numIterations = 100
    // Maximum likelihood is widely used for parameter estimation; here we use
    // stochastic gradient descent (SGD), which helps with two weaknesses of
    // batch gradient descent: slow convergence and getting stuck in local optima
    val model = LinearRegressionWithSGD.train(parsedData, numIterations)
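    // Not in the original post: a sketch of the three-argument train() overload,
    // which takes an explicit SGD step size (the two-argument call above uses
    // MLlib's default of 1.0). The value 0.1 is a hypothetical starting point:
    // val tunedModel = LinearRegressionWithSGD.train(parsedData, numIterations, 0.1)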
    // Evaluate the model on the training data and compute the error
    val valuesAndPreds = parsedData.map { point =>
      val prediction = model.predict(point.features)
      (point.label, prediction)
    }
    // Training mean squared error: MSE = (1/n) * sum_i (label_i - prediction_i)^2
    val MSE = valuesAndPreds.map { case (v, p) => math.pow(v - p, 2) }.reduce(_ + _) / valuesAndPreds.count()
    println("training Mean Squared Error = " + MSE)
    println("weights: " + model.weights + ", intercept: " + model.intercept)
    sc.stop()
  }
}
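
Once trained, the model can score new samples directly. A minimal usage sketch (not part of the original listing; the eight feature values below are made up for illustration):

    // Score a single hypothetical 8-feature sample with the trained model
    val sample = Vectors.dense(-1.64, -2.01, -1.86, -1.02, -0.52, -0.86, -1.04, -0.86)
    val predictedLabel = model.predict(sample) // returns the predicted lpsa value as a Double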
