SparkML in Practice, Part 2: KMeans

package class8

import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.Vectors

/** Created by root on 16-1-12. */
object Kmeans {
  def main(args: Array[String]) {
    // Suppress unnecessary log output to the terminal
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)
    // Set up the runtime environment
    val conf = new SparkConf().setAppName("Kmeans").setMaster("local[4]")
    // To run on a cluster, use setMaster("spark://moon:7077") and package the job into a jar
    //sc.addJar("/path/to/jarfile")
    val sc = new SparkContext(conf)
    // Load the dataset (kmeans_data.txt):
    //   0.0 0.0 0.0
    //   0.1 0.1 0.1
    //   0.2 0.2 0.2
    //   9.0 9.0 9.0
    //   9.1 9.1 9.1
    //   9.2 9.2 9.2

    val data = sc.textFile("/usr/local/spark/spark-data/data/class8/kmeans_data.txt", 1)
    val parsedData = data.map(s => Vectors.dense(s.split(' ').map(_.toDouble)))
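    // Hedged addition (not in the original post): parsedData is reused by both
    // KMeans.train and computeCost below, so caching it avoids re-reading and
    // re-parsing the text file on each pass.
    parsedData.cache()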
    // Cluster the data into 2 classes with 20 iterations to train the model
    val numClusters = 2
    val numIterations = 20
    val model = KMeans.train(parsedData, numClusters, numIterations)
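    // Hedged note (not in the original post): the Spark 1.x MLlib API also has an
    // overload that sets the number of runs and the initialization mode, e.g.:
    //   KMeans.train(parsedData, numClusters, numIterations, 3, KMeans.K_MEANS_PARALLEL)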
    // Print the model's cluster centers
    println("Cluster centers:")
    for (c <- model.clusterCenters) {
      println(" " + c.toString)
    }

    // Evaluate the model with the within-set sum of squared errors (WSSSE), computed on the training set
    val cost = model.computeCost(parsedData)
    println("Within Set Sum of Squared Errors = " + cost)
    // Use the model to classify individual test points
    println("Vector 0.2 0.2 0.2 belongs to cluster: " +
      model.predict(Vectors.dense("0.2 0.2 0.2".split(' ').map(_.toDouble))))  // 1
    println("Vector 0.25 0.25 0.25 belongs to cluster: " +
      model.predict(Vectors.dense("0.25 0.25 0.25".split(' ').map(_.toDouble))))
    println("Vector 8 8 8 belongs to cluster: " +
      model.predict(Vectors.dense("8 8 8".split(' ').map(_.toDouble))))
    // Cross-evaluation 1: return only the prediction results (testdata is the same as parsedData)
    val testdata = data.map(s => Vectors.dense(s.split(' ').map(_.toDouble)))
    val result1 = model.predict(testdata)
    //result1.saveAsTextFile("/usr/local/spark/spark-data/data/class8/result_kmeans1")

    result1.foreach(println)
    // Cross-evaluation 2: return each data point together with its prediction
//    val result2 = data.map {
//      line =>
//        val lineVector = Vectors.dense(line.split(' ').map(_.toDouble))
//        val prediction = model.predict(lineVector)
//        line + " " + prediction
//    }.saveAsTextFile("/usr/local/spark/spark-data/data/class8/result_kmeans2")
    sc.stop()
  }

}
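
A trained model does not have to be rebuilt on every run. Below is a minimal sketch of persisting and reloading it, assuming Spark 1.4+ where MLlib models expose save and load; KmeansPersistence, saveAndReload, and the output path are illustrative names, not part of the original example.

import org.apache.spark.SparkContext
import org.apache.spark.mllib.clustering.KMeansModel

object KmeansPersistence {
  def saveAndReload(sc: SparkContext, model: KMeansModel): KMeansModel = {
    // Illustrative path; replace with a real HDFS or local directory
    val path = "/usr/local/spark/spark-data/data/class8/kmeans_model"
    model.save(sc, path)       // persists cluster centers and metadata
    KMeansModel.load(sc, path) // reloads an equivalent model
  }
}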