pyspark: K-means clustering

from pyspark import SparkConf
from pyspark.sql import SparkSession
from pyspark.ml.linalg import Vectors
from pyspark.ml.clustering import KMeans
import pandas as pd

if __name__ == "__main__":
    appname = "KMeans"
    master ="local[4]" 
    conf = SparkConf().setAppName(appname).setMaster(master)  #spark配置                
    spark=SparkSession.builder.config(conf=conf).getOrCreate()#spark实例化
#读取数据
    data = [(Vectors.dense([0.0, 0.0]),), (Vectors.dense([1.0, 1.0]),),\
           (Vectors.dense([9.0, 8.0]),), (Vectors.dense([8.0, 9.0]),)]    
    df = spark.createDataFrame(data, ["features"])

    # Cluster with K-means
    kmeans = KMeans(k=2, seed=1)
    model = kmeans.fit(df)
    centers = model.clusterCenters()
    cost = model.computeCost(df)  # sum of squared distances from each point to its center
    result = model.transform(df).select("features", "prediction")  # clustering result

    # Convert the clustering result to a pandas DataFrame
    columns = result.columns  # extract the column names
    result = result.collect()  # collect the rows to the driver
    result = pd.DataFrame(result, columns=columns)  # convert to a pandas DataFrame
    print(result)
    spark.stop()
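
Side note: computeCost has been deprecated in newer Spark releases in favor of ClusteringEvaluator, and toPandas() condenses the collect-and-convert step into a single call. Below is a minimal sketch reusing the model and df from the script above (assumes Spark 2.3+ for ClusteringEvaluator; it would need to run before spark.stop()):

    from pyspark.ml.evaluation import ClusteringEvaluator

    predictions = model.transform(df)             # adds a "prediction" column next to "features"
    evaluator = ClusteringEvaluator()             # silhouette metric by default
    silhouette = evaluator.evaluate(predictions)  # closer to 1.0 means tighter, better-separated clusters
    print(silhouette)

    result_pdf = predictions.select("features", "prediction").toPandas()  # shortcut for collect + pd.DataFrame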

Output:

(screenshot of the run output)
