Original post: https://blog.csdn.net/uyy203/article/details/90735664
Clustering is a fundamental problem in data mining. In essence, it partitions n data objects into k clusters so that objects within the same cluster are highly similar, while objects in different clusters have low similarity.
The basic idea of k-means is to take k points in space as cluster centers and assign each object to the center it is closest to. The centers are then updated iteratively until the best clustering result is reached.
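Formally, this amounts to the standard k-means objective (stated here explicitly as an addition; the original post only describes it in words): find centers \mu_1, \dots, \mu_k and clusters C_1, \dots, C_k that minimize the within-cluster sum of squared distances

J = \sum_{j=1}^{k} \sum_{x \in C_j} \lVert x - \mu_j \rVert^2 ,

where \mu_j is the mean of the points in C_j.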
Original data:
Data after clustering:
Basic steps of the algorithm
Step 1: Arbitrarily choose k of the n data objects as the initial cluster centers, and set a maximum number of iterations.
Step 2: Compute the distance from every object to each of the k centers and assign each object to the class represented by its nearest center.
Step 3: For each center, traverse the objects assigned to it and take the mean of those objects in every dimension to obtain the new center.
Step 4: If any cluster center changed in this iteration and the iteration count is still below the given maximum, repeat steps 2 and 3; otherwise, stop and return the clustering result.
Walking through the K-means implementation
def main():
    #step1: load data
    print("load data...")
    dataSet = []
    with open('./testSet/testSet.txt') as dataSetFile:
        for line in dataSetFile:
            lineAttributes = line.strip().split('\t')
            dataSet.append([float(lineAttributes[0]), float(lineAttributes[1])])
    #step2: clustering
    print("clustering...")
    dataSet = np.mat(dataSet)
    k = 4
    n = 10000
    centers_result, clusters_assignment_result = kmeans(dataSet, k, n)
    #step3: show the clusters and centers
    print("show the clusters and centers...")
    showCluster(dataSet, k, centers_result, clusters_assignment_result)
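For reference, the loading loop above expects testSet.txt to contain one sample per line, with the two coordinates separated by a tab; the values below are made up purely to illustrate the format:

0.5	1.2
-1.3	2.8
3.7	-0.4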
The initialCenters function uses numpy's zeros and random.uniform functions to randomly pick k data points as the initial cluster centers and stores the result in the numpy array centers (a variant that avoids picking the same sample twice is sketched after the function).
#create centers, the number of centers is k
def initialCenters(data, k):
    numSample, dim = data.shape
    centers = np.zeros((k, dim))
    for i in range(k):
        # pick a random row index and use that sample as a center
        index = int(np.random.uniform(0, numSample))
        centers[i, :] = data[index, :]
    return centers
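One caveat: np.random.uniform can return the same index more than once, so two initial centers may coincide. A minimal alternative sketch (an addition, not part of the original post) samples k distinct rows instead:

#variant: pick k distinct samples as initial centers
def initialCentersDistinct(data, k):
    numSample, dim = data.shape
    # sample k different row indices without replacement
    indices = np.random.choice(numSample, k, replace=False)
    # copy the chosen rows into a plain (k, dim) array of centers
    return np.array(data[indices, :], dtype=float)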
The distanceToCenters function computes the distance from one data point to every cluster center and returns the distances in distance2Centers (a vectorized variant is sketched after the function).
#calculate distance from each point to each center
def distanceToCenters(sample, centers):
    k = centers.shape[0]
    distance2Centers = np.zeros(k)
    for i in range(k):
        # Euclidean distance from the sample to the i-th center
        distance2Centers[i] = np.sqrt(np.sum(np.power(sample - centers[i, :], 2)))
    return distance2Centers
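For comparison, the same distances can be computed without the explicit loop via np.linalg.norm; this vectorized variant is an addition, not part of the original code:

#variant: vectorized distance from one sample to all centers
def distanceToCentersVectorized(sample, centers):
    diff = np.asarray(sample) - centers    # broadcast the sample against every center row
    return np.linalg.norm(diff, axis=1)    # Euclidean norm of each row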
This part of the code does the core work of k-means: deciding which cluster each data point belongs to and iteratively updating the cluster centers.
Note the use of numpy's argmin function, which returns the index of the smallest value, and its mean function, which computes averages; a short illustration follows.
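A quick illustration of both calls with toy values (the numbers are made up for demonstration):

d = np.array([2.0, 0.5, 1.7])
print(np.argmin(d))              # prints 1, the index of the smallest distance
pts = np.array([[1.0, 2.0], [3.0, 4.0]])
print(np.mean(pts, axis=0))      # prints [2. 3.], the column-wise mean used as a new center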
#k-means
def kmeans(data, k, n):
    #initialize
    iterCount = 0
    centerChanged = True
    numSample = data.shape[0]
    centersAssignment = np.zeros(numSample)
    #step1 find the centers by random
    centers = initialCenters(data, k)
    while centerChanged and iterCount < n:
        #step2 calculate and mark the index of the closest center for each point to form the clusters
        centerChanged = False
        iterCount = iterCount + 1
        for i in range(numSample):
            sample2Centers = distanceToCenters(data[i, :], centers)
            minIndex = np.argmin(sample2Centers)
            if centersAssignment[i] != minIndex:
                centersAssignment[i] = minIndex
                centerChanged = True
        #step3 calculate the mean point of each cluster, which becomes its new center
        for j in range(k):
            pointsInCluster = data[np.nonzero(centersAssignment[:] == j)[0]]
            centers[j, :] = np.mean(pointsInCluster, axis=0)
    return centers, centersAssignment
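One detail the code above does not handle: if a cluster ends up with no points, np.mean over the empty selection yields NaN (with a runtime warning) and that center is lost. A hedged drop-in variant of the step-3 loop could simply skip empty clusters:

#step3 variant: only update the centers of non-empty clusters
for j in range(k):
    pointsInCluster = data[np.nonzero(centersAssignment[:] == j)[0]]
    if pointsInCluster.shape[0] > 0:
        centers[j, :] = np.mean(pointsInCluster, axis=0)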
In the showCluster function, matplotlib's plot function draws the data of each cluster in a different color (a colormap-based variant that supports more clusters is sketched after the function).
def showCluster(data, k, centers, clustersAssignment):
    numSample = data.shape[0]
    #draw all samples
    mark = ['or', 'ob', 'og', 'om']
    for i in range(numSample):
        markIndex = int(clustersAssignment[i])
        plt.plot(data[i, 0], data[i, 1], mark[markIndex])
    #draw the centers
    mark = ['Dr', 'Db', 'Dg', 'Dm']
    for i in range(k):
        plt.plot(centers[i, 0], centers[i, 1], mark[i], markersize=10)
    plt.show()
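Because mark only lists four styles, showCluster can display at most four clusters. For a larger k, a sketch of a colormap-based alternative (an addition, not part of the original post) could look like this:

#variant: color points by cluster index, works for any k (uses the same numpy/matplotlib imports as above)
def showClusterScatter(data, centers, clustersAssignment):
    data = np.asarray(data)
    # one scatter call for all samples, colored by their cluster index
    plt.scatter(data[:, 0], data[:, 1], c=clustersAssignment, cmap='viridis', s=15)
    # centers drawn as larger black diamonds
    plt.scatter(centers[:, 0], centers[:, 1], c='black', marker='D', s=80)
    plt.show()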
Complete code:
#k-means
#author xyz.
import numpy as np
import matplotlib.pyplot as plt

#create centers, the number of centers is k
def initialCenters(data, k):
    numSample, dim = data.shape
    centers = np.zeros((k, dim))
    for i in range(k):
        # pick a random row index and use that sample as a center
        index = int(np.random.uniform(0, numSample))
        centers[i, :] = data[index, :]
    return centers

#calculate distance from each point to each center
def distanceToCenters(sample, centers):
    k = centers.shape[0]
    distance2Centers = np.zeros(k)
    for i in range(k):
        # Euclidean distance from the sample to the i-th center
        distance2Centers[i] = np.sqrt(np.sum(np.power(sample - centers[i, :], 2)))
    return distance2Centers

#k-means
def kmeans(data, k, n):
    #initialize
    iterCount = 0
    centerChanged = True
    numSample = data.shape[0]
    centersAssignment = np.zeros(numSample)
    #step1 find the centers by random
    centers = initialCenters(data, k)
    while centerChanged and iterCount < n:
        #step2 calculate and mark the index of the closest center for each point to form the clusters
        centerChanged = False
        iterCount = iterCount + 1
        for i in range(numSample):
            sample2Centers = distanceToCenters(data[i, :], centers)
            minIndex = np.argmin(sample2Centers)
            if centersAssignment[i] != minIndex:
                centersAssignment[i] = minIndex
                centerChanged = True
        #step3 calculate the mean point of each cluster, which becomes its new center
        for j in range(k):
            pointsInCluster = data[np.nonzero(centersAssignment[:] == j)[0]]
            centers[j, :] = np.mean(pointsInCluster, axis=0)
    return centers, centersAssignment

def showCluster(data, k, centers, clustersAssignment):
    numSample = data.shape[0]
    #draw all samples
    mark = ['or', 'ob', 'og', 'om']
    for i in range(numSample):
        markIndex = int(clustersAssignment[i])
        plt.plot(data[i, 0], data[i, 1], mark[markIndex])
    #draw the centers
    mark = ['Dr', 'Db', 'Dg', 'Dm']
    for i in range(k):
        plt.plot(centers[i, 0], centers[i, 1], mark[i], markersize=10)
    plt.show()

def main():
    #step1: load data
    print("load data...")
    dataSet = []
    with open('./testSet/testSet.txt') as dataSetFile:
        for line in dataSetFile:
            lineAttributes = line.strip().split('\t')
            dataSet.append([float(lineAttributes[0]), float(lineAttributes[1])])
    #step2: clustering
    print("clustering...")
    dataSet = np.mat(dataSet)
    k = 4
    n = 10000
    centers_result, clusters_assignment_result = kmeans(dataSet, k, n)
    #step3: show the clusters and centers
    print("show the clusters and centers...")
    showCluster(dataSet, k, centers_result, clusters_assignment_result)

if __name__ == "__main__":
    main()
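As an optional cross-check (this is an addition; the original post does not use scikit-learn), the same data can be clustered with sklearn.cluster.KMeans and the resulting centers compared with centers_result:

#optional cross-check with scikit-learn (assumes scikit-learn is installed)
import numpy as np
from sklearn.cluster import KMeans

data = np.loadtxt('./testSet/testSet.txt', delimiter='\t')    # same tab-separated file as above
model = KMeans(n_clusters=4, max_iter=10000, n_init=10).fit(data)
print(model.cluster_centers_)   # should be close to centers_result, up to label ordering
print(model.labels_[:10])       # cluster index of the first ten samples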