import numpy as np
import matplotlib.pyplot as plt

# Load the data set
def loadDataSet(fileName):
    data = np.loadtxt(fileName, delimiter=',')
    return data

# Euclidean distance
def distEclud(x, y):
    return np.sqrt(np.sum((x - y) ** 2))

# Build a set of k random centroids for the given data set
def randCent(dataSet, k):
    # m is the number of rows, n the number of columns
    m, n = dataSet.shape
    centroids = np.zeros((k, n))
    # sample k distinct row indices so no two initial centroids coincide
    for i, index in enumerate(np.random.choice(m, k, replace=False)):
        centroids[i, :] = dataSet[index, :]
    return centroids
# k-means clustering
def KMeans(dataSet, k):
    # number of rows (samples)
    m = np.shape(dataSet)[0]
    # column 0: index of the cluster the sample belongs to
    # column 1: squared distance from the sample to its cluster centroid
    clusterAssment = np.mat(np.zeros((m, 2)))
    clusterChange = True
    # Step 1: initialize the centroids
    centroids = randCent(dataSet, k)
    while clusterChange:
        clusterChange = False
        # iterate over the samples
        for i in range(m):
            minDist = float('inf')
            minIndex = -1
            # Step 2: find the nearest centroid
            for j in range(k):
                # Euclidean distance to centroid j
                distance = distEclud(centroids[j, :], dataSet[i, :])
                if distance < minDist:
                    minDist = distance
                    minIndex = j
            # Step 3: update the cluster assignment of the sample
            if clusterAssment[i, 0] != minIndex:
                clusterChange = True
                clusterAssment[i, :] = minIndex, minDist ** 2
        # Step 4: recompute the centroids
        for j in range(k):
            # all points assigned to cluster j
            pointInCluster = dataSet[np.nonzero(clusterAssment[:, 0].A == j)[0]]
            # column-wise mean of those points; skip empty clusters
            if len(pointInCluster) > 0:
                centroids[j, :] = np.mean(pointInCluster, axis=0)
    print("Congratulations, cluster complete!")
    return centroids, clusterAssment
def showCluster(dataSet, k, centroids, clusterAssment):
    m, n = dataSet.shape
    if n != 2:
        print("The data is not two-dimensional")
        return 1
    mark = ['or', 'ob', 'og', 'ok', '^r', '+r', 'sr', 'dr', '<r', 'pr']
    if k > len(mark):
        print("k is too large")
        return 1
    # plot the samples, colored by cluster
    for i in range(m):
        markIndex = int(clusterAssment[i, 0])
        plt.plot(dataSet[i, 0], dataSet[i, 1], mark[markIndex])
    mark = ['Dr', 'Db', 'Dg', 'Dk', '^b', '+b', 'sb', 'db', '<b', 'pb']
    # plot the centroids
    for i in range(k):
        plt.plot(centroids[i, 0], centroids[i, 1], mark[i])
    plt.show()
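A minimal driver for the hand-rolled version above, assuming "test.txt" is the same comma-delimited two-column file used in the sklearn example below:

# assumes test.txt contains comma-delimited 2-D points, one sample per row
dataSet = loadDataSet("test.txt")
k = 3
centroids, clusterAssment = KMeans(dataSet, k)
showCluster(dataSet, k, centroids, clusterAssment)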
The same clustering with sklearn's KMeans:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans

# Load the data set
def loadDataSet(fileName):
    data = np.loadtxt(fileName, delimiter=',')
    return data

x = loadDataSet("test.txt")
y_pred = KMeans(n_clusters=3, random_state=9).fit_predict(x)
plt.scatter(x[:, 0], x[:, 1], c=y_pred)
plt.show()
Evaluation metrics: Silhouette Coefficient and Calinski-Harabasz Index
(1) The Silhouette Coefficient of a sample i is
s(i) = (b(i) − a(i)) / max(a(i), b(i))
where a(i) is the mean distance from i to the other samples in its own cluster (the within-cluster dissimilarity of i), and b(i) is the mean distance from i to the samples of the nearest other cluster (the between-cluster dissimilarity of i).
If s(i) is close to 1, the sample is well placed in its cluster; if s(i) is close to -1, sample i should rather be assigned to another cluster.
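A minimal sketch of computing it with sklearn's metrics.silhouette_score, reusing the "test.txt" data and k=3 from the examples above:

from sklearn import metrics
from sklearn.cluster import KMeans
import numpy as np

x = np.loadtxt("test.txt", delimiter=',')
y_pred = KMeans(n_clusters=3, random_state=9).fit_predict(x)
# silhouette_score averages s(i) over all samples;
# values near 1 indicate dense, well-separated clusters
print(metrics.silhouette_score(x, y_pred))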
(2) The Calinski-Harabasz score s is computed as
s(k) = [tr(B_k) / tr(W_k)] * [(m − k) / (k − 1)]
where m is the number of training samples, k is the number of clusters, B_k is the between-cluster covariance matrix, W_k is the within-cluster covariance matrix, and tr is the trace of a matrix. The larger the between-cluster spread and the smaller the within-cluster spread, the higher s is, and the better the clustering.
Code implementation:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn import metrics

# Load the data set
def loadDataSet(fileName):
    data = np.loadtxt(fileName, delimiter=',')
    return data

x = loadDataSet("test.txt")
y_pred = KMeans(n_clusters=3, random_state=9).fit_predict(x)
caha = metrics.calinski_harabasz_score(x, y_pred)
print(caha)
plt.scatter(x[:, 0], x[:, 1], c=y_pred)
plt.show()