Machine Learning in Action: a pandas implementation of the KMeans algorithm

This one was painful to write. Something about the initial random centroid selection is off: after the first pass, a centroid can end up so badly placed that not a single point gets assigned to it, and the code then throws an error. As a result the code worked only intermittently, and at first I couldn't figure out the cause of the bug at all.
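As an aside, one way to sidestep the empty-cluster problem is to initialize the centroids by sampling k actual data points rather than drawing random coordinates inside the bounding box, so every centroid starts on top of at least one point. A minimal sketch (randCentFromData is my own helper name, not from the book):

import numpy as np
import pandas as pd

def randCentFromData(dataSet, k, seed=None):
    # pick k distinct rows of dataSet as the initial centroids; unlike
    # random coordinates inside the bounding box, a sampled centroid
    # can never start with zero members on the first assignment pass
    rng = np.random.default_rng(seed)
    idx = rng.choice(dataSet.shape[0], size=k, replace=False)
    return dataSet.iloc[idx].reset_index(drop=True)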
Also, while hand-rolling it with pandas does help consolidate my pandas knowledge, I now think I had gone down the wrong path. The important parts of machine learning are understanding the theory and knowing how to use existing libraries; for the book's code it's enough to understand how the theory maps to a concrete implementation. Painstakingly reinventing the wheel here isn't worth much: building it does deepen understanding, but it's twice the effort for half the result.
So going forward I've decided to combine the theory from the watermelon book (Zhou Zhihua's Machine Learning), the implementation ideas from Machine Learning in Action, and sklearn for the actual code.
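For reference, a minimal sketch of the sklearn version (same tab-separated testSet2.txt as below; the hyperparameters here are just illustrative):

import pandas as pd
from sklearn.cluster import KMeans

data = pd.read_csv(r'C:\Users\36955\Downloads\mlp\Ch10\testSet2.txt',
                   sep='\t', header=None)

# k-means++ initialization plus multiple restarts avoids the
# empty-cluster problem described above
km = KMeans(n_clusters=3, n_init=10, random_state=42).fit(data.values)

print(km.cluster_centers_)   # k x n array of centroids
print(km.labels_[:10])       # cluster index of the first 10 points
print(km.inertia_)           # total within-cluster SSE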

import numpy as np
import pandas as pd
from pandas import DataFrame,Series

path = r'C:\Users\36955\Downloads\mlp\Ch10\testSet2.txt'
data = pd.read_csv(path,sep='\t',header=None)

def randCent(dataSet, k):
    # generate k random centroids inside the bounding box of dataSet
    n = dataSet.shape[1]
    centroids = DataFrame(np.zeros((k, n)))
    data_min = dataSet.min(0)
    data_range = dataSet.max(0) - data_min
    for j in range(n):
        minJ = data_min[j]
        rangeJ = float(data_range[j])
        # rand(k), not rand(k,1): a 1-D array assigns cleanly into a column
        centroids.iloc[:, j] = minJ + rangeJ * np.random.rand(k)
    return centroids

def distEclud(vecA, vecB):
    return np.sqrt(np.sum(np.power(vecA - vecB, 2)))

def kMeans(data, k):
    m = data.shape[0]
    # one row per sample: assigned cluster index and squared distance to it
    # (use data.index so the groupby below stays aligned when kMeans is
    # called on a subset of rows, as biKmeans does)
    clusterAssment = DataFrame(np.zeros((m, 2)),
                               columns=['clusterName', 'dist'],
                               index=data.index)
    centroids = randCent(data, k)
    clusterChanged = True
    while clusterChanged:
        clusterChanged = False
        for i in range(m):
            minDist = np.inf
            minIndex = -1
            for j in range(k):
                dist = distEclud(data.iloc[i, :], centroids.iloc[j, :])
                if dist < minDist:
                    minDist = dist
                    minIndex = j
            if clusterAssment.iloc[i, 0] != minIndex:
                clusterChanged = True
            clusterAssment.iloc[i, :] = minIndex, minDist ** 2
        # recompute each centroid as the mean of its assigned points;
        # if a centroid lost every point (the bug described above),
        # keep its previous position instead of letting the row disappear
        newCentroids = data.groupby(clusterAssment.clusterName.astype(int)).mean()
        centroids = newCentroids.reindex(range(k)).fillna(centroids)
    return centroids, clusterAssment

# print(kMeans(data, 3))
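A quick way to sanity-check the pandas version (results vary between runs because the initialization is random):

centroids, assignments = kMeans(data, 3)
print(centroids)                                   # k x n DataFrame of centroid coordinates
print(assignments.head())                          # per-point cluster index and squared distance
print(assignments.groupby('clusterName').size())   # cluster sizes
print(assignments.dist.sum())                      # total within-cluster SSE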

def biKmeans(data, k):
    m = data.shape[0]
    clusterAssment = DataFrame(np.zeros((m, 2)),
                               columns=['clusterName', 'dist'],
                               index=data.index)
    # start with a single cluster whose centroid is the mean of all points
    centroid0 = list(data.mean(0))
    centList = [centroid0]
    while len(centList) < k:
        lowestSSE = np.inf
        # try splitting each existing cluster with 2-means and keep the split
        # that gives the lowest total SSE
        for i in range(len(centList)):
            splitCluster = data.loc[clusterAssment[clusterAssment.clusterName == i].index, :]
            splitCentroids, splitClusterAssement = kMeans(splitCluster, 2)
            splitSSE = splitClusterAssement.dist.sum()
            nosplitSSE = clusterAssment[clusterAssment.clusterName != i].dist.sum()
            if (splitSSE + nosplitSSE) < lowestSSE:
                lowestSSE = splitSSE + nosplitSSE
                besttosplit = i
                newCentroids = splitCentroids
                newAssement = splitClusterAssement.copy()
        centList[besttosplit] = list(newCentroids.iloc[0, :])
        centList.append(list(newCentroids.iloc[1, :]))
        # relabel the winning split: sub-cluster 1 gets the new cluster index,
        # sub-cluster 0 keeps the index of the cluster that was split
        # (relabel 1 first, otherwise the case besttosplit == 1 gets relabelled twice)
        newAssement.loc[newAssement.clusterName == 1, 'clusterName'] = len(centList) - 1
        newAssement.loc[newAssement.clusterName == 0, 'clusterName'] = besttosplit
        clusterAssment = clusterAssment[clusterAssment.clusterName != besttosplit]
        clusterAssment = pd.concat([clusterAssment, newAssement], axis=0)
    return centList, clusterAssment

print(biKmeans(data, 4))
