K-近邻算法(KNN)是最简单的分类算法,采用测量不同特征值之间的距离方法进行分类,精度高,对异常数据不敏感,但是缺点也很明显,计算复杂度高,空间复杂度高。
import numpy as np
import operator
def classify0(inX, dataSet, labels, k):
    """Classify ``inX`` by majority vote among its k nearest neighbors.

    Parameters:
        inX: the input feature vector to classify (length matches dataSet columns).
        dataSet: numpy array of shape (numSamples, numFeatures), the training points.
        labels: sequence of class labels, one per row of dataSet.
        k: number of nearest neighbors to vote.

    Returns:
        The label that occurs most often among the k nearest training points.
    """
    dataSetSize = dataSet.shape[0]  # number of training samples (rows, not columns)
    # Euclidean distance from inX to every training point
    diffMat = np.tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances ** 0.5
    sortedDistIndicies = distances.argsort()  # indices that sort distances ascending
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]  # label of the i-th closest point
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1  # tally votes per label
    # .items() works on both Python 2 and 3 (.iteritems() is Python-2-only)
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]  # label with the most votes
def createDataSet():
    """Build a tiny toy training set for the KNN demo.

    Returns:
        A tuple ``(group, labels)``: an (8, 2) numpy array of 2-D feature
        points and the list of their class labels ('A'-'D', two points each).
    """
    group = np.array([
        [1.0, 1.1], [1.0, 1.0],
        [0, 0], [0, 0.1],
        [1, 0], [1.0, 0.1],
        [0, 1.1], [0, 1],
    ])
    labels = list('AABBCCDD')
    return group, labels
if __name__ == '__main__':
    # print(...) with a single argument behaves identically on Python 2
    # (parenthesized expression) and Python 3 (function call), unlike the
    # original Python-2-only `print 'example:'` statement form.
    print('example:')
    g, l = createDataSet()
    print(classify0([0.2, 1], g, l, 3))