Machine Learning in Action - KNN

How it works: we start from a training sample set in which every sample already carries a label, so the mapping between each sample and its class is known. When a new, unlabeled input arrives, each of its features is compared with the corresponding features of the samples in the training set, the k most similar samples (the k nearest neighbors) are selected, and their labels are collected. The class that occurs most often among these k neighbors is assigned to the new input.
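As a concrete illustration, take the four toy points built by createDataset below: [1.0, 1.1] and [1.0, 1.0] labeled A, [0, 0] and [0, 0.1] labeled B. To classify the new point [0, 0.2] with k = 3, its Euclidean distances to the four samples are roughly 1.35, 1.28, 0.2 and 0.1, so the three nearest neighbors are [0, 0.1] (B), [0, 0] (B) and [1.0, 1.0] (A). B wins the vote 2 to 1, and the new point is labeled B.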
Python code (Python 3):

from numpy import *
import operator

# Build a toy data set: four points with two features and two classes
def createDataset():
    group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])
    labels = ['A', 'A', 'B', 'B']
    return group, labels
# k-nearest-neighbor classifier
def classify0(inX, dataSet, labels, k):
    dataSetSize = dataSet.shape[0]
    # Euclidean distance from inX to every sample in dataSet
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2
    # sum the squared differences across each row
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances ** 0.5
    # indices that would sort the distances in ascending order
    sortedDistIndicies = distances.argsort()
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        # get() returns the current count for voteIlabel, or 0 if it is not there yet
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    # operator.itemgetter(1) sorts by the vote count; reverse=True gives descending order
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
# Parse a tab-separated text file (three numeric features plus an integer label
# per line) into a NumPy feature matrix and a list of class labels
def file2matrix(filename):
    fr = open(filename)
    arrayOLines = fr.readlines()
    numberOfLines = len(arrayOLines)
    # pre-allocate a numberOfLines x 3 matrix filled with zeros
    returnMat = zeros((numberOfLines, 3))
    classLabelVector = []
    index = 0
    for line in arrayOLines:
        line = line.strip()
        listFromLine = line.split('\t')
        returnMat[index, :] = listFromLine[0:3]
        classLabelVector.append(int(listFromLine[-1]))
        index += 1
    fr.close()
    return returnMat, classLabelVector
# Normalize each feature to [0, 1]: newValue = (value - min) / (max - min)
def autoNorm(dataSet):
    minVals = dataSet.min(0)
    maxVals = dataSet.max(0)
    ranges = maxVals - minVals
    normDataSet = zeros(shape(dataSet))
    m = dataSet.shape[0]
    # subtract the column minimums, then divide element-wise by the column ranges
    normDataSet = dataSet - tile(minVals, (m, 1))
    normDataSet = normDataSet / tile(ranges, (m, 1))
    return normDataSet, ranges, minVals
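# Worked example for autoNorm (added for illustration, not from the book): for the toy
# data from createDataset(), the column minimums are [0, 0] and the ranges are [1.0, 1.1],
# so the row [1.0, 1.1] is normalized to [(1.0 - 0) / 1.0, (1.1 - 0) / 1.1] = [1.0, 1.0].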
# Hold-out test of the classifier on the dating-site data
def datingClassTest():
    hoRatio = 0.10
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    # use the first 10% of the rows for testing and the remaining 90% as the "training" set
    numTestVecs = int(m * hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3)
        print('the classifier came back with: %d, the real answer is: %d' % (classifierResult, datingLabels[i]))
        if classifierResult != datingLabels[i]:
            errorCount += 1.0
    print('the total error rate is: %.2f%%' % (errorCount / float(numTestVecs) * 100))
# Interactive prediction: ask for the three feature values and print the predicted class
def classifyPerson():
    resultList = ['not at all', 'in small doses', 'in large doses']
    percentTats = float(input('percentage of time spent playing video games?'))
    ffMiles = float(input('frequent flier miles earned per year?'))
    iceCream = float(input('liters of ice cream consumed per year?'))
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')
    normMat, ranges, minVals = autoNorm(datingDataMat)
    inArr = array([ffMiles, percentTats, iceCream])
    # labels in the file are 1, 2, 3, so subtract 1 to index into resultList
    classifierResult = classify0((inArr - minVals) / ranges, normMat, datingLabels, 3)
    print('you will probably like this person: ', resultList[classifierResult - 1])
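
A minimal usage sketch, assuming the functions above are saved in a module named kNN.py and that datingTestSet2.txt from the book's source files is in the working directory:

import kNN

# toy example: the point [0, 0.2] should come back as class 'B'
group, labels = kNN.createDataset()
print(kNN.classify0([0, 0.2], group, labels, 3))

# hold-out test on the dating data; prints per-sample results and the total error rate
kNN.datingClassTest()

# interactive prediction; prompts for the three feature values
kNN.classifyPerson()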

All of the above comes from "Machine Learning in Action" (《机器学习实战》).
