k-Nearest Neighbors (kNN)

Study notes on Machine Learning in Action (《机器学习实战》)

In a nutshell

Compute the distance between the point to be classified and every point in the training set, take the k nearest points, and the class that appears most often among them is the predicted class of the point.

Pseudocode

1. Compute the distance between every point in the training set and the current point (Euclidean distance; see the formula below);
2. Sort the points by distance in ascending order;
3. Take the k points closest to the current point;
4. Count how often each class appears among those k points;
5. Return the most frequent class as the predicted class of the current point.
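
Step 1 uses the ordinary Euclidean distance, which is exactly what both implementations below compute (squared differences, summed, then square-rooted): for an n-dimensional query point $x$ and a training point $y$,

$$d(x, y) = \sqrt{\sum_{i=1}^{n} (x_i - y_i)^2}$$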

Pros & Cons

Pros: high accuracy, insensitive to outliers, no assumptions about the input data.
Cons: high computational complexity and high space complexity.
Applicable data types: numeric and nominal.

Python implementation

  • Data preprocessing
  • Model training (kNN has no explicit training step)
  • Performance testing
  • Applying the algorithm
# -*- coding: utf-8 -*-
# kNN: k Nearest Neighbors
#
from numpy import *
import operator
from os import listdir

# knn algorithm: predict the label of inX from its k nearest training points
def classify0(inX,dataSet,labels,k):
    dataSetSize = dataSet.shape[0]
    # Euclidean distance from inX to every row of dataSet
    diffMat = tile(inX, (dataSetSize,1)) - dataSet
    sqDiffMat = diffMat**2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances**0.5
    sortedDistIndicies = distances.argsort()    # indices sorted by ascending distance
    # majority vote among the k nearest neighbors
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel,0) + 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
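
# A toy dataset for a quick sanity check of classify0 (mirrors the small
# example dataset used in Machine Learning in Action; values are illustrative).
def createDataSet():
    group = array([[1.0,1.1],[1.0,1.0],[0.0,0.0],[0.0,0.1]])
    labels = ['A','A','B','B']
    return group, labels
# e.g. group,labels = createDataSet(); classify0([0.0,0.2],group,labels,3) -> 'B'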

def file2matrix(filename):
    fr = open(filename)
    numberOfLines = len(fr.readlines())         # get the number of lines in the file
    returnMat = zeros((numberOfLines,3))        # feature matrix to return
    classLabelVector = []                       # label list to return
    fr = open(filename)                         # reopen to rewind to the start
    index = 0
    for line in fr.readlines():
        line = line.strip()
        listFromLine = line.split('\t')
        returnMat[index,:] = listFromLine[0:3]
        classLabelVector.append(int(listFromLine[-1]))
        index += 1
    return returnMat,classLabelVector
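
# file2matrix expects one sample per line: three tab-separated numeric
# features followed by an integer class label, e.g. (illustrative values):
#   40920    8.326976    0.953952    3
#   14488    7.153469    1.673904    2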

## normalization: min-max scale each feature column to [0, 1]
def autoNorm(dataSet):
    minVals = dataSet.min(0)            # column-wise minima
    maxVals = dataSet.max(0)            # column-wise maxima
    ranges = maxVals - minVals
    m = dataSet.shape[0]
    normDataSet = dataSet - tile(minVals, (m,1))
    normDataSet = normDataSet/tile(ranges, (m,1))   # element-wise divide
    return normDataSet, ranges, minVals
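
# autoNorm applies the usual min-max formula per feature:
#   newValue = (oldValue - min) / (max - min)
# e.g. (assumed numbers) a feature spanning [20, 100] maps 60 to (60-20)/(100-20) = 0.5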

## dating website example
def datingClassTest():
    hoRatio = 0.10      # hold out 10% of the data for testing
    datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')       # load data set from file
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m*hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        # first numTestVecs rows are the test set; the rest form the training set
        classifierResult = classify0(normMat[i,:],normMat[numTestVecs:m,:],datingLabels[numTestVecs:m],3)
        print("#%d: the classifier came back with: %d, the real answer is: %d" % (i,classifierResult, datingLabels[i]))
        if classifierResult != datingLabels[i]: errorCount += 1.0
    print("errorCount: %d" % errorCount)
    print("the total error rate is: %f" % (errorCount/float(numTestVecs)))


## application
def classifyPerson():
    resultList = ['not at all','in small doses','in large doses']
    # collect the three features from the user
    percentGames = float(input("percentage of time spent on playing video games: "))
    ffMiles = float(input("frequent flier miles earned per year: "))
    iceCreams = float(input("liters of ice cream consumed per year: "))
    # training data preprocessing
    datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')
    normMat,ranges,minVals = autoNorm(datingDataMat)
    # the query must be normalized with the training min/ranges before classifying
    inArr = array([ffMiles,percentGames,iceCreams])
    classifierResult = classify0((inArr-minVals)/ranges,normMat,datingLabels,3)
    print("You will probably like this person:", resultList[classifierResult-1])


# read a 32x32 text image of '0'/'1' characters into a 1x1024 row vector
def img2vector(filename):
    returnVect = zeros((1,1024))
    fr = open(filename)
    for i in range(32):
        lineStr = fr.readline()
        for j in range(32):
            returnVect[0,32*i+j] = int(lineStr[j])
    return returnVect
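
# Each digit file holds 32 lines of 32 '0'/'1' characters (a binarized 32x32
# image); the label is encoded in the file name, e.g. '9_45.txt' is a sample
# of the digit 9 (the naming scheme of the book's trainingDigits/testDigits folders).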

## handwriting example
def handwritingClassTest():
    hwLabels = []
    trainingFileList = listdir('trainingDigits')           #load the training set
    m = len(trainingFileList)
    trainingMat = zeros((m,1024))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]     #take off .txt
        classNumStr = int(fileStr.split('_')[0])
        hwLabels.append(classNumStr)
        trainingMat[i,:] = img2vector('trainingDigits/%s' % fileNameStr)
    testFileList = listdir('testDigits')        #iterate through the test set
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        fileStr = fileNameStr.split('.')[0]     #take off .txt
        classNumStr = int(fileStr.split('_')[0])
        vectorUnderTest = img2vector('testDigits/%s' % fileNameStr)
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        print "#%d: the classifier came back with: %d, the real answer is: %d" \
            % (i,classifierResult, classNumStr)
        if (classifierResult != classNumStr): errorCount += 1.0
    print "\nthe total number of errors is: %d" % errorCount
    print "\nthe total error rate is: %f" % (errorCount/float(mTest))

## main
if __name__ == '__main__':
    print "\n#-------------Dating website example------------------#\n"
    print "\n-----------------test-----------------"
    datingClassTest()
    print "\n---------------prediction-------------"
    classifyPerson()

    print "\n#--------------Handwriting example-------------------#\n"
    print "\n-----------------test-----------------"
    handwritingClassTest()
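
For comparison, the same 10% hold-out test can be reproduced in a few lines with scikit-learn's KNeighborsClassifier (a minimal sketch, assuming scikit-learn is installed and datingTestSet2.txt is in the working directory; this is not part of the original book code):

import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MinMaxScaler

data = np.loadtxt('datingTestSet2.txt')              # 3 feature columns + 1 label column
X, y = data[:, :3], data[:, 3].astype(int)
n_test = int(len(X) * 0.10)                          # hold out the first 10%, as above

scaler = MinMaxScaler().fit(X[n_test:])              # fit the scaling on the training split only
knn = KNeighborsClassifier(n_neighbors=3).fit(scaler.transform(X[n_test:]), y[n_test:])
error_rate = 1 - knn.score(scaler.transform(X[:n_test]), y[:n_test])
print("the total error rate is: %f" % error_rate)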

MATLAB implementation

  • Main script
%% kNN: k Nearest Neighbors
% Initialization
clear;close all;clc;
data_file_name = 'datingTestSet2.txt';

%% load data
data = load(data_file_name);
trainData = data(:,1:3);
y = data(:,4);

%% visualization
X = trainData(:,1:2);   % plot first two features
figure;
hold on;
idx1 = find(y==1);
idx2 = find(y==2);
idx3 = find(y==3);
plot(X(idx1,1),X(idx1,2),'k+','LineWidth',2,'MarkerSize',7);
plot(X(idx2,1),X(idx2,2),'ko','MarkerFaceColor','y','MarkerSize',7);
plot(X(idx3,1),X(idx3,2),'b*');
hold off;

%% normalization
minVals = min(trainData,[],1);
maxVals = max(trainData,[],1);
ranges = maxVals - minVals;
num_example = size(trainData,1);
normData = trainData - repmat(minVals,[num_example,1]);
normData = normData ./ repmat(ranges,[num_example,1]);

%% knn algorithm
%label_pred = my_knn(inX,data,label,k);

% test
hold_out_ratio = 0.1;
error_count = 0;
k = 3;
num_test = round(num_example * hold_out_ratio); % round: nearest; floor: toward -inf; ceil: toward +inf; fix: toward zero
for i = 1:num_test
    label_pred = my_knn(normData(i,:),normData(num_test+1:num_example,:),y(num_test+1:num_example),k); % rows 1..num_test are held out for testing
    fprintf('#%d: the classifier came back with label: %d, the real label is: %d.\n',i,label_pred,y(i));
    if(label_pred ~= y(i))
        error_count = error_count + 1;
    end
end

% evaluate
err = error_count/num_test;
fprintf('error count: %d\n',error_count);
fprintf('the total error rate is %.2f%%.\n',err*100);
  • kNN implementation (my_knn.m)
function label_pred = my_knn(inX,data,label,k)
% INPUT (data is assumed to be normalized):
% inX   -- sample to classify
% data  -- training set features
% label -- training set labels
% k     -- number of neighbors
% OUTPUT:
% label_pred -- predicted label of inX

row_data = size(data,1);
diffMat = data - repmat(inX,[row_data,1]);
sqDiffMat = diffMat.^2;
sqDistance = sum(sqDiffMat,2);
distance = sqrt(sqDistance);
[~,I] = sort(distance);     % only the sort order is needed
vote_label = zeros(k,1);
for i = 1:k
    vote_label(i) = label(I(i));
end
unique_label = unique(vote_label);      % distinct labels among the k neighbors
n = histc(vote_label,unique_label);     % how many votes each label received
[~,idx] = max(n);                       % on a tie, max returns the first (smallest) label
label_pred = unique_label(idx);         % label with the most votes
