统计学习方法 李航 最近邻模型 python sklearn 实现 及课后习题

  • 算法特点
    简单地说,k-近邻算法采用测量不同特征值之间的距离方法进行分类。
    优点:精度高、对异常值不敏感、无数据输入假定
    缺点:计算复杂度高、空间复杂度高
    适用数据范围:数值型和标称型

  • 李航:
    K近邻法(k-nearest neighbor, k-NN)是一种基本的分类与回归的方法,1968年由Cover和Hart提出。k近邻的输入为实例的特征向量, 对应于特征空间的点,输出为实例的类别,可以取多类。 k近邻假设给定一个训练数据集,其中的实例类别已定。分类时,对新的实例,根据其k个最近邻的训练实例的 类别,通过多数表决等方式进行预测。因此,k近邻不具有显式的学习过程。 k近邻法实际上利用训练数据集对特征向量空间的划分,作为其分类的模型。 k值的选择、距离的度量以及分类决策规则是k近邻的三个基本要素。
    python代码所用数据为kaggle中mnist数据,将特征PCA至六维

# -*- coding: utf-8 -*-
"""
使用python实现的KNN算法进行分类的一个实例,
使用数据集是Kaggle数字手写体数据库
"""
import pandas as pd
import numpy as np
import math
import operator
from sklearn.decomposition import PCA

# 加载数据集
def load_data(filename, n, mode):
    """Load a Kaggle MNIST CSV and PCA-reduce its features to n dimensions.

    Args:
        filename: path to the CSV file.
        n: number of PCA components to keep.
        mode: 'test' means the file contains feature columns only; any
            other value means column 0 is the label and the rest features.

    Returns:
        (features, labels) for training data, or (features, 1) for test
        data, where 1 is a placeholder keeping the return shape uniform.

    NOTE(review): PCA is fitted independently on each file, so the train
    and test projections live in different coordinate systems. For correct
    predictions the PCA fitted on the training set should be reused to
    transform the test set — flagged here, kept as-is to preserve the
    function's interface.
    """
    data_pd = pd.read_csv(filename)
    data = np.asarray(data_pd)
    pca = PCA(n_components=n)
    if mode != 'test':
        # Column 0 is the label; reduce only the pixel columns.
        dataset = pca.fit_transform(data[:, 1:])
        return dataset, data[:, 0]
    dataset = pca.fit_transform(data)
    return dataset, 1

# 计算距离
def euclideanDistance(instance1, instance2, length):
    """Euclidean distance over the first `length` coordinates of two points.

    Args:
        instance1: indexable numeric sequence.
        instance2: indexable numeric sequence.
        length: number of leading dimensions to compare.

    Returns:
        The Euclidean distance as a float.
    """
    distance = 0
    for index in range(length):
        # Bug fix: the original assigned (=) instead of accumulating (+=),
        # so only the last dimension ever contributed to the distance.
        distance += (instance1[index] - instance2[index]) ** 2
    return math.sqrt(distance)


# 返回K个最近邻
def getNeighbors(trainingSet, train_label, testInstance, k):
    """Return the k training points closest to testInstance.

    Args:
        trainingSet: 2-D array of training feature vectors.
        train_label: labels aligned with trainingSet's rows.
        testInstance: the feature vector to classify.
        k: number of neighbours to return.

    Returns:
        List of (row_index, label) tuples for the k nearest rows,
        ordered by ascending distance.
    """
    # Bug fix: the original used len(testInstance) - 1 — a leftover from
    # tutorials where the last column holds the label — which silently
    # ignored the final PCA feature. Here every column is a feature.
    length = len(testInstance)
    # Distance from the test instance to every training row.
    distances = [euclideanDistance(testInstance, trainingSet[index], length)
                 for index in range(len(trainingSet))]
    # argsort yields row indices ordered by ascending distance.
    sortedDistIndicies = np.asarray(distances).argsort()
    neighbors = []
    for dex in sortedDistIndicies[:k]:
        neighbors.append((dex, train_label[dex]))
    return neighbors


# 对k个近邻进行合并,返回value最大的key
def getResponse(neighbors):
    """Majority vote over a list of (index, label) neighbour tuples.

    Returns the label that occurs most often; ties go to the label that
    first reached the winning count.
    """
    # Tally one vote per neighbour, keyed by its label (last tuple slot).
    classVotes = {}
    for neighbor in neighbors:
        label = neighbor[-1]
        classVotes[label] = classVotes.get(label, 0) + 1
    # Pick the highest-voted label; max keeps the earliest-seen winner
    # on ties, matching a stable descending sort's first element.
    winner, _ = max(classVotes.items(), key=operator.itemgetter(1))
    return winner

def main(train_data_path, test_data_path, top_k, n_dim, output_path='mm.csv'):
    """Run the hand-written KNN classifier end to end.

    Loads the train/test CSVs, PCA-reduces features to n_dim, predicts a
    label for every test row by top_k-nearest-neighbour majority vote,
    and writes an (ImageId, Label) submission CSV.

    Args:
        train_data_path: CSV with the label in column 0, pixels after it.
        test_data_path: CSV containing feature columns only.
        top_k: number of neighbours used in the majority vote.
        n_dim: PCA output dimensionality.
        output_path: destination CSV; defaults to the original 'mm.csv'.
    """
    train_data, train_label = load_data(train_data_path, n_dim, 'train')
    print("Train set :" + repr(len(train_data)))
    test_data, _ = load_data(test_data_path, n_dim, 'test')
    print("Test set :" + repr(len(test_data)))
    predictions = []
    for index, instance in enumerate(test_data):
        neighbors = getNeighbors(train_data, train_label, instance, top_k)
        result = getResponse(neighbors)
        # Kaggle ImageIds are 1-based.
        predictions.append([index + 1, result])
        print(">Index : %s, predicted = %s" % (index + 1, result))
    columns = ['ImageId', 'Label']
    save_file = pd.DataFrame(columns=columns, data=predictions)
    save_file.to_csv(output_path, index=False, encoding="utf-8")

if __name__ == "__main__":
    # Entry point: classify the Kaggle MNIST test set with k=5
    # neighbours after reducing the features to 6 PCA dimensions.
    main('train.csv', 'test.csv', 5, 6)

sklearn代码所用数据为kaggle中mnist数据,将特征PCA至六维

# -*- coding: utf-8 -*-
"""
使用sklearn实现的KNN算法进行分类的一个实例,
使用数据集是Kaggle数字手写体数据库
"""

import pandas as pd
import numpy as np
from sklearn import neighbors
from sklearn.decomposition import PCA
import sklearn

# 加载数据集
def load_data(filename, n, mode):
    """Load a Kaggle MNIST CSV and PCA-reduce its features to n dimensions.

    Args:
        filename: path to the CSV file.
        n: number of PCA components to keep.
        mode: 'test' means the file contains feature columns only; any
            other value means column 0 is the label and the rest features.

    Returns:
        (features, labels) for training data, or (features, 1) for test
        data, where 1 is a placeholder keeping the return shape uniform.

    NOTE(review): PCA is fitted independently on each file, so the train
    and test projections live in different coordinate systems. For correct
    predictions the PCA fitted on the training set should be reused to
    transform the test set — flagged here, kept as-is to preserve the
    function's interface.
    """
    data_pd = pd.read_csv(filename)
    data = np.asarray(data_pd)
    pca = PCA(n_components=n)
    if mode != 'test':
        # Column 0 is the label; reduce only the pixel columns.
        dataset = pca.fit_transform(data[:, 1:])
        return dataset, data[:, 0]
    dataset = pca.fit_transform(data)
    return dataset, 1

def main(train_data_path, test_data_path, n_dim, output_path='m.csv'):
    """Run the sklearn KNN classifier end to end.

    Loads the train/test CSVs, PCA-reduces features to n_dim, fits a
    KNeighborsClassifier, reports training accuracy, and writes an
    (ImageId, Label) submission CSV.

    Args:
        train_data_path: CSV with the label in column 0, pixels after it.
        test_data_path: CSV containing feature columns only.
        n_dim: PCA output dimensionality.
        output_path: destination CSV; defaults to the original 'm.csv'.
    """
    train_data, train_label = load_data(train_data_path, n_dim, 'train')
    print("Train set :" + repr(len(train_data)))
    test_data, _ = load_data(test_data_path, n_dim, 'test')
    print("Test set :" + repr(len(test_data)))

    knn = neighbors.KNeighborsClassifier()
    # Fit on the training set.
    knn.fit(train_data, train_label)
    # Accuracy on the data the model was fitted on (optimistic estimate).
    score = knn.score(train_data, train_label)
    print(">Training accuracy = " + repr(score))
    # Predict the whole test set in one vectorized call; the original
    # looped row by row and also computed an unused predict_proba per row.
    results = knn.predict(test_data)
    predictions = []
    for index, result in enumerate(results):
        # Kaggle ImageIds are 1-based.
        predictions.append([index + 1, result])
        print(">Index : %s, predicted = %s" % (index + 1, result))
    columns = ['ImageId', 'Label']
    save_file = pd.DataFrame(columns=columns, data=predictions)
    save_file.to_csv(output_path, index=False, encoding="utf-8")

if __name__ == "__main__":
    # Entry point: classify the Kaggle MNIST test set with sklearn's
    # default KNN after reducing the features to 6 PCA dimensions.
    main('train.csv', 'test.csv', 6)

课后习题

喜欢的关注点赞哈

你可能感兴趣的:(统计学习方法 李航 最近邻模型 python sklearn 实现 及课后习题)