k-Nearest Neighbor (kNN) classification is a theoretically mature method and one of the simplest machine learning algorithms. The idea: if the majority of the k samples most similar to a given sample (i.e., its nearest neighbors in feature space) belong to a certain class, then the sample belongs to that class too.
In the figure at the lower right, which class should the green circle be assigned to: the red triangles or the blue squares? With K=3, red triangles make up 2/3 of the neighbors, so the green circle is assigned to the red-triangle class; with K=5, blue squares make up 3/5, so the green circle is assigned to the blue-square class.
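A minimal sketch of that voting flip, with made-up 2-D coordinates chosen to reproduce the 2/3 vs. 3/5 split (every value here is an illustrative assumption):
import numpy as np
from collections import Counter
# hypothetical points: class 0 = red triangles, class 1 = blue squares
X = np.array([[1.0, 1.1], [1.2, 0.9], [3.0, 3.2], [2.9, 3.1], [0.2, 3.0]])
y = np.array([0, 0, 1, 1, 1])
query = np.array([1.5, 1.5])  # the green circle
# sort training points by distance to the query
order = np.argsort(np.sqrt(((X - query) ** 2).sum(axis=1)))
for k in (3, 5):
    winner = Counter(y[order[:k]]).most_common(1)[0][0]
    print(k, winner)  # K=3 -> class 0, K=5 -> class 1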
The KNNClassifier below is a simple class that mimics the sklearn API.
import numpy as np
from math import sqrt
from collections import Counter
# kNN can be seen as an algorithm with no training phase:
# for kNN, the training set itself is the model.
# Parameters: k; X_train, the training set; y_train, the labels; x, a new point.
class KNNClassifier:

    def __init__(self, k):
        """Initialize the kNN classifier"""
        assert k >= 1, "k must be valid"
        self.k = k
        # private member variables (leading underscore)
        self._X_train = None
        self._y_train = None

    def fit(self, X_train, y_train):
        """Train the kNN classifier on the training set X_train, y_train"""
        assert X_train.shape[0] == y_train.shape[0], \
            "the size of X_train must be equal to the size of y_train."
        assert self.k <= X_train.shape[0], \
            "the size of X_train must be at least k."
        self._X_train = X_train
        self._y_train = y_train
        return self

    def predict(self, X_predict):
        """Given a data set X_predict, return the vector of predicted labels"""
        assert self._X_train is not None and self._y_train is not None, \
            "must fit before predict!"
        assert X_predict.shape[1] == self._X_train.shape[1], \
            "the feature number of X_predict must be equal to X_train."
        y_predict = [self._predict(x) for x in X_predict]
        return np.array(y_predict)

    def _predict(self, x):
        """Given a single sample x, return its predicted label"""
        assert x.shape[0] == self._X_train.shape[1], \
            "the feature number of x must be equal to X_train"
        # Euclidean distance to every training point
        distances = [sqrt(np.sum((x_train - x) ** 2))
                     for x_train in self._X_train]
        # indices sorted by increasing distance
        nearest = np.argsort(distances)
        # labels of the k nearest points
        topK_y = [self._y_train[i] for i in nearest[:self.k]]
        # vote counting
        votes = Counter(topK_y)
        # return the label with the most votes
        return votes.most_common(1)[0][0]

    def _accuracy_score(self, y_true, y_predict):
        """Compute the accuracy between y_true and y_predict"""
        assert y_true.shape[0] == y_predict.shape[0], \
            "the size of y_true must be equal to the size of y_predict"
        return sum(y_true == y_predict) / len(y_true)

    def score(self, X_test, y_test):
        """Evaluate the current model's accuracy on the test set X_test, y_test"""
        y_predict = self.predict(X_test)
        return self._accuracy_score(y_test, y_predict)

    def __repr__(self):
        return "KNN(k=%d)" % self.k
The train_test_split function in the model_selection module:
import numpy as np
def train_test_split(X, y, test_ratio=0.2, seed=None):
    """Split the data X and y into X_train, X_test, y_train, y_test according to test_ratio"""
    assert X.shape[0] == y.shape[0], \
        "the size of X must be equal to the size of y"
    assert 0.0 <= test_ratio <= 1.0, \
        "test_ratio must be valid"
    if seed is not None:
        np.random.seed(seed)
    # shuffle the indices of X
    shuffled_indexes = np.random.permutation(len(X))
    # split the shuffled indices according to test_ratio
    test_size = int(len(X) * test_ratio)
    test_indexes = shuffled_indexes[:test_size]
    train_indexes = shuffled_indexes[test_size:]
    # pick out the data with fancy indexing
    X_train = X[train_indexes]
    y_train = y[train_indexes]
    X_test = X[test_indexes]
    y_test = y[test_indexes]
    return X_train, X_test, y_train, y_test
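A minimal usage sketch (the array contents are arbitrary; the shapes follow from test_ratio=0.3 on 10 samples):
import numpy as np
X = np.arange(20).reshape(10, 2)
y = np.arange(10)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_ratio=0.3, seed=42)
print(X_train.shape, X_test.shape)  # (7, 2) (3, 2)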
Import the libraries:
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn import datasets
Load the iris dataset:
iris = datasets.load_iris()
iris.keys()
# result: data is the feature matrix, target the class labels
dict_keys(['data', 'target', 'target_names', 'DESCR', 'feature_names'])
iris.data.shape
(150, 4)
# use only the first two features (for plotting)
X = iris.data[:, :2]
X.shape
(150, 2)
y = iris.target
# draw a scatter plot of the two features, one color/marker per class
plt.scatter(X[y==0, 0], X[y==0, 1], color="red", marker='o')
plt.scatter(X[y==1, 0], X[y==1, 1], color="blue", marker='x')
plt.scatter(X[y==2, 0], X[y==2, 1], color="green", marker='+')
The plot shows that the data clusters clearly by feature values, so the kNN algorithm is a good fit.
# use our own implementation modules
%run D:\机械学习\MySkl\model_selection.py
# split the data (note: we use all four features here, not just the two plotted above)
X_train, X_test, y_train, y_test = train_test_split(iris.data, y)
# print the shapes
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
(120, 4)
(120,)
(30, 4)
(30,)
%run D:\机械学习\MySkl\KNN_classify.py
# use k=3, i.e. classify by the 3 nearest points
my_knn_clf = KNNClassifier(k=3)
my_knn_clf
KNN(k=3)
# pass in the training data
my_knn_clf.fit(X_train, y_train)
# predict
y_predict = my_knn_clf.predict(X_test)
y_predict
# predicted labels:
array([0, 0, 0, 2, 2, 2, 1, 1, 0, 1, 0, 0, 2, 1, 2, 1, 2, 0, 2, 1, 0, 0,
0, 0, 1, 1, 0, 1, 0, 2])
# ground-truth labels
y_test
array([0, 0, 0, 2, 2, 2, 1, 1, 0, 1, 0, 0, 2, 1, 2, 1, 2, 0, 2, 1, 0, 0,
0, 0, 1, 1, 0, 1, 0, 2])
# this run achieved 100% accuracy (results may vary between runs, since the train/test split is random)
sum(y_predict == y_test)/len(y_test)
1.0
# same data, so the same result
my_knn_clf.score(X_test, y_test)
1.0
# import the required libraries
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import datasets
# load the handwritten digits dataset
digits = datasets.load_digits()
# inspect the contents
digits.keys()
dict_keys(['data', 'target', 'target_names', 'images', 'DESCR'])
# show part of the description: each sample is an 8x8 image
print(digits['DESCR'])
Notes
-----
Data Set Characteristics:
:Number of Instances: 5620
:Number of Attributes: 64
:Attribute Information: 8x8 image of integer pixels in the range 0..16.
:Missing Attribute Values: None
:Creator: E. Alpaydin (alpaydin '@' boun.edu.tr)
:Date: July; 1998
# the sklearn copy bundles only part of the full dataset (1,797 of the 5,620 samples)
# feature matrix
x = digits.data
x.shape
(1797, 64)
# class labels corresponding to the feature matrix
y = digits.target
y.shape
(1797,)
# class label names
digits.target_names
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
# inspect sample 666; its label is 0
some_digit = x[666]
y[666]
0
# draw that digit
some_digit_image = some_digit.reshape(8,8)
plt.imshow(some_digit_image, cmap = matplotlib.cm.binary)
plt.show()
# import the split function
from sklearn.model_selection import train_test_split
# split into training and test data; random_state=666 makes the split reproducible
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=666)
# import KNeighborsClassifier, sklearn's kNN implementation
from sklearn.neighbors import KNeighborsClassifier
# the n_neighbors parameter is the k value
knn_clf = KNeighborsClassifier(n_neighbors=3)
# fit the model on the training data
knn_clf.fit(X_train, y_train)
# the call returns the estimator itself:
KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
metric_params=None, n_jobs=1, n_neighbors=3, p=2,
weights='uniform')
# predict on the test data; returns the vector of predicted labels
y_predict = knn_clf.predict(X_test)
# import the accuracy function
from sklearn.metrics import accuracy_score
# pass in the true and predicted labels; returns the accuracy
accuracy_score(y_test, y_predict)
0.9888888888888889
# or evaluate the model directly on the test data
knn_clf.score(X_test, y_test)
0.9888888888888889
# complete code without a fixed random seed, computing the accuracy
import numpy as np
from sklearn import datasets
digits = datasets.load_digits()
x = digits.data
y = digits.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
from sklearn.neighbors import KNeighborsClassifier
knn_clf = KNeighborsClassifier(n_neighbors=3)
knn_clf.fit(X_train, y_train)
knn_clf.score(X_test, y_test)
# accuracy
0.9916666666666667
Searching for the best k with a simple loop:
import numpy as np
from sklearn import datasets
digits = datasets.load_digits()
x = digits.data
y = digits.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
from sklearn.neighbors import KNeighborsClassifier
# use a loop to search for the best k
best_score = 0.0
best_k = -1
for k in range(1, 11):
    knn_clf = KNeighborsClassifier(n_neighbors=k)
    knn_clf.fit(X_train, y_train)
    score = knn_clf.score(X_test, y_test)
    if score > best_score:
        best_k = k
        best_score = score
print("best_k = ", best_k)
print("best_score = ", best_score)
best_k = 5
best_score = 0.9944444444444445
By default, the Euclidean distance is used.
When the vote among the k neighbors ends in a tie between classes, ignoring distance means the tie is broken arbitrarily; taking distance into account, the class whose members are closest to the point wins.
Weighted distance voting: each neighbor's vote is weighted by the reciprocal of its distance to the query point, and the class with the largest sum of 1/d wins.
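A minimal sketch of such a weighted vote; the labels and distances are made-up values set up as a 2-vs-2 tie:
from collections import Counter
labels = [0, 0, 1, 1]          # hypothetical k=4 neighbors, tied 2 vs 2
dists = [0.5, 2.0, 0.4, 1.0]   # their distances to the query point
weights = Counter()
for lbl, d in zip(labels, dists):
    weights[lbl] += 1.0 / d    # each vote weighted by the inverse distance
print(weights.most_common(1)[0][0])  # class 1 wins: 1/0.4 + 1/1.0 > 1/0.5 + 1/2.0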
# the weights parameter defaults to 'uniform' (ignore distance); set it to 'distance' to weight votes by distance
best_method = ""
best_score = 0.0
best_k = -1
for method in ['uniform', 'distance']:
for k in range(1,11):
knn_clf = KNeighborsClassifier(n_neighbors=k, weights=method)
knn_clf.fit(X_train, y_train)
score = knn_clf.score(X_test, y_test)
if score > best_score:
best_k = k
best_score = score
best_method = method
print("best_k = ", best_k)
print("best_score = ", best_score)
print("best_method = ", best_method)
best_k = 5
best_score = 0.9944444444444445
best_method = uniform
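The p searched below selects the Minkowski distance, (sum_i |a_i - b_i|^p)^(1/p): p=1 is the Manhattan distance and p=2 the Euclidean distance. A small sketch of how the metric changes with p (the two points are chosen arbitrarily):
import numpy as np
a, b = np.array([1.0, 2.0]), np.array([4.0, 6.0])
for p in (1, 2, 3):
    print(p, (np.abs(a - b) ** p).sum() ** (1 / p))  # 7.0, 5.0, ~4.5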
%%time
# search for the best distance parameter p
best_p = -1
best_score = 0.0
best_k = -1
for k in range(1, 11):
    for p in range(1, 6):
        knn_clf = KNeighborsClassifier(n_neighbors=k, weights='distance', p=p, metric='minkowski')
        knn_clf.fit(X_train, y_train)
        score = knn_clf.score(X_test, y_test)
        if score > best_score:
            best_k = k
            best_score = score
            best_p = p
print("best_k = ", best_k)
print("best_score = ", best_score)
print("best_p = ", best_p)
best_k = 3
best_score = 0.9972222222222222
best_p = 5
Wall time: 22.3 s
# param_grid is a list of dicts describing the parameter space to search; when weights is 'uniform' the distance parameter p is unused (10 + 10*5 = 60 candidates in total)
param_grid = [
    {
        'weights': ['uniform'],
        'n_neighbors': [i for i in range(1, 11)]
    },
    {
        'weights': ['distance'],
        'n_neighbors': [i for i in range(1, 11)],
        'p': [i for i in range(1, 6)]
    }
]
knn_clf = KNeighborsClassifier()
# import the grid-search module GridSearchCV
from sklearn.model_selection import GridSearchCV
# pass in the estimator and the parameter grid
grid_search = GridSearchCV(knn_clf, param_grid)
%%time
# run the search
grid_search.fit(X_train, y_train)
# the search takes quite a while
Wall time: 3min 2s
GridSearchCV(cv=None, error_score='raise',
estimator=KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
metric_params=None, n_jobs=1, n_neighbors=5, p=2,
weights='uniform'),
fit_params=None, iid=True, n_jobs=1,
param_grid=[{'weights': ['uniform'], 'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}, {'weights': ['distance'], 'n_neighbors': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 'p': [1, 2, 3, 4, 5]}],
pre_dispatch='2*n_jobs', refit=True, return_train_score='warn',
scoring=None, verbose=0)
# search results
# the best estimator found
grid_search.best_estimator_
KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
metric_params=None, n_jobs=1, n_neighbors=5, p=2,
weights='distance')
# the best score (note: this is the cross-validated accuracy on the training data, not the test-set score)
grid_search.best_score_
0.988169798190675
# the best parameter combination
grid_search.best_params_
{'n_neighbors': 5, 'p': 2, 'weights': 'distance'}
# take the best estimator as the classifier
knn_clf = grid_search.best_estimator_
# its accuracy on the test data
knn_clf.score(X_test, y_test)
0.9944444444444445
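Beyond the best result, the full search history is stored in grid_search.cv_results_; a small sketch listing every candidate with its mean cross-validated accuracy:
for params, score in zip(grid_search.cv_results_['params'],
                         grid_search.cv_results_['mean_test_score']):
    print(params, round(float(score), 4))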
%%time
# n_jobs is the number of CPU cores to use (-1 means all of them); verbose prints progress during the search, with larger integers giving more detail
grid_search = GridSearchCV(knn_clf, param_grid, n_jobs=-1, verbose=3)
grid_search.fit(X_train, y_train)
# this run took about 1.2 minutes, a large speed-up
Fitting 3 folds for each of 60 candidates, totalling 180 fits
[Parallel(n_jobs=-1)]: Done 24 tasks | elapsed: 2.5s
[Parallel(n_jobs=-1)]: Done 120 tasks | elapsed: 39.9s
Wall time: 1min 13s
[Parallel(n_jobs=-1)]: Done 180 out of 180 | elapsed: 1.2min finished
Min-max normalization, x_scale = (x - x_min) / (x_max - x_min), maps every feature into [0, 1]. It suits data whose distribution has clear boundaries, but it is strongly affected by outliers:
for example, if most values are small and one extremely large value appears, x_max becomes huge and distorts x_scale for everything else, hurting prediction accuracy.
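A small sketch of that outlier effect (the values are made up):
import numpy as np
x = np.array([1.0, 2.0, 3.0, 100.0])           # one extreme outlier
x_scale = (x - x.min()) / (x.max() - x.min())
print(x_scale)  # [0. 0.0101 0.0202 1.] - the ordinary values are squeezed toward 0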
Mean-variance normalization (standardization): rescale each feature to zero mean and unit variance, x_std = (x - mean) / std; this is much less sensitive to outliers.
Implementing a StandardScaler class, to understand the principle:
import numpy as np
class StandardScaler:

    def __init__(self):
        self.mean_ = None
        self.scale_ = None

    def fit(self, X):
        """Obtain the mean and standard deviation of the data from the training set X"""
        assert X.ndim == 2, "The dimension of X must be 2"
        # compute the per-feature mean (mean_) and standard deviation (scale_) of the reference data
        self.mean_ = np.array([np.mean(X[:, i]) for i in range(X.shape[1])])
        self.scale_ = np.array([np.std(X[:, i]) for i in range(X.shape[1])])
        return self

    def transform(self, X):
        """Standardize X using this StandardScaler's mean and scale"""
        assert X.ndim == 2, "The dimension of X must be 2"
        assert self.mean_ is not None and self.scale_ is not None, \
            "must fit before transform!"
        assert X.shape[1] == len(self.mean_), \
            "The feature number of X must be equal to mean_ and std_"
        # build an empty array first
        resX = np.empty(shape=X.shape, dtype=float)
        # fill it column by column with the standardized values
        for col in range(X.shape[1]):
            resX[:, col] = (X[:, col] - self.mean_[col]) / self.scale_[col]
        return resX
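A quick check of the class above on random data (the uniform range is an arbitrary choice):
import numpy as np
X_demo = np.random.uniform(0.0, 100.0, size=(50, 2))
scaler = StandardScaler()            # the class implemented above
scaler.fit(X_demo)
X_demo_std = scaler.transform(X_demo)
print(np.mean(X_demo_std, axis=0))   # per-column means ~ 0
print(np.std(X_demo_std, axis=0))    # per-column stds ~ 1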
import numpy as np
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
y = iris.target
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target)
# import the standardization module
from sklearn.preprocessing import StandardScaler
# use the class
standardScaler = StandardScaler()
# fit on the training data, which serves as the reference
standardScaler.fit(X_train)
StandardScaler(copy=True, with_mean=True, with_std=True)
# the mean of each feature
standardScaler.mean_
array([5.80178571, 3.11517857, 3.64553571, 1.15446429])
# the standard deviation of each feature (a measure of its spread)
standardScaler.scale_
array([0.82721371, 0.43838376, 1.78829969, 0.77100218])
# get the standardized training data
X_train = standardScaler.transform(X_train)
# get the standardized test data (transformed with the training statistics)
X_test_standard = standardScaler.transform(X_test)
# check the mean and standard deviation of the transformed training data
np.mean(X_train)
1.2965818894729507e-15 # on the order of 10^-15, essentially 0
np.std(X_train)
1.0
# run the prediction
from sklearn.neighbors import KNeighborsClassifier
knn_clf = KNeighborsClassifier(n_neighbors=3)
knn_clf.fit(X_train, y_train)
knn_clf.score(X_test_standard, y_test)
# score
0.9736842105263158
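For comparison, a self-contained sketch that trains the same model on the raw, unscaled features; the score varies with the random split, so no particular number is implied:
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
iris = datasets.load_iris()
X_tr, X_te, y_tr, y_te = train_test_split(iris.data, iris.target, test_size=0.25)
clf = KNeighborsClassifier(n_neighbors=3)
clf.fit(X_tr, y_tr)
print(clf.score(X_te, y_te))  # accuracy on raw features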
Drawbacks of kNN:
1. Low efficiency: each prediction costs O(n*m), where n is the number of training samples and m the number of features.
2. Highly data-dependent: the model is the training set itself, so noisy samples directly affect predictions.
3. Predictions are not interpretable.
4. The curse of dimensionality: as the number of dimensions grows, the distance between two "seemingly close" points keeps growing. Remedy: dimensionality reduction.
Advantages of kNN:
1. Simple, easy to understand and implement; no parameters to estimate and no training phase.
2. Well suited to classifying rare events.
3. Especially suitable for multi-modal classification problems (where objects carry multiple class labels), on which kNN can perform better than SVM.