模型评判:
对测试数据探索
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
iris = datasets.load_iris()
X = iris.data
y = iris.target
X.shape
Out[3]:
(150, 4)
In [4]:
# 方法1# 使用concatenate函数进行拼接,因为传入的矩阵必须具有相同的形状。因此需要对label进行reshape操作,reshape(-1,1)表示行数自动计算,1列。axis=1表示纵向拼接。
tempConcat = np.concatenate((X, y.reshape(-1,1)), axis=1)
# 拼接好后,直接进行乱序操作np.random.shuffle(tempConcat)
# 再将shuffle后的数组使用split方法拆分
shuffle_X,shuffle_y = np.split(tempConcat, [4], axis=1)
# 设置划分的比例
test_ratio = 0.2
test_size = int(len(X) * test_ratio)
X_train = shuffle_X[test_size:]
y_train = shuffle_y[test_size:]
X_test = shuffle_X[:test_size]
y_test = shuffle_y[:test_size]
In [6]:
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
(120, 4)
(30, 4)
(120, 1)
(30, 1)
In [8]:
# 方法2# 将x长度这么多的数,返回一个新的打乱顺序的数组,注意,数组中的元素不是原来的数据,而是混乱的索引
shuffle_index = np.random.permutation(len(X))
# 指定测试数据的比例
test_ratio = 0.2
test_size = int(len(X) * test_ratio)
test_index = shuffle_index[:test_size]
train_index = shuffle_index[test_size:]
X_train = X[train_index]
X_test = X[test_index]
y_train = y[train_index]
y_test = y[test_index]
In [9]:
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
(120, 4)
(30, 4)
(120,)
(30,)
In [17]:
import numpy as np
def train_test_split(X, y, test_ratio=0.2, seed=None):
assert X.shape[0] == y.shape[0]
assert 0.0 <= test_ratio <= 1.0
if seed:
# 是否使用随机种子,使随机结果相同,方便debug
np.random.seed(seed)
# permutation(n) 可直接生成一个随机排列的数组,含有n个元素
shuffle_index = np.random.permutation(len(X))
test_size = int(len(X) * test_ratio)
test_index = shuffle_index[:test_size]
train_index = shuffle_index[test_size:]
X_train = X[train_index]
X_test = X[test_index]
y_train = y[train_index]
y_test = y[test_index]
return X_train, X_test, y_train, y_test
In [21]:
from myAlgorithm.kNN import kNNClassifier
my_kNNClassifier = kNNClassifier(k=3)
my_kNNClassifier.fit(X_train, y_train)
y_predict = my_kNNClassifier.predict(X_test)
y_predict
y_test
# 两个向量的比较,返回一个布尔型向量,对这个布尔向量(faluse=1,true=0)sum,
sum(y_predict == y_test)
sum(y_predict == y_test)/len(y_test)
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
----> 1 from myAlgorithm.kNN import kNNClassifier
2 my_kNNClassifier = kNNClassifier(k=3)
3 my_kNNClassifier.fit(X_train, y_train)
4 y_predict = my_kNNClassifier.predict(X_test)
5 y_predict
ModuleNotFoundError: No module named 'myAlgorithm'
In [16]:
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=666)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
(120, 4)
(30, 4)
(120,)
(30,)
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection
import train_test_split
from sklearn.neighbors import KNeighborsClassifier
# 手写数字数据集,封装好的对象,可以理解为一个字段
digits = datasets.load_digits()
# 可以使用keys()方法来看一下数据集的详情digits.keys()
# 输出:dict_keys(['data', 'target', 'target_names', 'images', 'DESC
# 特征的shapeX = digits.data
X.shape
(1797, 64)
# 标签的shape
y = digits.target
y.shape
(1797, )
# 标签分类digits.target_names
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
# 去除某一个具体的数据,查看其特征以及标签信息some_digit = X[666]
some_digit
array([ 0., 0., 5., 15., 14., 3., 0., 0., 0., 0., 13., 15., 9.,15., 2., 0., 0., 4., 16., 12., 0., 10., 6., 0., 0., 8.,16., 9., 0., 8., 10., 0., 0., 7., 15., 5., 0., 12., 11.,0., 0., 7., 13., 0., 5., 16., 6., 0., 0., 0., 16., 12.,15., 13., 1., 0., 0., 0., 6., 16., 12., 2., 0., 0.])
y[666]
# 也可以这条数据进行可视化
some_digmit_image = some_digit.reshape(8, 8)
plt.imshow(some_digmit_image, cmap = matplotlib.cm.binary)
plt.show()
# 指定最佳值的分数,初始化为0.0;设置最佳值k,初始值为-1
best_score = 0.0
best_k = -1
for k in range(1, 11): # 暂且设定到1~11的范围内
knn_clf = KNeighborsClassifier(n_neighbors=k)
knn_clf.fit(X_train, y_train)
score = knn_clf.score(X_test, y_test)
if score > best_score:
best_k = k
best_score = score
print("best_k = ", best_k)
print("best_score = ", best_score)
# 输出:best_k = 4best_score = 0.9916666666666667
# 两种方式进行比较
best_method = ""
best_score = 0.0
best_k = -1
for method in ["uniform","distance"]:
for k in range(1, 11):
knn_clf = KNeighborsClassifier(n_neighbors=k, weights=method, p=2)
knn_clf.fit(X_train, y_train)
score = knn_clf.score(X_test, y_test)
if score > best_score:
best_k = k
best_score = score
best_method = method
print("best_method = ", method)
print("best_k = ", best_k)
print("best_score = ", best_score)
# 输出:best_method = distance
# best_k = 4best_score = 0.9916666666666667