(1) Correctness: the boundary classifies most samples correctly;
(2) Safety: the margin is maximized, i.e., the support vectors (the samples closest to the classification boundary) are as far from it as possible;
(3) Fairness: the support vectors on both sides are equidistant from the boundary;
(4) Simplicity: the boundary is expressed as a linear equation (a line or a plane), also called the separating hyperplane. If the samples cannot be separated linearly in the original space, they are mapped into a higher-dimensional space where a linear separating hyperplane is sought; this transformation from the low-dimensional space to the high-dimensional space is carried out by a kernel function.
If a linear function can classify a set of samples correctly, those samples are said to be linearly separable. What is a linear function? In two-dimensional space it is a straight line; in three-dimensional space it is a plane; and in general, regardless of the number of dimensions, such linear functions are collectively called hyperplanes.
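For reference, a hyperplane in an $n$-dimensional feature space is the set of points $x$ satisfying a linear equation; in standard SVM notation (not part of the code below):

$$
w^T x + b = 0, \qquad f(x) = \operatorname{sign}(w^T x + b)
$$

Here $w$ is the normal vector of the hyperplane and $b$ is the intercept; the SVM chooses $w$ and $b$ so that the margin $2 / \lVert w \rVert$ between the two classes is as wide as possible.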
If no linear function can classify a set of samples correctly, the samples are said to be linearly inseparable. The example below trains an SVM classifier on a two-dimensional dataset:
# SVM example
import numpy as np
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
import matplotlib.pyplot as mp

x, y = [], []
with open("../data/multiple2.txt", "r") as f:
    for line in f.readlines():
        data = [float(substr) for substr in line.split(",")]
        x.append(data[:-1])  # features
        y.append(data[-1])   # label
# convert the lists to arrays
x = np.array(x)
y = np.array(y, dtype=int)
# SVM classifier with a linear kernel
model = svm.SVC(kernel="linear")  # linear kernel
# model = svm.SVC(kernel="poly", degree=3)  # polynomial kernel
# print("gamma:", model.gamma)
# SVM classifier with an RBF (radial basis function) kernel
# model = svm.SVC(kernel="rbf",
#                 gamma=0.01,  # kernel coefficient: larger values fit the training data more tightly
#                 C=200)       # penalty strength for misclassification
model.fit(x, y)
# compute the plotting bounds
l, r, h = x[:, 0].min() - 1, x[:, 0].max() + 1, 0.005
b, t, v = x[:, 1].min() - 1, x[:, 1].max() + 1, 0.005
# build the grid
grid_x = np.meshgrid(np.arange(l, r, h), np.arange(b, t, v))
flat_x = np.c_[grid_x[0].ravel(), grid_x[1].ravel()]  # flatten and stack the grid coordinates
flat_y = model.predict(flat_x)  # predict a class for every grid point
grid_y = flat_y.reshape(grid_x[0].shape)  # reshape back to the grid
mp.figure("SVM Classifier", facecolor="lightgray")
mp.title("SVM Classifier", fontsize=14)
mp.xlabel("x", fontsize=14)
mp.ylabel("y", fontsize=14)
mp.tick_params(labelsize=10)
mp.pcolormesh(grid_x[0], grid_x[1], grid_y, cmap="gray")
C0, C1 = (y == 0), (y == 1)
mp.scatter(x[C0][:, 0], x[C0][:, 1], c="orangered", s=80)
mp.scatter(x[C1][:, 0], x[C1][:, 1], c="limegreen", s=80)
mp.show()
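After fitting, the support vectors from properties (2) and (3) can be inspected directly; `support_vectors_` and `n_support_` are standard attributes of `sklearn.svm.SVC`:

# the samples closest to the boundary, i.e. the support vectors
print(model.support_vectors_.shape)
# number of support vectors belonging to each class
print(model.n_support_)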
A mapping to a higher-dimensional space can be built from polynomial terms of the original features, for example:

$$
y = x_1 + x_2 \\
y = x_1^2 + 2x_1x_2 + x_2^2 \\
y = x_1^3 + 3x_1^2x_2 + 3x_1x_2^2 + x_2^3
$$
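To make the dimensional lift concrete, here is a small sketch using `sklearn.preprocessing.PolynomialFeatures` (not used elsewhere in this section) to expand two features into their degree-2 polynomial terms:

import numpy as np
import sklearn.preprocessing as sp

samples = np.array([[1.0, 2.0]])  # one sample with features x1, x2
poly = sp.PolynomialFeatures(degree=2, include_bias=False)
expanded = poly.fit_transform(samples)  # columns: x1, x2, x1^2, x1*x2, x2^2
print(expanded)  # [[1. 2. 1. 2. 4.]]

A kernel SVM never materializes these expanded features; the kernel computes the corresponding inner products implicitly, which is why high-degree mappings remain cheap.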
model = svm.SVC(kernel="poly", degree=3) # 多项式核函数
# 径向基核函数支持向量机分类器
model = svm.SVC(kernel="rbf",
gamma=0.01, # 概率密度标准差
C=600) # 概率强度,该值越大对错误分类的容忍度越小,分类精度越高,但泛化能力越差;该值越小,对错误分类容忍度越大,但泛化能力强
# SVM classifier with a linear kernel
model = svm.SVC(kernel='linear')
model.fit(train_x, train_y)
# SVM classifier with a polynomial kernel
model = svm.SVC(kernel='poly', degree=3)
model.fit(train_x, train_y)
# SVM classifier with an RBF kernel
# C: regularization (penalty) strength
# gamma: kernel coefficient for the 'rbf', 'poly' and 'sigmoid' kernels; higher gamma fits the training set more precisely and may cause overfitting
model = svm.SVC(kernel='rbf', C=600, gamma=0.01)
model.fit(train_x, train_y)
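A quick way to observe the tradeoff described in the comments above is to compare training and test accuracy for several values of C. A sketch, assuming train/test splits like the ones created later in this section:

# larger C tends to raise training accuracy but can lower test accuracy
for c in (1, 10, 100, 1000):
    m = svm.SVC(kernel='rbf', C=c, gamma=0.1)
    m.fit(train_x, train_y)
    print(c,
          round(m.score(train_x, train_y), 3),  # training accuracy
          round(m.score(test_x, test_y), 3))    # test accuracy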
(1) The support vector machine is a binary classification model.
(2) It searches for the optimal linear model to serve as the classification boundary.
(3) Requirements on the boundary: correctness, fairness, safety, and simplicity.
(4) Kernel functions can turn a linearly inseparable problem into a linearly separable one; common kernels are the linear, polynomial, and radial basis function (RBF) kernels (the RBF kernel is written out below).
(5) SVMs are well suited to classifying small sample sets.
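For reference, the RBF kernel measures the similarity of two samples $x$ and $z$ through a Gaussian of their distance; in the standard textbook form, with $\gamma$ corresponding to the `gamma` parameter of `SVC`:

$$
K(x, z) = \exp\left(-\gamma \lVert x - z \rVert^2\right)
$$

A larger $\gamma$ makes the Gaussian narrower, so each support vector influences a smaller neighborhood and the boundary bends more tightly around the training data.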
import sklearn.model_selection as ms
params = [{'kernel': ['linear'], 'C': [1, 10, 100, 1000]},
          {'kernel': ['poly'], 'C': [1], 'degree': [2, 3]},
          {'kernel': ['rbf'], 'C': [1, 10, 100], 'gamma': [1, 0.1, 0.01]}]
model = ms.GridSearchCV(svm.SVC(), params, cv=5)  # cv: number of cross-validation folds
model.fit(train_x, train_y)
# every parameter combination tried by the grid search
model.cv_results_['params']
# mean cross-validation score for each parameter combination
model.cv_results_['mean_test_score']
# best parameter combination, best score, and best (refitted) estimator
model.best_params_
model.best_score_
model.best_estimator_
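The first two attributes line up pairwise, so the searched combinations and their scores can be printed side by side, e.g.:

for p, s in zip(model.cv_results_['params'],
                model.cv_results_['mean_test_score']):
    print(round(s, 3), p)  # mean cross-validation score, then the parameter combination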
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
data = pd.read_csv('multiple2.txt', header=None, names=['x1', 'x2', 'y'])
data.plot.scatter(x='x1', y='x2', c='y', cmap='brg')
import sklearn.model_selection as ms
import sklearn.svm as svm
import sklearn.metrics as sm
# prepare the dataset and split it into training and test sets
x, y = data.iloc[:, :-1], data['y']
train_x, test_x, train_y, test_y = ms.train_test_split(x, y, test_size=0.25, random_state=7)
model = svm.SVC(kernel='linear')
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))
"""
              precision    recall  f1-score   support

           0       0.69      0.90      0.78        40
           1       0.83      0.54      0.66        35

    accuracy                           0.73        75
   macro avg       0.76      0.72      0.72        75
weighted avg       0.75      0.73      0.72        75
"""
data.head()
"""
     x1    x2  y
0  5.35  4.48  0
1  6.72  5.37  0
2  3.57  5.25  0
3  4.77  7.65  1
4  2.25  4.07  1
"""
# brute-force plot of the classification boundary
# take 100 x-coordinates spanning the min-max range of x1
# take 100 y-coordinates spanning the min-max range of x2
# together they form 10000 grid points; predict a class label for each and draw them as a scatter plot
xs = np.linspace(data['x1'].min(), data['x1'].max(), 100)
ys = np.linspace(data['x2'].min(), data['x2'].max(), 100)
points = []
for x in xs:
    for y in ys:
        points.append([x, y])
points = np.array(points)
# predict a class label for each grid point and draw the scatter plot
point_labels = model.predict(points)
plt.scatter(points[:,0], points[:,1], c=point_labels, cmap='gray')
plt.scatter(test_x['x1'], test_x['x2'], c=test_y, cmap='brg')
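The double loop above can also be replaced with `np.meshgrid`, as in the first example of this section; a sketch of the equivalent vectorized version (the point ordering differs from the loop, which does not matter for a scatter plot):

gx, gy = np.meshgrid(xs, ys)            # two 100x100 coordinate grids
points = np.c_[gx.ravel(), gy.ravel()]  # stacked into 10000 (x, y) pairs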
# SVM with a polynomial kernel
model = svm.SVC(kernel='poly', degree=2)
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
print(sm.classification_report(test_y, pred_test_y))
# predict a class label for each grid point and draw the scatter plot
point_labels = model.predict(points)
plt.scatter(points[:,0], points[:,1], c=point_labels, cmap='gray')
plt.scatter(test_x['x1'], test_x['x2'], c=test_y, cmap='brg')
"""
              precision    recall  f1-score   support

           0       0.84      0.95      0.89        40
           1       0.93      0.80      0.86        35

    accuracy                           0.88        75
   macro avg       0.89      0.88      0.88        75
weighted avg       0.89      0.88      0.88        75
"""
# SVM with an RBF kernel
model = svm.SVC(kernel='rbf', C=1, gamma=0.1)
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
# print(sm.classification_report(test_y, pred_test_y))
# predict a class label for each grid point and draw the scatter plot
point_labels = model.predict(points)
plt.scatter(points[:,0], points[:,1], c=point_labels, cmap='gray')
plt.scatter(test_x['x1'], test_x['x2'], c=test_y, cmap='brg')
"""
              precision    recall  f1-score   support

           0       0.97      0.97      0.97        40
           1       0.97      0.97      0.97        35

    accuracy                           0.97        75
   macro avg       0.97      0.97      0.97        75
weighted avg       0.97      0.97      0.97        75
"""
# search for the best hyperparameter combination via grid search
model = svm.SVC()
# grid search over three kernel families
params = [{'kernel':['linear'], 'C':[1, 10, 100]},
{'kernel':['poly'], 'degree':[2, 3]},
{'kernel':['rbf'], 'C':[1, 10, 100], 'gamma':[1, 0.1, 0.001]}]
model = ms.GridSearchCV(model, params, cv=5)
model.fit(train_x, train_y)
pred_test_y = model.predict(test_x)
# print(sm.classification_report(test_y, pred_test_y))
# predict a class label for each grid point and draw the scatter plot
point_labels = model.predict(points)
plt.scatter(points[:,0], points[:,1], c=point_labels, cmap='gray')
plt.scatter(test_x['x1'], test_x['x2'], c=test_y, cmap='brg')
print(model.best_params_)
print(model.best_score_)
print(model.best_estimator_)
"""
{'C': 1, 'gamma': 1, 'kernel': 'rbf'}
0.9511111111111111
SVC(C=1, gamma=1)
"""