数据集见上一篇文章《Titanic 数据集可视化》。
import csv
import numpy as np
import matplotlib.pyplot as plt
import copy
from time import sleep
import random
import types
def loadDataset(filename):
    """Load a Titanic CSV file and reduce it to the modelling features.

    Keeps Pclass, Sex, Age, SibSp+Parch (merged), Fare — plus Survived
    (moved to the last column) when loading a training file.

    Returns (data_set, category): the cleaned numeric rows and the header row.
    """
    with open(filename, 'r') as f:
        lines = csv.reader(f)
        data_set = list(lines)
    # Test files have no 'Survived' column, so every column index shifts by one.
    offset = 1 if 'test' in filename else 0
    # Drop unused columns in place; the arithmetic (e.g. 3-1-offset) accounts
    # for how many columns were already deleted before this one.
    for i in range(len(data_set)):
        del(data_set[i][0])
        del(data_set[i][3-1-offset])
        # Merge SibSp and Parch into one concatenated string field (split later).
        data_set[i][6-2-offset] += data_set[i][7-2-offset]
        del(data_set[i][7-2-offset])
        del(data_set[i][8-3-offset])
        del(data_set[i][10-4-offset])
        del(data_set[i][11-5-offset])
        if 'train' in filename:
            # Move the label ('Survived') to the end of the row.
            survived = data_set[i].pop(0)
            data_set[i].append(survived)
    # The first row is the CSV header; keep it separately as the column names.
    category = data_set[0]
    del (data_set[0])
    '''with open(filename, 'r') as f:
reader = csv.reader(f)
lines = list(reader)
res = [0 for i in range(len(lines[0]))]
for line in lines:
for i in range(len(line)):
res[i] += 1 if line[i] != '' else 0
print(lines[0], res)'''
    # Remaining features:
    # train: ['Pclass', 'Sex', 'Age', 'SibSpParch', 'Fare', 'Survived']
    #   ("Age" has missing entries)
    # test:  ['Pclass', 'Sex', 'Age', 'SibSpParch', 'Fare']
    #   ("Age" & "Fare" have missing entries)
    # Convert string fields to numbers, impute missing values, then bucket.
    for data in data_set:
        pclass = int(data[0])
        # male : 1, female : 0
        sex = 1 if data[1] == 'male' else 0
        # Missing ages default to 28 (imputed value).
        age = int(float(data[2])) if data[2] != '' else 28
        # NOTE(review): assumes SibSp and Parch are each single digits —
        # data[3] is their string concatenation; verify for large families.
        sibspparch = float(data[3][0])+float(data[3][1])
        fare = float(data[4]) if data[4] != '' else 0
        # Bucketing spreads out an otherwise over-dense distribution
        # (per the original author's testing).
        # age < 25 -> 0, 25 <= age < 60 -> 1, age >= 60 -> 2
        # (60 as split point tested best, per the original comment)
        if age < 25:
            age = 0
        elif age >= 25 and age < 60:
            age = 1
        else:
            age = 2
        # sibsp+parch: < 2 -> 0, otherwise 1
        if sibspparch < 2:
            sibspparch = 0
        else:
            sibspparch = 1
        # fare: < 64 -> 0, otherwise 1
        if fare < 64:
            fare = 0
        else:
            fare = 1
        # Write the numeric values back into the row.
        data[0] = pclass
        data[1] = sex
        data[2] = age
        data[3] = sibspparch
        data[4] = fare
        if 'train' in filename:
            data[-1] = int(data[-1])
    #print(len(data_set), category)
    return data_set, category
def split_data(data):
    """Separate feature rows from labels (last column); label 0 becomes -1.

    The input is deep-copied first, so the caller's rows are untouched.
    Returns (data_mat, label_mat).
    """
    rows = copy.deepcopy(data)
    data_mat = []
    label_mat = []
    for row in rows:
        label = row.pop()
        label_mat.append(-1 if label == 0 else label)
        data_mat.append(row)
    return data_mat, label_mat
def select_j_rand(i, m):
    """Draw a random index from [0, m) that differs from i.

    Keeps the same random.uniform draw sequence as the original
    (exactly one draw per loop test).
    """
    j = int(random.uniform(0, m))
    while j == i:
        j = int(random.uniform(0, m))
    return j
def clip_alptha(aj, H, L):
    """Clamp aj into [L, H]; if L > H the lower bound wins, as before.

    (Name kept as-is — existing callers use 'clip_alptha'.)
    """
    return max(L, min(aj, H))
def smo(data_mat_In, class_label, C, toler, max_iter):
    """Train a linear SVM with the simplified SMO algorithm.

    Args:
        data_mat_In: training samples, one feature row per sample.
        class_label: labels in {-1, 1}, one per sample.
        C: box constraint (upper bound on every alpha).
        toler: KKT-violation tolerance.
        max_iter: stop after this many consecutive sweeps with no update.

    Returns:
        (b, alphas): the bias (a 1x1 matrix once updated) and the m x 1
        matrix of Lagrange multipliers.
    """
    # Store as numpy matrices so '*' means matrix multiplication.
    data_matrix = np.mat(data_mat_In)
    label_mat = np.mat(class_label).transpose()
    b = 0
    m, n = np.shape(data_matrix)
    # All multipliers start at zero.
    alphas = np.mat(np.zeros((m, 1)))
    iter_num = 0
    # Keep sweeping until max_iter consecutive sweeps change nothing.
    while iter_num < max_iter:
        alpha_pairs_changed = 0
        for i in range(m):
            # Prediction error for sample i.
            fxi = float(np.multiply(alphas, label_mat).T*(data_matrix*data_matrix[i, :].T)) + b
            Ei = fxi - float(label_mat[i])
            # Optimize only if alpha_i violates the KKT conditions.
            if (label_mat[i]*Ei < -toler and alphas[i] < C) or (label_mat[i]*Ei > toler and alphas[i] > 0):
                # Pick a random partner alpha_j.
                j = select_j_rand(i, m)
                # 1. Error for sample j.
                fxj = float(np.multiply(alphas, label_mat).T*(data_matrix*data_matrix[j, :].T)) + b
                Ej = fxj - float(label_mat[j])
                # Save old values (deepcopy: alphas[i] is a matrix view).
                alpha_i_old = copy.deepcopy(alphas[i])
                alpha_j_old = copy.deepcopy(alphas[j])
                # 2. Feasible interval [L, H] for alpha_j.
                if label_mat[i] != label_mat[j]:
                    L = max(0, alphas[j] - alphas[i])
                    H = min(C, C + alphas[j] - alphas[i])
                else:
                    L = max(0, alphas[j] + alphas[i] - C)
                    H = min(C, alphas[j] + alphas[i])
                if L == H:
                    print("L == H")
                    continue
                # 3. eta = 2*K_ij - K_ii - K_jj (must be negative).
                eta = 2.0 * data_matrix[i, :]*data_matrix[j, :].T - data_matrix[i, :]*data_matrix[i, :].T - data_matrix[j, :]*data_matrix[j, :].T
                if eta >= 0:
                    print("eta >= 0")
                    continue
                # 4. Update alpha_j along the unconstrained optimum.
                alphas[j] -= label_mat[j]*(Ei - Ej)/eta
                # 5. Clip alpha_j into [L, H].
                alphas[j] = clip_alptha(alphas[j], H, L)
                # BUG FIX: measure alpha_j's own progress against its OLD
                # value. The original compared alphas[j] with alphas[i],
                # which says nothing about whether alpha_j actually moved.
                if abs(alphas[j] - alpha_j_old) < 0.001:
                    print("alpha_j变化太小")
                    continue
                # 6. Move alpha_i by the same amount in the other direction.
                alphas[i] += label_mat[j]*label_mat[i]*(alpha_j_old - alphas[j])
                # 7. Candidate biases from both updated multipliers.
                b_1 = b - Ei - label_mat[i]*(alphas[i] - alpha_i_old)*data_matrix[i, :]*data_matrix[i, :].T - label_mat[j]*(alphas[j] - alpha_j_old)*data_matrix[i, :]*data_matrix[j, :].T
                b_2 = b - Ej - label_mat[i]*(alphas[i] - alpha_i_old)*data_matrix[i, :]*data_matrix[j, :].T - label_mat[j]*(alphas[j] - alpha_j_old)*data_matrix[j, :] * data_matrix[j, :].T
                # 8. Prefer the bias backed by a non-bound alpha.
                if 0 < alphas[i] and C > alphas[i]:
                    b = b_1
                elif 0 < alphas[j] and C > alphas[j]:
                    b = b_2
                else:
                    b = (b_1 + b_2)/2
                alpha_pairs_changed += 1
            # Progress trace (message kept verbatim from the original).
            print("第%d次迭代 样本:%d , alpha优化次数:%d" % (iter_num, i, alpha_pairs_changed))
        # A full sweep with no change counts toward convergence.
        if alpha_pairs_changed == 0:
            iter_num += 1
        else:
            iter_num = 0
        print("迭代次数:%d" % iter_num)
    return b, alphas
def caluelate_w(data_mat, label_mat, alphas):
    """Recover the primal weight vector w = sum_i alpha_i * y_i * x_i.

    (Name kept as-is — existing callers use 'caluelate_w'.)

    Args:
        data_mat: samples, one feature row each.
        label_mat: labels in {-1, 1}, one per sample.
        alphas: m x 1 Lagrange multipliers from smo().

    Returns:
        w as a nested list (one [value] per feature).
    """
    alphas = np.array(alphas)
    data_mat = np.array(data_mat)
    label_mat = np.array(label_mat)
    # Generalized: tile each label across however many features the data
    # actually has (the original hard-coded 5), weight by alpha, and sum.
    n_features = data_mat.shape[1]
    w = np.dot((np.tile(label_mat.reshape(1, -1).T, (1, n_features))*data_mat).T, alphas)
    return w.tolist()
def prediction(test, w, b):
    """Classify every sample row by the sign of x·w + b.

    Scores strictly above zero map to 1; zero or below map to -1.
    Returns a plain list of labels.
    """
    samples = np.mat(test)
    labels = [1 if row * w + b > 0 else -1 for row in samples]
    return labels
import csv
import numpy as np
import matplotlib.pyplot as plt
import copy
from time import sleep
import random
import types
def loadDataset(filename):
    """Load a Titanic CSV file and reduce it to the modelling features.

    Keeps Pclass, Sex, Age, SibSp+Parch (merged), Fare — plus Survived
    (moved to the last column) when loading a training file.

    Returns (data_set, category): the cleaned numeric rows and the header row.
    """
    with open(filename, 'r') as f:
        lines = csv.reader(f)
        data_set = list(lines)
    # Test files have no 'Survived' column, so every column index shifts by one.
    offset = 1 if 'test' in filename else 0
    # Drop unused columns in place; the arithmetic (e.g. 3-1-offset) accounts
    # for how many columns were already deleted before this one.
    for i in range(len(data_set)):
        del(data_set[i][0])
        del(data_set[i][3-1-offset])
        # Merge SibSp and Parch into one concatenated string field (split later).
        data_set[i][6-2-offset] += data_set[i][7-2-offset]
        del(data_set[i][7-2-offset])
        del(data_set[i][8-3-offset])
        del(data_set[i][10-4-offset])
        del(data_set[i][11-5-offset])
        if 'train' in filename:
            # Move the label ('Survived') to the end of the row.
            survived = data_set[i].pop(0)
            data_set[i].append(survived)
    # The first row is the CSV header; keep it separately as the column names.
    category = data_set[0]
    del (data_set[0])
    '''with open(filename, 'r') as f:
reader = csv.reader(f)
lines = list(reader)
res = [0 for i in range(len(lines[0]))]
for line in lines:
for i in range(len(line)):
res[i] += 1 if line[i] != '' else 0
print(lines[0], res)'''
    # Remaining features:
    # train: ['Pclass', 'Sex', 'Age', 'SibSpParch', 'Fare', 'Survived']
    #   ("Age" has missing entries)
    # test:  ['Pclass', 'Sex', 'Age', 'SibSpParch', 'Fare']
    #   ("Age" & "Fare" have missing entries)
    # Convert string fields to numbers, impute missing values, then bucket.
    for data in data_set:
        pclass = int(data[0])
        # male : 1, female : 0
        sex = 1 if data[1] == 'male' else 0
        # Missing ages default to 28 (imputed value).
        age = int(float(data[2])) if data[2] != '' else 28
        # NOTE(review): assumes SibSp and Parch are each single digits —
        # data[3] is their string concatenation; verify for large families.
        sibspparch = float(data[3][0])+float(data[3][1])
        fare = float(data[4]) if data[4] != '' else 0
        # Bucketing spreads out an otherwise over-dense distribution
        # (per the original author's testing).
        # age < 25 -> 0, 25 <= age < 60 -> 1, age >= 60 -> 2
        # (60 as split point tested best, per the original comment)
        if age < 25:
            age = 0
        elif age >= 25 and age < 60:
            age = 1
        else:
            age = 2
        # sibsp+parch: < 2 -> 0, otherwise 1
        if sibspparch < 2:
            sibspparch = 0
        else:
            sibspparch = 1
        # fare: < 64 -> 0, otherwise 1
        if fare < 64:
            fare = 0
        else:
            fare = 1
        # Write the numeric values back into the row.
        data[0] = pclass
        data[1] = sex
        data[2] = age
        data[3] = sibspparch
        data[4] = fare
        if 'train' in filename:
            data[-1] = int(data[-1])
    #print(len(data_set), category)
    return data_set, category
def split_data(data):
    """Separate feature rows from labels (last column); label 0 becomes -1.

    The input is deep-copied first, so the caller's rows are untouched.
    Returns (data_mat, label_mat).
    """
    rows = copy.deepcopy(data)
    data_mat = []
    label_mat = []
    for row in rows:
        label = row.pop()
        label_mat.append(-1 if label == 0 else label)
        data_mat.append(row)
    return data_mat, label_mat
def select_j_rand(i, m):
    """Draw a random index from [0, m) that differs from i.

    Keeps the same random.uniform draw sequence as the original
    (exactly one draw per loop test).
    """
    j = int(random.uniform(0, m))
    while j == i:
        j = int(random.uniform(0, m))
    return j
def clip_alptha(aj, H, L):
    """Clamp aj into [L, H]; if L > H the lower bound wins, as before.

    (Name kept as-is — existing callers use 'clip_alptha'.)
    """
    return max(L, min(aj, H))
def smo(data_mat_In, class_label, C, toler, max_iter):
    """Train a linear SVM with the simplified SMO algorithm.

    Args:
        data_mat_In: training samples, one feature row per sample.
        class_label: labels in {-1, 1}, one per sample.
        C: box constraint (upper bound on every alpha).
        toler: KKT-violation tolerance.
        max_iter: stop after this many consecutive sweeps with no update.

    Returns:
        (b, alphas): the bias (a 1x1 matrix once updated) and the m x 1
        matrix of Lagrange multipliers.
    """
    # Store as numpy matrices so '*' means matrix multiplication.
    data_matrix = np.mat(data_mat_In)
    label_mat = np.mat(class_label).transpose()
    b = 0
    m, n = np.shape(data_matrix)
    # All multipliers start at zero.
    alphas = np.mat(np.zeros((m, 1)))
    iter_num = 0
    # Keep sweeping until max_iter consecutive sweeps change nothing.
    while iter_num < max_iter:
        alpha_pairs_changed = 0
        for i in range(m):
            # Prediction error for sample i.
            fxi = float(np.multiply(alphas, label_mat).T*(data_matrix*data_matrix[i, :].T)) + b
            Ei = fxi - float(label_mat[i])
            # Optimize only if alpha_i violates the KKT conditions.
            if (label_mat[i]*Ei < -toler and alphas[i] < C) or (label_mat[i]*Ei > toler and alphas[i] > 0):
                # Pick a random partner alpha_j.
                j = select_j_rand(i, m)
                # 1. Error for sample j.
                fxj = float(np.multiply(alphas, label_mat).T*(data_matrix*data_matrix[j, :].T)) + b
                Ej = fxj - float(label_mat[j])
                # Save old values (deepcopy: alphas[i] is a matrix view).
                alpha_i_old = copy.deepcopy(alphas[i])
                alpha_j_old = copy.deepcopy(alphas[j])
                # 2. Feasible interval [L, H] for alpha_j.
                if label_mat[i] != label_mat[j]:
                    L = max(0, alphas[j] - alphas[i])
                    H = min(C, C + alphas[j] - alphas[i])
                else:
                    L = max(0, alphas[j] + alphas[i] - C)
                    H = min(C, alphas[j] + alphas[i])
                if L == H:
                    print("L == H")
                    continue
                # 3. eta = 2*K_ij - K_ii - K_jj (must be negative).
                eta = 2.0 * data_matrix[i, :]*data_matrix[j, :].T - data_matrix[i, :]*data_matrix[i, :].T - data_matrix[j, :]*data_matrix[j, :].T
                if eta >= 0:
                    print("eta >= 0")
                    continue
                # 4. Update alpha_j along the unconstrained optimum.
                alphas[j] -= label_mat[j]*(Ei - Ej)/eta
                # 5. Clip alpha_j into [L, H].
                alphas[j] = clip_alptha(alphas[j], H, L)
                # BUG FIX: measure alpha_j's own progress against its OLD
                # value. The original compared alphas[j] with alphas[i],
                # which says nothing about whether alpha_j actually moved.
                if abs(alphas[j] - alpha_j_old) < 0.001:
                    print("alpha_j变化太小")
                    continue
                # 6. Move alpha_i by the same amount in the other direction.
                alphas[i] += label_mat[j]*label_mat[i]*(alpha_j_old - alphas[j])
                # 7. Candidate biases from both updated multipliers.
                b_1 = b - Ei - label_mat[i]*(alphas[i] - alpha_i_old)*data_matrix[i, :]*data_matrix[i, :].T - label_mat[j]*(alphas[j] - alpha_j_old)*data_matrix[i, :]*data_matrix[j, :].T
                b_2 = b - Ej - label_mat[i]*(alphas[i] - alpha_i_old)*data_matrix[i, :]*data_matrix[j, :].T - label_mat[j]*(alphas[j] - alpha_j_old)*data_matrix[j, :] * data_matrix[j, :].T
                # 8. Prefer the bias backed by a non-bound alpha.
                if 0 < alphas[i] and C > alphas[i]:
                    b = b_1
                elif 0 < alphas[j] and C > alphas[j]:
                    b = b_2
                else:
                    b = (b_1 + b_2)/2
                alpha_pairs_changed += 1
            # Progress trace (message kept verbatim from the original).
            print("第%d次迭代 样本:%d , alpha优化次数:%d" % (iter_num, i, alpha_pairs_changed))
        # A full sweep with no change counts toward convergence.
        if alpha_pairs_changed == 0:
            iter_num += 1
        else:
            iter_num = 0
        print("迭代次数:%d" % iter_num)
    return b, alphas
def caluelate_w(data_mat, label_mat, alphas):
    """Recover the primal weight vector w = sum_i alpha_i * y_i * x_i.

    (Name kept as-is — existing callers use 'caluelate_w'.)

    Args:
        data_mat: samples, one feature row each.
        label_mat: labels in {-1, 1}, one per sample.
        alphas: m x 1 Lagrange multipliers from smo().

    Returns:
        w as a nested list (one [value] per feature).
    """
    alphas = np.array(alphas)
    data_mat = np.array(data_mat)
    label_mat = np.array(label_mat)
    # Generalized: tile each label across however many features the data
    # actually has (the original hard-coded 5), weight by alpha, and sum.
    n_features = data_mat.shape[1]
    w = np.dot((np.tile(label_mat.reshape(1, -1).T, (1, n_features))*data_mat).T, alphas)
    return w.tolist()
def prediction(test, w, b):
    """Classify every sample row by the sign of x·w + b.

    Scores strictly above zero map to 1; zero or below map to -1.
    Returns a plain list of labels.
    """
    samples = np.mat(test)
    labels = [1 if row * w + b > 0 else -1 for row in samples]
    return labels
if __name__ == "__main__":
    # NOTE(review): hard-coded absolute Windows paths — adjust per machine.
    pre_set, category_pre = loadDataset(r'D:\VS-Code-python\ML_algorithm\titanic_test.csv')
    data_set, category_train = loadDataset(r'D:\VS-Code-python\ML_algorithm\titanic_train.csv')
    data_mat, label_mat = split_data(data_set)
    # Hold out the first 200 training samples for evaluation.
    test_mat = data_mat[:200]
    test_label = label_mat[:200]
    data_mat = data_mat[200:]
    label_mat = label_mat[200:]
    # Train: C=0.6, tolerance=0.001, stop after 40 sweeps with no update.
    b, alphas = smo(data_mat, label_mat, 0.6, 0.001, 40)
    #print(b)
    #print(alphas)
    w = caluelate_w(data_mat, label_mat, alphas)
    #print(w)
    result = prediction(test_mat, w, b)
    pd_result = prediction(pre_set, w, b)
    count = 0
    survived = 0
    pd_survived = 0
    # Accuracy on the held-out samples.
    for i in range(len(result)):
        if result[i] == test_label[i]:
            count += 1
    # Survival rate within the training split.
    for i in range(len(data_mat)):
        if label_mat[i] == 1:
            survived += 1
    # Predicted survival rate on the unlabeled test file.
    for i in range(len(pd_result)):
        if pd_result[i] == 1:
            pd_survived += 1
    print('survive_rate_in_training_set:'+str(survived/len(data_mat)*100)+'%')
    print('accuracy:'+str(count/len(result)*100)+'%')
    print('pd_survive_rate:'+str(pd_survived/len(pd_result)*100)+'%')
# 最终结果
survive_rate_in_training_set:39.507959479015916%
accuracy:82.0%
pd_survive_rate:36.60287081339713%
# 训练出的b
[[3.90633558]]
# 训练出的alpha
[[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[2.21228910e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[1.33931171e-02]
[3.11346521e-03]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[5.96746988e-01]
[0.00000000e+00]
[5.98963504e-01]
[6.00000000e-01]
[3.24139295e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[2.49365681e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[2.24335075e-01]
[0.00000000e+00]
[6.00000000e-01]
[3.48293311e-01]
[6.00000000e-01]
[6.00000000e-01]
[4.13172180e-01]
[8.36648240e-04]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[4.40684986e-01]
[0.00000000e+00]
[6.00000000e-01]
[5.97902282e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[4.21578006e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[1.57558986e-02]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[3.39468691e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[1.04639526e-01]
[4.04105827e-02]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[1.97629091e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[3.16995499e-01]
[0.00000000e+00]
[1.08385506e-02]
[9.88601205e-02]
[0.00000000e+00]
[5.90757967e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[1.59846120e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[1.24009444e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[1.27821063e-01]
[1.73683705e-01]
[0.00000000e+00]
[5.03723055e-02]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[3.36100407e-02]
[6.00000000e-01]
[3.60411663e-03]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[4.96027009e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[1.76738696e-03]
[2.05165717e-02]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[5.82346161e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[9.95706488e-02]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[1.41335550e-02]
[1.67911965e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[2.33293741e-01]
[6.00000000e-01]
[5.60865131e-01]
[6.00000000e-01]
[8.15717061e-04]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[3.39263319e-01]
[0.00000000e+00]
[6.00000000e-01]
[4.22382885e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[1.38654506e-01]
[6.00000000e-01]
[6.00000000e-01]
[1.87971103e-01]
[0.00000000e+00]
[1.09424735e-01]
[6.00000000e-01]
[0.00000000e+00]
[1.34456365e-01]
[0.00000000e+00]
[6.00000000e-01]
[1.06581410e-14]
[0.00000000e+00]
[2.65795898e-03]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[2.26980796e-01]
[2.21682053e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[3.05769718e-04]
[5.64730165e-01]
[3.64697586e-03]
[0.00000000e+00]
[2.48136667e-01]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[1.05019687e-01]
[6.00000000e-01]
[5.81656370e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[4.58736136e-01]
[0.00000000e+00]
[1.85225694e-03]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[2.88488609e-01]
[6.00000000e-01]
[0.00000000e+00]
[5.75276384e-01]
[0.00000000e+00]
[0.00000000e+00]
[4.63535846e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[3.71225012e-03]
[2.41015977e-02]
[3.83847986e-04]
[1.68393062e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[5.75296843e-01]
[6.00000000e-01]
[2.72085247e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[2.68177672e-01]
[1.70706170e-03]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[1.71265335e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[1.78599421e-01]
[0.00000000e+00]
[0.00000000e+00]
[7.77296921e-02]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[4.87252034e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[5.31022921e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[5.04214089e-02]
[0.00000000e+00]
[6.00000000e-01]
[4.49746323e-01]
[5.94392879e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[5.98079399e-01]
[0.00000000e+00]
[6.00000000e-01]
[1.91923993e-04]
[3.64924882e-02]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[8.11474740e-02]
[1.56135451e-02]
[4.32143935e-03]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[4.76077742e-01]
[0.00000000e+00]
[2.61387773e-02]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[5.25537716e-04]
[6.00000000e-01]
[6.38198712e-02]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[1.10661794e-01]
[0.00000000e+00]
[2.49500724e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[1.62630326e-19]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[2.21128776e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[3.53962170e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[7.25932350e-02]
[6.00000000e-01]
[9.53105799e-03]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[8.64287869e-03]
[0.00000000e+00]
[3.86987563e-01]
[6.00000000e-01]
[0.00000000e+00]
[1.43714051e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[5.18144372e-01]
[6.00000000e-01]
[1.13772084e-01]
[6.00000000e-01]
[0.00000000e+00]
[7.95659834e-15]
[0.00000000e+00]
[0.00000000e+00]
[7.68209960e-03]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[5.93899135e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[1.38147257e-01]
[1.48072850e-01]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[8.76434612e-02]
[0.00000000e+00]
[0.00000000e+00]
[2.89269832e-01]
[0.00000000e+00]
[1.05075542e-01]
[1.45768749e-02]
[0.00000000e+00]
[6.00000000e-01]
[1.34814525e-01]
[6.00000000e-01]
[0.00000000e+00]
[5.49318063e-01]
[4.05176205e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[1.69368428e-01]
[2.91018982e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[1.27500184e-02]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[1.87401996e-04]
[6.30645259e-04]
[9.29904843e-02]
[6.00000000e-01]
[2.21082561e-01]
[5.91507647e-01]
[6.00000000e-01]
[3.63045128e-01]
[6.00000000e-01]
[0.00000000e+00]
[1.77721524e-01]
[2.83388924e-01]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[9.78860473e-03]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.54702627e-02]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[3.14592953e-01]
[0.00000000e+00]
[0.00000000e+00]
[2.65007849e-01]
[0.00000000e+00]
[0.00000000e+00]
[4.41059537e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[4.37620841e-02]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[3.77850212e-02]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[2.74834210e-01]
[4.06848734e-01]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[1.02978931e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[1.71584420e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[4.72226170e-02]
[0.00000000e+00]
[1.75711958e-03]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[6.00000000e-01]
[7.64025915e-02]
[6.00000000e-01]
[0.00000000e+00]
[1.26857110e-03]
[0.00000000e+00]
[0.00000000e+00]
[1.11022302e-16]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[1.54747976e-01]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[3.60164019e-01]
[0.00000000e+00]
[1.53189258e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[4.75697126e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.67901131e-02]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[6.00000000e-01]
[1.06613480e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[3.65367450e-02]
[5.83699824e-01]
[4.64139371e-04]
[0.00000000e+00]
[0.00000000e+00]
[0.00000000e+00]
[3.08234094e-01]
[0.00000000e+00]
[6.00000000e-01]
[0.00000000e+00]
[0.00000000e+00]
[6.00000000e-01]
[7.23492052e-03]
[6.00000000e-01]
[0.00000000e+00]]
# 训练出的w
[[-0.9687785267160129], [-1.9996270734827286], [-0.969151453233287], [-0.0003729265172349727], [-0.9680326736814941]]
参考资料:
- Jack-Cherish/Machine-Learning(GitHub 仓库)
- 《SVM算法之代码实现》