For an explanation of the primal (original) form of the perceptron learning algorithm, with the mathematical solution worked through step by step, see the linked post.
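A quick recap of the method itself (my own summary, not taken from the linked write-up): the primal perceptron keeps a weight vector $w$ and a bias $b$, and corrects them whenever a training point is misclassified:

\[
f(x) = \operatorname{sign}(w \cdot x + b), \qquad
y_i\,(w \cdot x_i + b) \le 0 \;\Rightarrow\;
w \leftarrow w + \eta\, y_i x_i, \quad
b \leftarrow b + \eta\, y_i
\]

Here $\eta$ is the learning rate, which the code below fixes at alpha = 1.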
Python notes:
1) The inner product of W and X is computed with np.dot(W, X).
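A minimal sketch of that point (toy values of my own, not from the post): for two 1-D arrays, np.dot returns their inner product, which is exactly how W·X is computed in the code below.

import numpy as np

W = np.array([1.0, 1.0])   # weight vector
x = np.array([3, 3])       # one training instance
# np.dot of two 1-D arrays is their inner product: 1*3 + 1*3 = 6.0
print(np.dot(W, x))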
# -*- coding: utf-8 -*-
"""
@author: 蔚蓝的天空tom
Aim: implement the primal (original) form of the perceptron learning algorithm
"""
import numpy as np

class CPerceptron(object):
    '''Primal form of the perceptron learning algorithm'''
    def __init__(self, train_samples, Y):
        self.X = train_samples  # feature vectors of the training set
        self.Y = Y              # class labels of the training set
        self.W = []             # weight vector
        self.b = None           # bias
        self.alpha = 1          # learning rate (step size)
        self.cnt = 0            # iteration counter
        self.study()

    def study(self):
        self.cnt += 1
        print('\n==================Iteration %d.....' % self.cnt)
        if 0 == np.shape(self.W)[0]:  # first call: initialize W and b to zero
            self.W = np.full(shape=np.shape(self.X[0]), fill_value=0.0)
            self.b = 0
        # perceptron learning: scan the training set for misclassified points
        for i in range(np.shape(self.X)[0]):
            loss = self.Y[i] * (np.dot(self.W, self.X[i]) + self.b)
            if loss > 0:  # correctly classified
                print('loss=%f, correctly classified: W:' % loss, self.W,
                      'b:%d' % self.b, 'X%d=' % i, self.X[i])
                if i == np.shape(self.X)[0] - 1:  # last sample also correct: converged
                    print('perceptron study over~')
                    return self.W, self.b
            else:  # loss <= 0, misclassified: update W and b, then rescan from the start
                old_W, old_b = self.W, self.b
                self.W = self.W + self.alpha * self.X[i] * self.Y[i]
                self.b = self.b + self.alpha * self.Y[i]
                print('loss=%f, misclassified: W:' % loss, old_W,
                      'b:%d' % old_b, 'x%d=' % i, self.X[i],
                      'updated W:', self.W, 'updated b:', self.b)
                return self.study()

    def Classifier(self, test_samples):
        # sign function; zero is mapped to +1, consistent with the training rule
        p_sign = lambda x: -1 if x < 0 else +1
        ret = [p_sign(np.dot(self.W, x) + self.b) for x in test_samples]
        return ret
def CPerceptron_manual():
    # feature vectors of the training instances, x = (x1, x2)^T
    X = np.array([[3, 3],
                  [4, 3],
                  [1, 1]])
    # class labels of the training instances
    Y = [1, 1, -1]
    p = CPerceptron(X, Y)

    print('\n=====test samples')
    test_samples = np.array([[0.5, 0.5],
                             [1, 3],
                             [1, 2],
                             [2, 2],
                             [5, 5]])
    ret = p.Classifier(test_samples)
    print('Test feature set:\n', test_samples)
    print('Perceptron classification results:\n', np.array(ret))

    # also classify the training samples themselves
    test_samples = X
    ret = p.Classifier(test_samples)
    print('\nTest feature set:\n', test_samples)
    print('Perceptron classification results:\n', np.array(ret))
    return

if __name__ == '__main__':
    CPerceptron_manual()
runfile('C:/Users/tom/perceptron_origin.py', wdir='C:/Users/tom')
==================Iteration 1.....
loss=0.000000, misclassified: W: [ 0. 0.] b:0 x0= [3 3] updated W: [ 3. 3.] updated b: 1
==================Iteration 2.....
loss=19.000000, correctly classified: W: [ 3. 3.] b:1 X0= [3 3]
loss=22.000000, correctly classified: W: [ 3. 3.] b:1 X1= [4 3]
loss=-7.000000, misclassified: W: [ 3. 3.] b:1 x2= [1 1] updated W: [ 2. 2.] updated b: 0
==================Iteration 3.....
loss=12.000000, correctly classified: W: [ 2. 2.] b:0 X0= [3 3]
loss=14.000000, correctly classified: W: [ 2. 2.] b:0 X1= [4 3]
loss=-4.000000, misclassified: W: [ 2. 2.] b:0 x2= [1 1] updated W: [ 1. 1.] updated b: -1
==================Iteration 4.....
loss=5.000000, correctly classified: W: [ 1. 1.] b:-1 X0= [3 3]
loss=6.000000, correctly classified: W: [ 1. 1.] b:-1 X1= [4 3]
loss=-1.000000, misclassified: W: [ 1. 1.] b:-1 x2= [1 1] updated W: [ 0. 0.] updated b: -2
==================Iteration 5.....
loss=-2.000000, misclassified: W: [ 0. 0.] b:-2 x0= [3 3] updated W: [ 3. 3.] updated b: -1
==================Iteration 6.....
loss=17.000000, correctly classified: W: [ 3. 3.] b:-1 X0= [3 3]
loss=20.000000, correctly classified: W: [ 3. 3.] b:-1 X1= [4 3]
loss=-5.000000, misclassified: W: [ 3. 3.] b:-1 x2= [1 1] updated W: [ 2. 2.] updated b: -2
==================Iteration 7.....
loss=10.000000, correctly classified: W: [ 2. 2.] b:-2 X0= [3 3]
loss=12.000000, correctly classified: W: [ 2. 2.] b:-2 X1= [4 3]
loss=-2.000000, misclassified: W: [ 2. 2.] b:-2 x2= [1 1] updated W: [ 1. 1.] updated b: -3
==================Iteration 8.....
loss=3.000000, correctly classified: W: [ 1. 1.] b:-3 X0= [3 3]
loss=4.000000, correctly classified: W: [ 1. 1.] b:-3 X1= [4 3]
loss=1.000000, correctly classified: W: [ 1. 1.] b:-3 X2= [1 1]
perceptron study over~
=====test samples
Test feature set:
[[ 0.5 0.5]
[ 1. 3. ]
[ 1. 2. ]
[ 2. 2. ]
[ 5. 5. ]]
Perceptron classification results:
[-1 1 1 1 1]
Test feature set:
[[3 3]
[4 3]
[1 1]]
Perceptron classification results:
[ 1 1 -1]
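A quick sanity check on the learned parameters W = (1, 1), b = -3 (my own arithmetic, not part of the original run): the learned decision function is

\[
f(x) = \operatorname{sign}(x_1 + x_2 - 3),
\]

so f(3, 3) = sign(3) = +1 and f(1, 1) = sign(-1) = -1 reproduce the training labels, while f(0.5, 0.5) = sign(-2) = -1 matches the first test result above.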
(end)