感知机根据不同分布数据样本得到不同的损失值(基于pytorch),简单理解算法收敛性问题

# 测试函数最常用来解决的一个问题 :算法最后结果是不是收敛的
'''
比如我们想用一条线去分割一个数据集,将它分成两个类别——这里我们用感知机算法来测试一下
'''
'''
感知机一般只能解决线性分类问题,也就是说图像是这个样子的,我们的目的是找到一条线将它们分开
'''
import matplotlib.pyplot as plt
import numpy as np
# Linearly separable toy data: the label -1 points cluster near the origin
# and the label +1 points sit in the upper-right region, so a single
# straight line can split them (the perceptron should converge).
X = [[1, 1], [2, 2], [0, 0], [5, 5], [4, 5], [6, 4]]
y = [-1, -1, -1, 1, 1, 1]
def filer(X, y, W, b):
    """Collect every sample that the current line W·x + b misclassifies.

    A sample (x1, x2) with label t (+1 or -1) is misclassified when
    t * (W[0]*x1 + W[1]*x2 + b) < 0.  Each misclassified sample is
    returned as a [x1, x2, t] triple; the list is also printed so the
    training loop's progress is visible.
    """
    misclassified = [
        [x1, x2, label]
        for (x1, x2), label in zip(X, y)
        if (x1 * W[0] + x2 * W[1] + b) * label < 0
    ]
    print(misclassified)
    return misclassified
def update(W, b, M, a):
    """Apply one batch perceptron update using ALL misclassified samples.

    Parameters:
        W: weight list [w1, w2]; mutated in place.
        b: bias term.
        M: misclassified samples as [x1, x2, label] triples (from filer).
        a: learning rate.

    Returns:
        (W, b) after the update; W is the same (mutated) list object.

    Bug fix: the original code accumulated the gradient over every sample
    in M (W1, W2, b_update) but then applied only the LAST sample's
    contribution, because it indexed with the leaked loop variable M[i]
    instead of using the accumulated sums.
    """
    if M:
        # Gradient of the perceptron loss summed over the misclassified
        # set: dW = sum(label * x), db = sum(label).
        grad_w1 = sum(sample[0] * sample[2] for sample in M)
        grad_w2 = sum(sample[1] * sample[2] for sample in M)
        grad_b = sum(sample[2] for sample in M)
        W[0] += a * grad_w1
        W[1] += a * grad_w2
        b += a * grad_b
    return W, b
# ---- Train the hand-written perceptron on the separable data ----
W = [2, 2]   # initial weight vector
b = 1        # initial bias

loss1 = []   # squared misclassification count per iteration
# Fix: the original called filer() three times per iteration (loop
# condition, update argument, loss) — recomputing the same list and
# printing it three times.  Call it once per iteration instead.
M = filer(X, y, W, b)
while len(M) != 0:
    W, b = update(W, b, M, 0.1)
    print(W)
    print(b)
    # Re-evaluate once after the update; this value is both the recorded
    # loss and the next loop condition.
    M = filer(X, y, W, b)
    loss1.append(len(M) * len(M))

# Scatter the samples and draw the learned boundary W[0]*x + W[1]*y + b = 0.
for i in range(len(X)):
    plt.scatter(X[i][0], X[i][1], color='r')
ax = np.linspace(0, 6, 100)
ay = [(W[0] * x + b) / (0 - W[1]) for x in ax]
plt.plot(ax, ay, color='b')
plt.show()
plt.plot(loss1, label='loss1')
plt.legend(loc=0)
plt.show()


# 我们得到的模型就是 (W[0] * x + W[1] * y + b = 0):如果把 (x, y) 代入后结果大于 0,我们就说该样本属于 1 这个类
# 在上述算法中有一个非常重要的测试函数:len(filer(X, y, W, b)) != 0  也就是说误分类样本点的数 为 0 也就是说该算法有最小值

'''
如果数据分布一条直线分不开怎么办?
X = [[1, 1], [2, 2], [0, 0], [5, 5], [4, 5], [6, 4]]
y = [-1, -1, -1, 1, 1, 1]
'''

from torch import nn
from torch import optim as optimizer
import torch
from torch.autograd import Variable


# Training data for the logistic-regression demo.  The labels look
# deliberately NOT linearly separable (e.g. [5, 5] -> 0 while [4, 5] -> 1),
# illustrating the "what if one straight line cannot split the data"
# question raised in the comment above — TODO confirm the labeling intent.
X = [[1, 1], [2, 2], [0.1, 0.1], [0, 0], [5, 5], [4, 5], [6, 4]]
y = [[0], [0], [1], [1], [0], [1], [1]]
class Model(nn.Module):
    """Logistic regression: a single 2-in/1-out linear layer followed by a
    sigmoid, producing a probability in (0, 1) for binary classification."""

    def __init__(self):
        super(Model, self).__init__()
        self.linear = nn.Linear(2, 1)      # affine map R^2 -> R
        self.activation = nn.Sigmoid()     # squash the logit to (0, 1)

    def forward(self, x):
        # Probability = sigmoid(linear(x)); shape (batch, 1).
        return self.activation(self.linear(x))
# Fix: torch.autograd.Variable is deprecated since PyTorch 0.4 — plain
# tensors carry autograd state now.  The input X also does not need
# requires_grad=True (only the model parameters receive gradients).
X = torch.tensor(X, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.float32)

model_sgd = Model()
model_ad = Model()  # NOTE(review): never used below — kept to preserve the module's names
loss_function = torch.nn.BCELoss()  # binary cross-entropy over sigmoid outputs

sgd = optimizer.SGD(model_sgd.parameters(), lr=0.01)

loss2 = []
for epoch in range(1000):
    out = model_sgd(X)          # forward pass: predicted probabilities
    sgd.zero_grad()             # clear gradients from the previous step
    loss = loss_function(out, y)
    loss.backward()             # backprop through the linear layer
    sgd.step()                  # gradient-descent parameter update

    # Fix: loss.data.numpy() is a deprecated access pattern; loss.item()
    # is the supported way to read a scalar tensor.
    print('epoch={}, loss={}'.format(epoch, loss.item()))
    loss2.append(loss.item())

plt.plot(loss2, label='loss2')
plt.legend(loc=0)
plt.show()

你可能感兴趣的:(机器学,人工智能,pytorch,测试函数,收敛性,感知机)