Feedforward Neural Network Regression Test

Deep learning coursework usually opens with logistic and softmax regression test examples, and the feedforward and convolutional networks that follow come with regression tests as well.

The regression test for a feedforward network looks like this; straight to the code:


import torch
import numpy as np
from matplotlib import pyplot as plt

# hand-rolled ReLU: keep positive entries, zero out the rest
def relu(x):
    return torch.where(x > 0, x, torch.zeros_like(x))

# broadcasting demo (not used in training): a (5, 1) column times a (1, 10)
# row broadcasts to a (5, 10) matrix of pairwise products
def test_broadcast():
    test1 = torch.arange(0, 5).view(5, 1)
    test2 = torch.arange(0, 10).view(1, 10)
    test3 = test1 * test2
    return test3

#print(test_broadcast())

# two affine layers with a ReLU in between
def network(x):
    H = torch.mm(x, W1) + b1      # hidden layer, shape (N, 32)
    H = relu(H)                   # nonlinearity
    return torch.mm(H, W2) + b2   # output layer, shape (N, 1)

# mean squared error: average squared difference between predictions and targets
def mse(preds, y):
    return torch.sum((preds.squeeze(1) - y.squeeze(1)) ** 2) / len(preds)

if __name__ == "__main__":
    # 1000 evenly spaced inputs in [-1, 1], shaped (1000, 1)
    point = 1000
    x_np = np.linspace(-1, 1, point, dtype=np.float32)
    x = torch.unsqueeze(torch.from_numpy(x_np), dim=1)
    # noisy quadratic targets: y = 2x^2 + x + Gaussian noise
    y = 2.0 * x ** 2 + x + 0.2 * torch.randn(x.size())

    # a 1-32-1 network: small random weights, zero biases, all tracked by autograd
    W1 = torch.normal(0, 0.01, (1, 32))
    b1 = torch.zeros(32)
    W2 = torch.normal(0, 0.01, (32, 1))
    b2 = torch.zeros(1)

    params = [W1, W2, b1, b2]
    for param in params:
        param.requires_grad_(requires_grad=True)

    # full-batch gradient descent
    lr = 0.1
    for i in range(point):
        preds = network(x)
        loss = mse(preds, y)

        loss.backward()

        # manual SGD step, then clear the gradients for the next iteration
        for param in params:
            param.data -= lr * param.grad
            param.grad.data.zero_()

    # plot the fitted curve over the noisy data
    preds = network(x).detach().numpy()
    plt.plot(x_np, preds, color='r')
    plt.scatter(x_np, y.numpy())
    plt.show()
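
As an aside, the more common idiom for the manual parameter update is a torch.no_grad() block, which disables autograd tracking while the parameters are mutated in place. A minimal sketch of the same step:

import torch

def sgd_step(params, lr):
    # update in place without recording the operations in the autograd graph
    with torch.no_grad():
        for param in params:
            param -= lr * param.grad
            param.grad.zero_()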
    

Notes:

  1. This test program fits the noisy curve y = 2x^2 + x + c, where c stands for the 0.2 * randn noise term in the code, by running gradient descent on the mean squared error.
  2. Why two linear fits? The network function uses W1, b1, W2, b2 to apply two affine transformations, with a ReLU between them; without that nonlinearity the two layers would collapse into a single linear map and could not fit a quadratic, as the sketch below shows.
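
To make note 2 concrete, here is a minimal sketch (with freshly initialized weights of the same shapes as above) verifying that two stacked affine layers with no activation in between equal a single affine layer:

import torch

W1 = torch.randn(1, 32)
b1 = torch.randn(32)
W2 = torch.randn(32, 1)
b2 = torch.randn(1)
x = torch.randn(100, 1)

# two affine layers, no activation in between ...
stacked = torch.mm(torch.mm(x, W1) + b1, W2) + b2
# ... equal one affine layer with weight W1 @ W2 and bias b1 @ W2 + b2
collapsed = torch.mm(x, torch.mm(W1, W2)) + (b1 @ W2 + b2)
print(torch.allclose(stacked, collapsed))  # True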

A second implementation, using torch.nn:

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from matplotlib import pyplot as plt


# the same 1-32-1 network, written as an nn.Module
class RegressionModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.hid1 = nn.Linear(1, 32)   # input -> hidden
        self.hid2 = nn.Linear(32, 1)   # hidden -> output

    def forward(self, x):
        h = F.relu(self.hid1(x))   # nonlinearity on the hidden layer only
        out = self.hid2(h)         # linear output for regression
        return out

if __name__ == "__main__":
    # same data as in the first implementation
    point = 1000
    x_np = np.linspace(-1, 1, point, dtype=np.float32)
    x = torch.unsqueeze(torch.from_numpy(x_np), dim=1)
    y = 2.0 * x ** 2 + x + 0.2 * torch.randn(x.size())

    criti = nn.MSELoss()   # built-in mean squared error
    lr = 0.1
    net = RegressionModel()
    optimizer = optim.SGD(net.parameters(), lr=lr)

    for i in range(200):
        preds = net(x)
        loss = criti(preds, y)
        optimizer.zero_grad()   # clear old gradients
        loss.backward()         # backpropagate
        optimizer.step()        # SGD parameter update

    # plot the fitted curve over the noisy data
    preds = net(x).detach().numpy()
    plt.plot(x_np, preds, color='r')
    plt.scatter(x_np, y.numpy())
    plt.show()

Notes:

  1. Why is ReLU not applied to out in the forward function? A regression output must be able to take any real value; a ReLU on the last layer would clamp every negative prediction to zero, and the target here is negative for x between -0.5 and 0. The sketch below illustrates the clipping.
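
A minimal sketch of that clipping problem, using a few hypothetical prediction values:

import torch
import torch.nn.functional as F

# plausible regression outputs, including negatives (hypothetical values)
preds = torch.tensor([[-0.3], [-0.1], [0.4], [1.2]])
print(F.relu(preds))
# -> 0.0, 0.0, 0.4, 1.2: the negatives are clamped, so the model could never fit y < 0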
