Building Neural Networks with PyTorch

Contents

  • 1. Regression Problem
  • 2. Quick Build with nn.Sequential
  • 3. Saving and Loading a Network
  • 4. Batch Training
  • 5. Optimizers
  • 6. Building a Convolutional Neural Network with PyTorch

1. Regression Problem

import torch
import torch.nn as nn
import torch.nn.functional as f
import matplotlib.pyplot as plt

x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
# Why use unsqueeze to add a dimension to x? nn.Linear expects 2-D input of
# shape (n_samples, n_features), so the 1-D tensor of 100 points becomes a (100, 1) column.
y = x.pow(2) + 0.2 * torch.rand(x.size())
plt.scatter(x.data.numpy(), y.data.numpy())
plt.show()

(Figure 1: scatter plot of the generated data)
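To make the unsqueeze step concrete, the shapes before and after can be compared (a quick check, not part of the original script):

print(torch.linspace(-1, 1, 100).shape)                          # torch.Size([100])
print(torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1).shape)  # torch.Size([100, 1])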
We now fit a neural network to this data, using backpropagation to update the network's parameters so that the loss is minimized.

Define a custom network by subclassing nn.Module, with 1 input feature, 10 hidden units, and 1 output.

class RegressionNeural(nn.Module):
    def __init__(self, in_feature, n_hidden, n_out):
        super().__init__()
        self.hidden = nn.Linear(in_feature, n_hidden)
        self.out = nn.Linear(n_hidden, n_out)

    # forward pass
    def forward(self, x):
        return self.out(f.relu(self.hidden(x)))


net = RegressionNeural(1, 10, 1)
print(net)

RegressionNeural(
  (hidden): Linear(in_features=1, out_features=10, bias=True)
  (out): Linear(in_features=10, out_features=1, bias=True)
)
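The 1 → 10 → 1 structure can also be confirmed by listing the parameter shapes (a small optional check):

for name, param in net.named_parameters():
    print(name, tuple(param.shape))
# hidden.weight (10, 1), hidden.bias (10,), out.weight (1, 10), out.bias (1,)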

Train the network for 100 steps.

# Define the optimizer: SGD (stochastic gradient descent) with learning rate 0.5
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
# Define the loss function: mean squared error
loss_func = torch.nn.MSELoss()

for step in range(100):
    prediction = net(x)
    loss = loss_func(prediction, y)
    # clear the previous gradients
    optimizer.zero_grad()
    # backpropagate to compute the gradients
    loss.backward()
    # update the parameters
    optimizer.step()
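After the 100 steps, a quick look at the result might be (a minimal sketch using the variables defined above):

prediction = net(x)
print('final loss:', loss_func(prediction, y).item())
plt.scatter(x.data.numpy(), y.data.numpy())               # original data
plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-')   # fitted curve
plt.show()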

2. Quick Build with nn.Sequential

net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 1))
X = torch.rand(size=(2, 4))
print(net(X))
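Printing a Sequential model gives the same kind of summary as the hand-written class; the layers are simply indexed by position instead of by attribute name:

print(net)
# Sequential(
#   (0): Linear(in_features=4, out_features=8, bias=True)
#   (1): ReLU()
#   (2): Linear(in_features=8, out_features=1, bias=True)
# )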

3. Saving and Loading a Network

def save_network():
    net_one = nn.Sequential(nn.Linear(1, 10), nn.ReLU(), nn.Linear(10, 1))
    # Define the optimizer: SGD (stochastic gradient descent) with learning rate 0.5
    optimizer = torch.optim.SGD(net_one.parameters(), lr=0.5)
    # Define the loss function: mean squared error
    loss_func = torch.nn.MSELoss()

    for step in range(100):
        prediction = net_one(x)
        loss = loss_func(prediction, y)
        # clear the previous gradients
        optimizer.zero_grad()
        # backpropagate to compute the gradients
        loss.backward()
        # update the parameters
        optimizer.step()
    torch.save(net_one, 'net.pkl')                       # save the entire network
    torch.save(net_one.state_dict(), 'net_params.pkl')   # save only the parameters


def restore_net():
    # load the entire network back in one call
    net_two = torch.load('net.pkl')
    return net_two


def restore_params():
    # rebuild the same architecture, then load only the saved parameters into it
    net_three = nn.Sequential(nn.Linear(1, 10), nn.ReLU(), nn.Linear(10, 1))
    net_three.load_state_dict(torch.load('net_params.pkl'))
    return net_three
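A quick round-trip check of the three functions might look like this (a sketch; it assumes x from the regression section is still in scope):

save_network()
net_two = restore_net()
net_three = restore_params()
print(torch.allclose(net_two(x), net_three(x)))   # expected: True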

4. Batch Training

Feeding the entire dataset to the network at once is inefficient, so the data is usually split into mini-batches for training.

import torch
import torch.utils.data as Data
BATCH_SIZE = 5
x = torch.linspace(1, 10, 10)
y = torch.linspace(10, 1, 10)
dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
    dataset=dataset,
    batch_size=BATCH_SIZE,
    shuffle=True,   # shuffle the data every epoch
)

for epoch in range(2):
    for step, (batch_x, batch_y) in enumerate(loader):
        print('Epoch:', epoch)
        print('Step:', step)
        print('batch_x', batch_x)
        print('batch_y', batch_y)
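The same loader can drive an actual training loop. Below is a minimal sketch with a tiny 1-in / 1-out network defined purely for illustration (it assumes torch.nn has been imported as nn):

net_b = nn.Sequential(nn.Linear(1, 10), nn.ReLU(), nn.Linear(10, 1))
opt = torch.optim.SGD(net_b.parameters(), lr=0.1)
loss_fn = torch.nn.MSELoss()

for epoch in range(2):
    for batch_x, batch_y in loader:
        pred = net_b(batch_x.unsqueeze(1))           # shape (BATCH_SIZE, 1)
        loss = loss_fn(pred, batch_y.unsqueeze(1))
        opt.zero_grad()
        loss.backward()
        opt.step()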

5. Optimizers

LR = 0.01   # learning rate (example value)
optimizer = torch.optim.Adam(net.parameters(), lr=LR)
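Other common optimizers in torch.optim are constructed the same way; below is a self-contained sketch (the learning rate and hyperparameter values are just placeholders):

import torch
import torch.nn as nn

def make_net():
    # identical tiny architecture for each optimizer, so only the update rule differs
    return nn.Sequential(nn.Linear(1, 10), nn.ReLU(), nn.Linear(10, 1))

net_SGD, net_Momentum, net_RMSprop, net_Adam = (make_net() for _ in range(4))

opt_SGD      = torch.optim.SGD(net_SGD.parameters(), lr=0.01)
opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=0.01, momentum=0.8)
opt_RMSprop  = torch.optim.RMSprop(net_RMSprop.parameters(), lr=0.01, alpha=0.9)
opt_Adam     = torch.optim.Adam(net_Adam.parameters(), lr=0.01, betas=(0.9, 0.99))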

6. Building a Convolutional Neural Network with PyTorch

class CNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Sequential(
            # Convolution settings: in_channels is the number of input channels,
            # out_channels is the number of filters (output channels)
            nn.Conv2d(          # input:  (1, 28, 28)
                in_channels=1,
                out_channels=16,
                kernel_size=5,    # 5x5 kernel
                stride=1,         # stride of the convolution
                padding=2         # if stride=1, padding = (kernel_size - 1) / 2 keeps the spatial size
            ),                    # output: (16, 28, 28)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)    # (16, 14, 14)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),    # (32, 14, 14)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2)    # (32, 7, 7)
        )

        self.out = nn.Linear(32 * 7 * 7, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(x.size(0), -1)  # (batch, 32 * 7 * 7)
        output = self.out(x)
        return output
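A quick shape check with a dummy MNIST-sized batch (the batch size of 2 is arbitrary):

cnn = CNN()
dummy = torch.rand(2, 1, 28, 28)   # (batch, channels, height, width)
print(cnn(dummy).shape)            # expected: torch.Size([2, 10])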
