PyTorch in Practice (2): Building a Convolutional Neural Network (CNN)

I used to do all of my deep learning work in TensorFlow; that tutorial series lives on my blog, 一位安分的码农.
Later, while doing object detection with YOLO, I discovered how pleasant PyTorch is to work with; the YOLO series (yolov5实战) is on the same blog.
I have finally found the time to learn PyTorch systematically, so let's get started!
This post shows how to build a convolutional neural network in PyTorch, first with a class plus nn.Sequential and then with a plain class.
For setting up a PyTorch environment, see the earlier post in this series: pytorch实战(一)——环境配置教程(基于Anaconda).

1. Building a CNN with a Class and Sequential

This approach keeps the network structure easy to read, so building with a class plus nn.Sequential is the recommended method.
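For readers new to PyTorch, nn.Sequential is simply a container that runs its child modules in order, which is what keeps each layer definition below compact. A minimal illustration (my own toy snippet, separate from the full example):

import torch
import torch.nn as nn

# The container applies its children in order: y = relu(conv(x))
block = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5),
    nn.ReLU(),
)
y = block(torch.randn(1, 1, 32, 32))
print(y.shape)  # torch.Size([1, 6, 28, 28])

The complete example follows: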

import torch
import torch.nn as nn
import torch.nn.functional as F

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

class ConvNet(nn.Module):

    def __init__(self):
        super(ConvNet, self).__init__()

        # Conv block 1: 1 -> 6 channels; a 5x5 kernel with no padding
        # shrinks 32x32 inputs to 28x28, then 2x2 max pooling halves that to 14x14
        self.layer1 = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1, padding=0),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=6),
            nn.MaxPool2d(kernel_size=2),
        )

        # Conv block 2: 6 -> 16 channels; 14x14 -> 10x10 after the conv,
        # then 10x10 -> 5x5 after pooling
        self.layer2 = nn.Sequential(
            nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1, padding=0),
            nn.ReLU(),
            nn.BatchNorm2d(num_features=16),
            nn.MaxPool2d(kernel_size=2)
        )

        # Fully connected head; 16 * 5 * 5 = 400 is the flattened size of the
        # 16 feature maps of 5x5 that layer2 produces. Uncommenting nn.Flatten()
        # would make the reshape in forward() unnecessary.
        self.layer3 = nn.Sequential(
            # nn.Flatten(),
            nn.Linear(in_features=16 * 5 * 5, out_features=120),
            nn.ReLU(),
            nn.Linear(in_features=120, out_features=84),
            nn.ReLU(),
            nn.Linear(in_features=84, out_features=10)
        )

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = x.reshape(x.size(0), -1)  # flatten to (batch, 16 * 5 * 5)
        x = self.layer3(x)
        return x


# N is the batch size; D_in is the input spatial size (32x32);
# H is the number of channels after layer1; D_out is the output dimension
N, D_in, H, D_out = 1, 32, 6, 10

# Create a random input tensor and a random target tensor
x = torch.randn(N, 1, D_in, D_in)
y = torch.randn(N, D_out)
x = x.to(device)
y = y.to(device)

# Build the model by instantiating the class defined above.
model = ConvNet().to(device)

# Construct the loss function and the optimizer.
# model.parameters() hands the optimizer all of the model's learnable
# parameters: those of the conv, batch-norm, and linear layers alike.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for t in range(500):
    # Forward pass: compute the predicted y by passing x through the model
    y_pred = model(x)

    # Compute and print the loss
    loss = criterion(y_pred, y)
    print(t, loss.item())

    # Zero the gradients, run backpropagation, and update the weights
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
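To see where the 16 * 5 * 5 in layer3 comes from, it helps to trace the tensor shapes through the network. Here is a quick sanity check (my own addition, reusing the ConvNet class defined above):

# Trace intermediate shapes for a 1x1x32x32 input.
model_cpu = ConvNet()
demo = torch.randn(1, 1, 32, 32)
out1 = model_cpu.layer1(demo)
print(out1.shape)  # torch.Size([1, 6, 14, 14]): 32 -> 28 (5x5 conv) -> 14 (pool)
out2 = model_cpu.layer2(out1)
print(out2.shape)  # torch.Size([1, 16, 5, 5]): 14 -> 10 (5x5 conv) -> 5 (pool)
print(out2.reshape(out2.size(0), -1).shape)  # torch.Size([1, 400]), i.e. 16 * 5 * 5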

2. Building a CNN with a Plain Class

With this approach the network structure is not as easy to read at a glance as with the first method, so it is not the recommended one; it is included here for comparison.

import torch
import torch.nn as nn
import torch.nn.functional as F

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

class ConvNet(nn.Module):

    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5)     # 1 -> 6 channels, 5x5 kernel
        self.batchNorm1 = nn.BatchNorm2d(6)
        self.conv2 = nn.Conv2d(6, 16, 5)    # 6 -> 16 channels, 5x5 kernel
        self.batchNorm2 = nn.BatchNorm2d(16)
        # an affine operation: y = Wx + b
        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # 16 * 5 * 5 = flattened conv output
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Block 1: conv -> ReLU -> batch norm -> max pooling over a (2, 2) window
        x = F.relu(self.conv1(x))
        x = self.batchNorm1(x)
        x = F.max_pool2d(x, 2)
        # Block 2: conv -> ReLU -> batch norm -> max pooling over a (2, 2) window
        x = F.relu(self.conv2(x))
        x = self.batchNorm2(x)
        x = F.max_pool2d(x, 2)
        x = x.reshape(x.size(0), -1)  # flatten to (batch, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x


# N is the batch size; D_in is the input spatial size (32x32);
# H is the number of channels after conv1; D_out is the output dimension
N, D_in, H, D_out = 1, 32, 6, 10

# Create a random input tensor and a random target tensor
x = torch.randn(N, 1, D_in, D_in)
y = torch.randn(N, D_out)
x = x.to(device)
y = y.to(device)

# Build the model by instantiating the class defined above.
model = ConvNet().to(device)

# Construct the loss function and the optimizer.
# model.parameters() hands the optimizer all of the model's learnable
# parameters: those of the conv, batch-norm, and linear layers alike.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for t in range(500):
    # Forward pass: compute the predicted y by passing x through the model
    y_pred = model(x)

    # Compute and print the loss
    loss = criterion(y_pred, y)
    print(t, loss.item())

    # Zero the gradients, run backpropagation, and update the weights
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
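A caveat on both training loops above: MSE loss against a random target only demonstrates the forward/backward/step cycle. For a real 10-class classification task you would typically feed the raw outputs to nn.CrossEntropyLoss together with integer class labels. A minimal sketch of that change (the random labels and the learning rate are placeholders of my own, not part of the original example):

# Hypothetical classification setup: integer labels in [0, 10) and
# CrossEntropyLoss, which expects raw (un-softmaxed) logits.
labels = torch.randint(0, 10, (1,)).to(device)  # placeholder class indices
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)

for t in range(10):
    logits = model(x)                 # shape (N, 10)
    loss = criterion(logits, labels)  # labels have shape (N,)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()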

Note:
a Chinese translation of the official PyTorch tutorials is available at https://pytorch.panchuang.net/ThirdSection/LearningPyTorch/
