《PyTorch深度学习实践》 10: CNN Basics (GPU Version)

1. Overview

This series of posts records the hands-on code for the Bilibili course 《PyTorch深度学习实践》 (PyTorch Deep Learning Practice); course link: click here.

2. Code (the code carries fairly detailed comments)

# ---------------------------
# @Time     : 2022/4/21 11:23
# @Author   : lcq
# @File     : 10_CNN_GPU_base.py
# @Function : CNN basics on MNIST (GPU-ready version)
# ---------------------------

import torch
from torchvision import transforms
from torchvision import datasets            # standard vision datasets (MNIST here)
from torch.utils.data import DataLoader     # wraps a dataset and serves mini-batches
import torch.nn.functional as F
import torch.optim as optim                 # optimizers
import matplotlib.pyplot as plt
import time

import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei']     # a font that can render Chinese characters
# Once a Chinese font is set, matplotlib's default config renders the minus sign on the
# axes incorrectly, so unicode_minus must be disabled explicitly for minus signs to show.
matplotlib.rcParams['axes.unicode_minus'] = False

# Step 1: prepare the data
batch_size = 64

transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])  # normalize: (x - mean) / std, with MNIST mean 0.1307 and std 0.3081
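# Illustrative arithmetic (added note, not in the original listing): a raw pixel
# value of 0.5 becomes (0.5 - 0.1307) / 0.3081 ≈ 1.199 after Normalize.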
train_dataset = datasets.MNIST(root='../dataset/mnist/',    # where the MNIST data lives
                               train=True,                  # load the training split
                               download=True,               # download if missing locally
                               transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)
test_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)
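
# Optional sanity check (my addition, not from the course code): one mini-batch
# should come out as (batch_size, 1, 28, 28), i.e. 64 grayscale 28x28 images.
sample_X, sample_Y = next(iter(train_loader))
print(sample_X.shape, sample_Y.shape)   # torch.Size([64, 1, 28, 28]) torch.Size([64])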


# Step 2: design the model
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Conv2d(in_channels=1,     # input channels, fixed by the data: 3 for an RGB image; 1 here, since MNIST has a single grayscale channel
                                     out_channels=10,   # output channels, chosen by us: 10 means 10 groups of kernels take part in the convolution.
                                                        # "Groups" because with an RGB input, one group holds 3 kernels, one per input channel;
                                                        # 10 output channels thus means 10 such groups, which I find the easier way to think about it.
                                     kernel_size=5      # kernel size; a single 5 means a 5*5 kernel
                                     )
        self.conv2 = torch.nn.Conv2d(in_channels=10,
                                     out_channels=20,
                                     kernel_size=5)
        self.pooling = torch.nn.MaxPool2d(2)            # 2*2 max pooling
        self.fc = torch.nn.Linear(320, 10)              # fully connected layer; 320 = 20 channels * 4 * 4 (see the shape trace below)

    def forward(self, X_input):
        # input is (n, 1, 28, 28): n samples, 1 channel, width 28, height 28
        batchSize = X_input.size(0)
        X_input = F.relu(self.pooling(self.conv1(X_input)))
        X_input = F.relu(self.pooling(self.conv2(X_input)))
        X_input = X_input.view(batchSize, -1)   # flatten (n, 20, 4, 4) into (n, 320)
        y_pred = self.fc(X_input)               # fully connected layer: raw logits for the 10 digit classes
        return y_pred
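
# Where does the 320 passed to self.fc come from? A shape trace (added sketch,
# not part of the original listing; no padding, stride 1):
#   input          (n, 1, 28, 28)
#   conv1, 5x5  -> (n, 10, 24, 24)   since 28 - 5 + 1 = 24
#   pool, 2x2   -> (n, 10, 12, 12)
#   conv2, 5x5  -> (n, 20, 8, 8)     since 12 - 5 + 1 = 8
#   pool, 2x2   -> (n, 20, 4, 4)
#   flatten     -> (n, 320)          since 20 * 4 * 4 = 320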


model = Net()
# Step 3: move the model onto the GPU when one is available.
# "cuda:0" means the first GPU; "cuda:1" means the second.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model.to(device)
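
# Quick check (my addition): confirm where the model's parameters ended up.
print("Model parameters live on:", next(model.parameters()).device)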


# Step 4: build the loss function and optimizer
Loss = torch.nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
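# For reference: with momentum=0.5 and the default dampening of 0, PyTorch's SGD
# update is  v <- 0.5 * v + grad,  then  w <- w - 0.01 * v.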


# Step 5: training
def train(epoch):
    running_loss = 0.0
    for batch_index, data in enumerate(train_loader, 0):
        X_input, Y_label = data
        # move inputs and labels onto the same device as the model
        X_input, Y_label = X_input.to(device), Y_label.to(device)

        optimizer.zero_grad()

        y_pred = model(X_input)
        loss = Loss(y_pred, Y_label)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if batch_index % 10 == 9:
            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_index + 1, running_loss / 10))
            running_loss = 0.0


# Step 6: testing
def test():
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_loader:
            X_test_input, Y_test_label = data
            X_test_input, Y_test_label = X_test_input.to(device), Y_test_label.to(device)
            y_test_pred = model(X_test_input)
            _, predicted = torch.max(y_test_pred.data, dim=1)   # dim=1: max across each row's 10 class scores;
                                                                # returns (max values, their column indices), and the indices are the predicted labels
            total += Y_test_label.size(0)
            correct += (predicted == Y_test_label).sum().item()
    print("测试集上的正确率: ", correct/total)
    return correct/total
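
# How torch.max(..., dim=1) behaves on a toy tensor (illustrative example, not
# from the course code):
#   >>> logits = torch.tensor([[0.1, 2.0, -1.0], [3.0, 0.5, 0.2]])
#   >>> torch.max(logits, dim=1)
#   torch.return_types.max(values=tensor([2., 3.]), indices=tensor([1, 0]))
# The indices are exactly the predicted class labels used above.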


if __name__ == '__main__':
    epoch_list = []
    acc_list = []

    startTime = time.time()
    for epoch in range(5):
        train(epoch)
        acc = test()
        epoch_list.append(epoch)
        acc_list.append(acc)

    endTime = time.time()
    stringTime = "用CPU跑耗时: " + str(endTime-startTime)
    plt.plot(epoch_list, acc_list)
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.title(stringTime)
    plt.show()
