Building a CNN with PyTorch and training it on MNIST [tested and working]

This is just a personal note; experts, feel free to skip it.

Table of contents

  • Preface
  • The code (can be run directly)
  • GPU version (can be run directly if your machine has a GPU set up)
    • GPU code

Running this code, I really felt the appeal of training on a GPU:

training the PyTorch CNN on MNIST for 5 epochs took 2 minutes 59 seconds on the CPU, while the same program on the same data took 19 seconds on the GPU.

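If you want to reproduce this kind of timing on your own machine (the exact numbers will of course vary with hardware), a minimal sketch is to wrap the whole training loop with time.time():

import time

start = time.time()
# ... run the full 5-epoch training loop from the code below here ...
print('Total training time: {:.1f} s'.format(time.time() - start))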

Preface

Preparation:

I downloaded the MNIST dataset from the official site to my computer and then read it directly with PyTorch.
(For the detailed steps, see my earlier post: link)
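Side note: if you would rather not parse the gzip files by hand, torchvision can also download and load MNIST for you. This is just a sketch of the alternative (the root directory './data' is an example path, not the one used in this post):

from torchvision import datasets, transforms

# torchvision downloads MNIST into root if it is not already there
train_dataset = datasets.MNIST(root='./data', train=True, download=True,
                               transform=transforms.ToTensor())
test_dataset = datasets.MNIST(root='./data', train=False, download=True,
                              transform=transforms.ToTensor())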

The code (can be run directly)

import gzip
import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms


# Load the MNIST dataset from local gzip files

# #************************************ load local MNIST with torch *************************************************
class DealDataset(Dataset):
    """
        读取数据、初始化数据
    """

    def __init__(self, folder, data_name, label_name, transform=None):
        (train_set, train_labels) = load_data(folder, data_name,
                                              label_name)  # torch.load() could also be used; the result would already be a torch.Tensor
        self.train_set = train_set
        self.train_labels = train_labels
        self.transform = transform

    def __getitem__(self, index):
        img, target = self.train_set[index], int(self.train_labels[index])
        if self.transform is not None:
            img = self.transform(img)
        return img, target

    def __len__(self):
        return len(self.train_set)


def load_data(data_folder, data_name, label_name):
    with gzip.open(os.path.join(data_folder, label_name), 'rb') as lbpath:  # 'rb' = read as binary
        y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)

    with gzip.open(os.path.join(data_folder, data_name), 'rb') as imgpath:
        x_train = np.frombuffer(
            imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)
    return (x_train, y_train)


train_dataset = DealDataset(r'E:\第一次实验\data', "train-images-idx3-ubyte.gz",
                           "train-labels-idx1-ubyte.gz", transform=transforms.ToTensor())
test_dataset = DealDataset(r'E:\第一次实验\data', "t10k-images-idx3-ubyte.gz",
                           "t10k-labels-idx1-ubyte.gz", transform=transforms.ToTensor())

# #************************************ load local MNIST with torch *************************************************

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=128, shuffle=False)


# Define the network architecture
class Net(nn.Module):
    def __init__(self, in_features, out_features):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_features, 32, 3)    # output: 32 x 26 x 26
        self.max_pool1 = nn.MaxPool2d(kernel_size=2)  # output: 32 x 13 x 13
        self.conv2 = nn.Conv2d(32, 64, 3)             # output: 64 x 11 x 11
        self.max_pool2 = nn.MaxPool2d(kernel_size=2)  # output: 64 x 5 x 5
        self.conv3 = nn.Conv2d(64, 64, 3)             # output: 64 x 3 x 3
        self.dnn1 = nn.Linear(64*3*3, 64)             # first fully connected layer -> 64
        self.dnn2 = nn.Linear(64, out_features)       # second fully connected layer -> 10

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.max_pool1(x)
        x = F.relu(self.conv2(x))
        x = self.max_pool2(x)
        x = F.relu(self.conv3(x))
        # x = x.view(128, -1)
        x = x.view(-1, 64*3*3)   # flatten so the shape matches nn.Linear(64*3*3, 64); -1 also handles the smaller last batch
        x = F.relu(self.dnn1(x))  # ReLU activation
        x = self.dnn2(x)
        return x

net = Net(1, 10)

print(net)


# Start training
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.02)  # note: 0.02 is fairly high for Adam (1e-3 is the usual default)

for epoch in range(5):
    running_loss, running_acc = 0.0, 0.0
    for i, data in enumerate(train_loader, 1):  # start the step counter at 1 (use 0 for zero-based counting)
        img, label = data
        out = net(img)
        loss = criterion(out, label)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        running_loss += loss.item() * label.size(0)
        _, predicted = torch.max(out, 1)
        running_acc += (predicted==label).sum().item()
        print('Epoch [{}/5], Step [{}/{}], Loss: {:.6f}, Acc: {:.6f}'.format(
            epoch + 1, i, len(train_loader), loss.item(), (predicted == label).sum().item() / label.size(0)))
    # Evaluation on the test set (no gradient updates here)
    test_loss, test_acc = 0.0, 0.0
    with torch.no_grad():
        for i, data in enumerate(test_loader):
            img, label = data

            out = net(img)
            loss = criterion(out, label)

            test_loss += loss.item() * label.size(0)
            _, predicted = torch.max(out, 1)
            test_acc += (predicted == label).sum().item()

    print("Train {} epoch, Loss: {:.6f}, Acc: {:.6f}, Test_Loss: {:.6f}, Test_Acc: {:.6f}".format(
        epoch + 1, running_loss / (len(train_dataset)), running_acc / (len(train_dataset)),
        test_loss / (len(test_dataset)), test_acc / (len(test_dataset))))

Output: (screenshot omitted)

Thanks to the original author; the code was adapted from their post: link
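As a quick sanity check after training (my own addition, not part of the referenced post), you can push a single test image through the trained network:

# Minimal sketch: classify one image from the test_dataset defined above
img, label = test_dataset[0]          # img: 1x28x28 float tensor, label: int
with torch.no_grad():
    out = net(img.unsqueeze(0))       # add a batch dimension -> 1x1x28x28
    pred = out.argmax(dim=1).item()
print('predicted: {}, ground truth: {}'.format(pred, label))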

GPU version (can be run directly if your machine has a GPU set up)

For setting up the GPU build of PyTorch, see my step-by-step CUDA 10.1 installation notes: link

Converting a CPU program to run on the GPU only takes 4 extra lines of code; see my post: link

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # add
print(device)  # add

net.to(device)  # add

inputs, labels = inputs.to(device), labels.to(device)  # add

To check whether the GPU is actually being used, see my post: link
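A quick in-script check is also possible (my own addition; it assumes net has already been built and moved to the device, as in the code below):

print(torch.cuda.is_available())            # True if PyTorch can see a usable GPU
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))    # model name of GPU 0
    print(next(net.parameters()).device)    # device the network's weights actually live on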

GPU code

import gzip
import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms


# Load the MNIST dataset from local gzip files

# #************************************ load local MNIST with torch *************************************************
class DealDataset(Dataset):
    """
        读取数据、初始化数据
    """

    def __init__(self, folder, data_name, label_name, transform=None):
        (train_set, train_labels) = load_data(folder, data_name,
                                              label_name)  # torch.load() could also be used; the result would already be a torch.Tensor
        self.train_set = train_set
        self.train_labels = train_labels
        self.transform = transform

    def __getitem__(self, index):
        img, target = self.train_set[index], int(self.train_labels[index])
        if self.transform is not None:
            img = self.transform(img)
        return img, target

    def __len__(self):
        return len(self.train_set)


def load_data(data_folder, data_name, label_name):
    with gzip.open(os.path.join(data_folder, label_name), 'rb') as lbpath:  # 'rb' = read as binary
        y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)

    with gzip.open(os.path.join(data_folder, data_name), 'rb') as imgpath:
        x_train = np.frombuffer(
            imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)
    return (x_train, y_train)


train_dataset = DealDataset(r'E:\第一次实验\data', "train-images-idx3-ubyte.gz",
                           "train-labels-idx1-ubyte.gz", transform=transforms.ToTensor())
test_dataset = DealDataset(r'E:\第一次实验\data', "t10k-images-idx3-ubyte.gz",
                           "t10k-labels-idx1-ubyte.gz", transform=transforms.ToTensor())

# #************************************ load local MNIST with torch *************************************************

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=128, shuffle=False)


# Define the network architecture
class Net(nn.Module):
    def __init__(self, in_features, out_features):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_features, 32, 3)    # output: 32 x 26 x 26
        self.max_pool1 = nn.MaxPool2d(kernel_size=2)  # output: 32 x 13 x 13
        self.conv2 = nn.Conv2d(32, 64, 3)             # output: 64 x 11 x 11
        self.max_pool2 = nn.MaxPool2d(kernel_size=2)  # output: 64 x 5 x 5
        self.conv3 = nn.Conv2d(64, 64, 3)             # output: 64 x 3 x 3
        self.dnn1 = nn.Linear(64*3*3, 64)             # first fully connected layer -> 64
        self.dnn2 = nn.Linear(64, out_features)       # second fully connected layer -> 10

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = self.max_pool1(x)
        x = F.relu(self.conv2(x))
        x = self.max_pool2(x)
        x = F.relu(self.conv3(x))
        # x = x.view(128, -1)
        x = x.view(-1, 64*3*3)   # flatten so the shape matches nn.Linear(64*3*3, 64); -1 also handles the smaller last batch
        x = F.relu(self.dnn1(x))  # ReLU activation
        x = self.dnn2(x)
        return x

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # add
net = Net(1, 10)
net.to(device)  # add
print(net)


# Start training
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.02)  # note: 0.02 is fairly high for Adam (1e-3 is the usual default)

for epoch in range(5):
    running_loss, running_acc = 0.0, 0.0
    for i, data in enumerate(train_loader, 1):  # start the step counter at 1 (use 0 for zero-based counting)
        img, label = data
        img, label = img.to(device), label.to(device)  # add
        out = net(img)
        loss = criterion(out, label)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        running_loss += loss.item() * label.size(0)
        _, predicted = torch.max(out, 1)
        running_acc += (predicted==label).sum().item()
        print('Epoch [{}/5], Step [{}/{}], Loss: {:.6f}, Acc: {:.6f}'.format(
            epoch + 1, i, len(train_loader), loss.item(), (predicted == label).sum().item() / label.size(0)))
    # Evaluation on the test set (no gradient updates here)
    test_loss, test_acc = 0.0, 0.0
    with torch.no_grad():
        for i, data in enumerate(test_loader):
            img, label = data
            img, label = img.to(device), label.to(device)  # add

            out = net(img)
            loss = criterion(out, label)

            test_loss += loss.item() * label.size(0)
            _, predicted = torch.max(out, 1)
            test_acc += (predicted == label).sum().item()

    print("Train {} epoch, Loss: {:.6f}, Acc: {:.6f}, Test_Loss: {:.6f}, Test_Acc: {:.6f}".format(
        epoch + 1, running_loss / (len(train_dataset)), running_acc / (len(train_dataset)),
        test_loss / (len(test_dataset)), test_acc / (len(test_dataset))))

Output: (screenshot omitted)

————————————————————————————————————

(For a description of the MNIST dataset itself, see the article I bookmarked: link)
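Incidentally, the offsets 8 and 16 used in load_data above come straight from the IDX file format: label files start with an 8-byte header (magic number + item count) and image files with a 16-byte header (magic number, image count, rows, columns). A small sketch to inspect that header, reusing the same local path as above:

import gzip
import struct

with gzip.open(r'E:\第一次实验\data\train-images-idx3-ubyte.gz', 'rb') as f:
    magic, num, rows, cols = struct.unpack('>IIII', f.read(16))
print(magic, num, rows, cols)   # expected: 2051 60000 28 28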

