[PyTorch] Convolutional Neural Network LeNet-5: Training on Fashion-MNIST

This post uses Fashion-MNIST to demonstrate how to build, train, and test the LeNet-5 convolutional neural network in PyTorch.

Import dependencies

import torch
import torch.nn as nn
import torchvision
import torch.utils.data as Data
import torchvision.transforms as transforms
import sys

Load the dataset

mnist_train = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=True, download=True, transform=transforms.ToTensor())
mnist_test = torchvision.datasets.FashionMNIST(root='~/Datasets/FashionMNIST', train=False, download=True, transform=transforms.ToTensor())

batch_size = 128
# Multi-process data loading is unreliable on Windows, so fall back to 0 workers there
if sys.platform.startswith('win'):
    num_workers = 0
else:
    num_workers = 4

train_iter = Data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_iter = Data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
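
As a quick sanity check (my addition, not part of the original post), you can pull one batch from train_iter and confirm that each image is a 1x28x28 tensor with pixel values scaled to [0, 1] by ToTensor():

# Sanity check: inspect one batch from the training loader
x, y = next(iter(train_iter))
print(x.shape)                           # torch.Size([128, 1, 28, 28])
print(y.shape)                           # torch.Size([128])
print(x.min().item(), x.max().item())    # pixel values lie in [0.0, 1.0]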

Build the model

class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.feature = nn.Sequential(
            nn.Conv2d(1, 6, 5),    # 1x28x28 -> 6x24x24
            nn.Sigmoid(),
            nn.MaxPool2d(2, 2),    # 6x24x24 -> 6x12x12
            nn.Conv2d(6, 16, 5),   # 6x12x12 -> 16x8x8
            nn.Sigmoid(),
            nn.MaxPool2d(2, 2)     # 16x8x8  -> 16x4x4
        )
        self.classifier = nn.Sequential(
            nn.Linear(4*4*16, 120),   # 16x4x4 feature maps flattened to 256
            nn.Sigmoid(),
            nn.Linear(120, 84),
            nn.Sigmoid(),
            nn.Linear(84, 10),        # 10 Fashion-MNIST classes
        )

    def forward(self, x):
        feature = self.feature(x)
        output = self.classifier(feature.view(feature.shape[0], -1))
        return output

net = LeNet()
print(net)

LeNet(
  (feature): Sequential(
    (0): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1))
    (1): Sigmoid()
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (3): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))
    (4): Sigmoid()
    (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (classifier): Sequential(
    (0): Linear(in_features=256, out_features=120, bias=True)
    (1): Sigmoid()
    (2): Linear(in_features=120, out_features=84, bias=True)
    (3): Sigmoid()
    (4): Linear(in_features=84, out_features=10, bias=True)
  )
)
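
The in_features=256 of the first Linear layer comes directly from the shape of the feature extractor's output. A minimal shape trace (my own sketch, not from the original post) confirms it by pushing a dummy 28x28 input through each stage:

# Shape trace: follow a dummy batch through the feature extractor
dummy = torch.rand(1, 1, 28, 28)   # Fashion-MNIST images are 1x28x28
out = dummy
for layer in net.feature:
    out = layer(out)
    print(layer.__class__.__name__, tuple(out.shape))
# Conv2d    -> (1, 6, 24, 24)   since 28 - 5 + 1 = 24
# MaxPool2d -> (1, 6, 12, 12)
# Conv2d    -> (1, 16, 8, 8)    since 12 - 5 + 1 = 8
# MaxPool2d -> (1, 16, 4, 4)    flattened to 16*4*4 = 256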


Train the model

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Training on", device)
num_epochs = 10
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.005)
net = net.to(device)

step = 0
for epoch in range(1, num_epochs+1):
    for x, y in train_iter:
        net.train()  # restore training mode after the periodic eval below
        x, y = x.to(device), y.to(device)
        out = net(x)
        l = loss(out, y)
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
        step += 1
        if step % 100 == 0:
            # Report the loss and accuracy on the current batch every 100 steps
            with torch.no_grad():
                net.eval()
                acc_sum = (net(x).argmax(dim=1) == y).float().sum().item()
                print("Epoch:{},Step:{},Loss:{},train acc:{}".format(epoch, step, l.item(), acc_sum/x.shape[0]))

Training on cpu
Epoch:1,Step:100,Loss:1.04990553855896,train acc:0.5234375
Epoch:1,Step:200,Loss:0.7416746616363525,train acc:0.75
Epoch:1,Step:300,Loss:0.6807045936584473,train acc:0.7265625
Epoch:1,Step:400,Loss:0.5523214936256409,train acc:0.8046875
Epoch:2,Step:500,Loss:0.7290717959403992,train acc:0.6796875

Epoch:9,Step:4200,Loss:0.27126845717430115,train acc:0.9140625
Epoch:10,Step:4300,Loss:0.32214048504829407,train acc:0.890625
Epoch:10,Step:4400,Loss:0.3317326307296753,train acc:0.875
Epoch:10,Step:4500,Loss:0.2592892050743103,train acc:0.890625
Epoch:10,Step:4600,Loss:0.2676653265953064,train acc:0.90625
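
Once training finishes, it is common to persist the weights so the model can be reused without retraining. A minimal sketch using torch.save (the filename is my own choice, not from the original post):

# Save the trained weights and reload them into a fresh instance
torch.save(net.state_dict(), 'lenet_fashion_mnist.pt')
net2 = LeNet().to(device)
net2.load_state_dict(torch.load('lenet_fashion_mnist.pt', map_location=device))
net2.eval()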


Test the model

def evaluate_accuracy(data_iter, net, device):
    acc_sum, n = 0.0, 0
    with torch.no_grad():
        net.eval()
        for x, y in data_iter:
            # Move each batch to the same device as the model,
            # otherwise inference fails when the model lives on the GPU
            x, y = x.to(device), y.to(device)
            acc_sum += (net(x).argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
    return acc_sum / n

print(evaluate_accuracy(test_iter, net, device))

0.8816
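
To see what that accuracy looks like on individual samples, the hedged example below (my addition) maps predicted indices to the ten Fashion-MNIST class names, listed in the dataset's official label order:

# Predict class names for a few test images
class_names = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
               'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
x, y = next(iter(test_iter))
x, y = x.to(device), y.to(device)
with torch.no_grad():
    net.eval()
    preds = net(x).argmax(dim=1)
for i in range(5):
    print('true:', class_names[y[i]], '| predicted:', class_names[preds[i]])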

