Building a LeNet network with PyTorch and training/testing it on the MNIST dataset

I have been learning PyTorch recently and reproduced the LeNet network by hand. The full source code is attached below; comments and discussion are welcome.

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        # conv layers: (in_channels, out_channels, kernel_size, stride)
        self.conv1 = nn.Conv2d(1, 6, 5, 1)
        self.conv2 = nn.Conv2d(6, 16, 5, 1)
        # fully connected layers; 16*4*4 is the flattened conv output for 28x28 inputs
        self.fc1 = nn.Linear(16*4*4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # x: (batch, 1, 28, 28) MNIST images
        x = F.max_pool2d(self.conv1(x), 2, 2)   # -> (batch, 6, 12, 12)
        x = F.max_pool2d(self.conv2(x), 2, 2)   # -> (batch, 16, 4, 4)
        x = x.view(-1, 16*4*4)                  # flatten to (batch, 256)
        # Note: the classic LeNet applies a nonlinearity (tanh/ReLU) after each conv/fc
        # layer; it is omitted here, and the results below were produced without it
        # (see the LeNetReLU sketch after this class for a version with activations).
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return F.log_softmax(x, dim=1)          # log-probabilities, paired with nll_loss
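
# Variant for reference only, not what produced the results shown below: the classic
# LeNet applies a nonlinearity after each conv/fc layer. A minimal sketch that keeps
# the layer sizes above but adds ReLU activations:
class LeNetReLU(LeNet):
    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2, 2)
        x = x.view(-1, 16*4*4)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return F.log_softmax(x, dim=1)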

def train(model, device, train_loader, optimizer, epoch):
    model.train()  # switch to training mode
    for idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        pred = model(data)
        loss = F.nll_loss(pred, target)  # negative log-likelihood on the log_softmax output

        # standard update: clear old gradients, backpropagate, apply the step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if idx % 100 == 0:
            print("Train Epoch: {}, iteration: {}, Loss: {}".format(epoch, idx, loss.item()))

def test(model, device, test_loader):
    model.eval()  # switch to evaluation mode
    total_loss = 0.
    correct = 0.
    with torch.no_grad():  # no gradients needed during evaluation
        for idx, (data, target) in enumerate(test_loader):
            data, target = data.to(device), target.to(device)

            output = model(data)
            # sum per-sample losses so we can average over the whole dataset below
            total_loss += F.nll_loss(output, target, reduction="sum").item()
            pred = output.argmax(dim=1)
            correct += pred.eq(target.view_as(pred)).sum().item()

    total_loss /= len(test_loader.dataset)
    acc = correct / len(test_loader.dataset) * 100
    print("Test loss: {}, Accuracy: {}".format(total_loss, acc))



device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # use GPU if available
batch_size = 32
# download=False assumes the MNIST files are already under ./MNIST_data;
# set download=True on the first run to fetch them automatically.
train_dataloader = torch.utils.data.DataLoader(
    datasets.MNIST("./MNIST_data", train=True, download=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       # 0.1307 / 0.3081 are the commonly used MNIST mean / std
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True,
    num_workers=1, pin_memory=True  # pin_memory=True speeds up host-to-GPU transfer
)
test_dataloader = torch.utils.data.DataLoader(
    datasets.MNIST("./MNIST_data", train=False, download=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True,
    num_workers=1, pin_memory=True
)
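
# Optional sanity check (a sketch): the mean/std used in Normalize can be verified
# directly from the raw training images, e.g.
#   data = train_dataloader.dataset.data.float() / 255.0
#   print(data.mean().item(), data.std().item())   # roughly 0.1307, 0.3081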

if __name__ == '__main__':
    lr = 0.01
    momentum = 0.5
    model = LeNet().to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)

    num_epochs = 2
    for epoch in range(num_epochs):
        train(model, device, train_dataloader, optimizer, epoch)
        test(model, device, test_dataloader)

    # save the trained weights for later use
    torch.save(model.state_dict(), "mnist_lenet.pt")
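
After training, the saved weights can be reloaded for inference. Below is a minimal sketch, assuming mnist_lenet.pt was produced by the script above and that the LeNet class is defined in (or imported into) the current file:

import torch
from torchvision import datasets, transforms

model = LeNet()
model.load_state_dict(torch.load("mnist_lenet.pt", map_location="cpu"))
model.eval()

test_set = datasets.MNIST("./MNIST_data", train=False, download=False,
                          transform=transforms.Compose([
                              transforms.ToTensor(),
                              transforms.Normalize((0.1307,), (0.3081,))
                          ]))
img, label = test_set[0]      # a single (image, label) pair
with torch.no_grad():
    pred = model(img.unsqueeze(0)).argmax(dim=1).item()
print("predicted: {}, label: {}".format(pred, label))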


The training and test loss/accuracy are shown below. Since my machine has the CPU build of PyTorch installed, the results may differ slightly from a GPU run.

Train Epoch: 0, iteration: 0, Loss: 2.317941427230835
Train Epoch: 0, iteration: 100, Loss: 0.6644562482833862
Train Epoch: 0, iteration: 200, Loss: 0.24840769171714783
Train Epoch: 0, iteration: 300, Loss: 0.4052567183971405
Train Epoch: 0, iteration: 400, Loss: 0.4134133756160736
Train Epoch: 0, iteration: 500, Loss: 0.22553721070289612
Train Epoch: 0, iteration: 600, Loss: 0.16834495961666107
Train Epoch: 0, iteration: 700, Loss: 0.4989162087440491
Train Epoch: 0, iteration: 800, Loss: 0.06609301269054413
Train Epoch: 0, iteration: 900, Loss: 0.09795038402080536
Train Epoch: 0, iteration: 1000, Loss: 0.07747422158718109
Train Epoch: 0, iteration: 1100, Loss: 0.10496354848146439
Train Epoch: 0, iteration: 1200, Loss: 0.10571342706680298
Train Epoch: 0, iteration: 1300, Loss: 0.13820703327655792
Train Epoch: 0, iteration: 1400, Loss: 0.08925780653953552
Train Epoch: 0, iteration: 1500, Loss: 0.13711456954479218
Train Epoch: 0, iteration: 1600, Loss: 0.2791351079940796
Train Epoch: 0, iteration: 1700, Loss: 0.055826544761657715
Train Epoch: 0, iteration: 1800, Loss: 0.12501437962055206
Test loss: 0.11120971865653992, Accuracy: 96.6
Train Epoch: 1, iteration: 0, Loss: 0.11889153718948364
Train Epoch: 1, iteration: 100, Loss: 0.04244357347488403
Train Epoch: 1, iteration: 200, Loss: 0.07095445692539215
Train Epoch: 1, iteration: 300, Loss: 0.05811166763305664
Train Epoch: 1, iteration: 400, Loss: 0.019630610942840576
Train Epoch: 1, iteration: 500, Loss: 0.20999518036842346
Train Epoch: 1, iteration: 600, Loss: 0.1654982566833496
Train Epoch: 1, iteration: 700, Loss: 0.1235302984714508
Train Epoch: 1, iteration: 800, Loss: 0.1591937392950058
Train Epoch: 1, iteration: 900, Loss: 0.0024472326040267944
Train Epoch: 1, iteration: 1000, Loss: 0.07611808180809021
Train Epoch: 1, iteration: 1100, Loss: 0.15021288394927979
Train Epoch: 1, iteration: 1200, Loss: 0.12693913280963898
Train Epoch: 1, iteration: 1300, Loss: 0.008730217814445496
Train Epoch: 1, iteration: 1400, Loss: 0.034120798110961914
Train Epoch: 1, iteration: 1500, Loss: 0.03648284077644348
Train Epoch: 1, iteration: 1600, Loss: 0.013119161128997803
Train Epoch: 1, iteration: 1700, Loss: 0.016608938574790955
Train Epoch: 1, iteration: 1800, Loss: 0.05516229569911957
Test loss: 0.0660199611902237, Accuracy: 97.8

