PyTorch implementation: training a classifier on CIFAR-10

import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import torch


class LeNet(nn.Module):
    """LeNet-5 style CNN for 32x32 RGB images (e.g. CIFAR-10), 10 output classes."""

    def __init__(self):
        # Layers with learnable parameters are declared in the constructor.
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)        # 3 -> 6 channels, 5x5 kernel
        self.conv2 = nn.Conv2d(6, 16, 5)       # 6 -> 16 channels, 5x5 kernel
        self.fc1 = nn.Linear(16 * 5 * 5, 120)  # 16 channels * 5x5 spatial after pooling
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Forward pass: two conv+relu+maxpool stages, then three fully-connected layers.

        Returns raw class scores (logits), shape (batch, 10).
        """
        out = F.relu(self.conv1(x))
        out = F.max_pool2d(out, 2)
        out = F.relu(self.conv2(out))
        out = F.max_pool2d(out, 2)
        # Flatten all feature maps into one vector per sample.
        out = out.view(out.size(0), -1)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        return self.fc3(out)


if __name__ == '__main__':

    # Initialize the network.
    net = LeNet()

    # Preprocessing: convert PIL image to tensor and normalize each RGB
    # channel from [0, 1] to [-1, 1].
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),])

    # Load the CIFAR-10 train/test splits.
    train_set = torchvision.datasets.CIFAR10(root="./data",
                                             train=True, download=True, transform=transform)
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=4, shuffle=True, num_workers=2)
    test_set = torchvision.datasets.CIFAR10(root="./data",
                                            train=False, download=True, transform=transform)
    # BUG FIX: this loader previously wrapped train_set, so "test" accuracy
    # was measured on the training data. Evaluate on the held-out split.
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=4, shuffle=False, num_workers=2)

    # Loss and optimizer.
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)

    # --- Training loop ---
    net.train()
    for epoch in range(2):
        running_loss = 0.0
        for i, (inputs, labels) in enumerate(train_loader):
            # Clear gradients via the optimizer (covers all net parameters);
            # clearer intent than net.zero_grad() since the optimizer steps them.
            optimizer.zero_grad()

            output = net(inputs)
            train_loss = criterion(output, labels)
            train_loss.backward()
            optimizer.step()

            # .item() extracts a plain Python float; accumulating tensors
            # (the old `.data`) needlessly keeps tensor objects alive.
            running_loss += train_loss.item()
            if i % 2000 == 1999:
                print("%d,%d loss: = %f" % (epoch, i + 1, running_loss / 2000))
                running_loss = 0.0

    # --- Evaluation ---
    # eval() switches layers like dropout/batchnorm to inference mode (no-op
    # for this model, but correct practice); no_grad() skips gradient tracking.
    net.eval()
    correct = 0
    total_labels = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            total_labels += labels.size(0)
            output = net(inputs)
            # Predicted class = index of the max logit per row.
            _, pred = torch.max(output, 1)
            correct += (pred == labels).sum().item()
    print("accuracy:%d" % (correct * 100 / total_labels))

You might also be interested in: (PyTorch learning)