Building a simple network in PyTorch and training it on the CIFAR-10 dataset

CIFAR-10 is a commonly used color-image dataset with ten classes ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'). Each image is 3 x 32 x 32, i.e. a three-channel color image with a resolution of 32 x 32.

import torch as t
import torch.nn as nn
import torch.nn.functional as F
import torchvision as tv
import torchvision.transforms as transforms
from torchvision.transforms import ToPILImage
show = ToPILImage() # converts a Tensor back to a PIL Image, handy for visualization

# Define the preprocessing applied to every image
transform = transforms.Compose([
    transforms.ToTensor(),                                 # convert to Tensor (pixels scaled to [0, 1])
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # normalize each channel to [-1, 1]
])

# Training set
trainset = tv.datasets.CIFAR10(root=r"C:\Users\fox\data", train=True, download=True, transform=transform)

trainloader = t.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)

# Test set
testset = tv.datasets.CIFAR10(r"C:\Users\fox\data", train=False, download=True, transform=transform)

testloader = t.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
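
Before building the network, it helps to sanity-check the data pipeline. The sketch below (the sample index is arbitrary) pulls one image from trainset and uses the show helper defined above; since Normalize mapped pixels to [-1, 1], the transform is undone before converting back to a PIL image:

# Inspect one training sample (index 100 is arbitrary)
(img, label) = trainset[100]
print(classes[label], img.shape)     # e.g. 'ship' torch.Size([3, 32, 32])

# Undo the normalization ((x + 1) / 2 maps [-1, 1] back to [0, 1]) and visualize
show((img + 1) / 2).resize((100, 100))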

# Define the network
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        
        # convolution layer: 3 input channels (RGB), 6 output channels, 5x5 kernel
        self.conv1 = nn.Conv2d(3, 6, 5)
        
        # convolution layer: 6 input channels, 16 output channels, 5x5 kernel
        self.conv2 = nn.Conv2d(6, 16, 5)
        
        # affine / fully connected layers, y = Wx + b
        self.fc1 = nn.Linear(16*5*5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
    
    def forward(self, x):
        # convolution -> activation -> pooling
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        # reshape: -1 lets view infer the flattened size automatically
        x = x.view(x.size()[0], -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
net = Net()
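
The in_features=16*5*5 of fc1 follows from the tensor shapes: a 32x32 input becomes 28x28 after the first 5x5 convolution, 14x14 after 2x2 max pooling, 10x10 after the second convolution, and 5x5 after the second pooling, with 16 channels. A quick forward pass with random data (a minimal sketch) confirms the output shape:

# Run a random batch of four 3x32x32 images through the untrained network
dummy = t.randn(4, 3, 32, 32)
out = net(dummy)
print(out.shape)   # expected: torch.Size([4, 10]), one score per class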

# Define the loss function and the optimizer
from torch import optim
criterion = nn.CrossEntropyLoss() # cross-entropy loss
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
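
Note that nn.CrossEntropyLoss expects raw, unnormalized scores of shape (N, C) and integer class indices of shape (N,), not one-hot vectors; a tiny illustration with made-up values:

# Toy example: 2 samples, 10 classes
toy_scores = t.randn(2, 10)                 # raw scores as produced by the network
toy_targets = t.tensor([3, 7])              # ground-truth class indices
print(criterion(toy_scores, toy_targets))   # a scalar loss tensor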

for epoch in range(10):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # unpack the input batch
        inputs, labels = data
        
        # zero the parameter gradients
        optimizer.zero_grad()
        
        # forward + backward
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        
        # update the parameters
        optimizer.step()
        
        # log the training progress
        running_loss += loss.item()
        if i % 2000 == 1999: # print the average loss every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished training')
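
After training, the learned weights can be saved to disk with torch.save and reloaded later; a minimal sketch (the file name cifar_net.pth is an arbitrary choice, not from the original code):

# Persist the trained weights; the path below is just an example
PATH = './cifar_net.pth'
t.save(net.state_dict(), PATH)

# To restore later: net.load_state_dict(t.load(PATH))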

Computing the accuracy on the test set

correct = 0 # number of correctly predicted images
total = 0   # total number of images
for data in testloader:
    images, labels = data
    outputs = net(images)
    _, predicted = t.max(outputs.data, 1)   # index of the highest score = predicted class
    total += labels.size(0)
    correct += (predicted == labels).sum().item()
print("Accuracy on the 10000 test images: %d %%" % (100 * correct / total))
print(correct, total)
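
The overall percentage hides how the network performs on each class; the same loop can be extended to a per-class breakdown (a minimal sketch, not part of the original code):

# Count correct predictions separately for each of the 10 classes
class_correct = [0] * 10
class_total = [0] * 10
for data in testloader:
    images, labels = data
    outputs = net(images)
    _, predicted = t.max(outputs, 1)
    for label, pred in zip(labels, predicted):
        class_total[label.item()] += 1
        class_correct[label.item()] += int(label.item() == pred.item())

for i in range(10):
    print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / max(class_total[i], 1)))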
