How to Process Data with the PyTorch Package
The typical workflow:
(1) Load and normalize the CIFAR10 training and test datasets with torchvision
(2) Define a convolutional neural network
(3) Define a loss function
(4) Train the network on the training data
(5) Test the network on the test data
(1) Load and normalize the CIFAR10 training and test datasets with torchvision
# Import packages
import torch
import torchvision
import torchvision.transforms as transforms  # used for data normalization
# transforms.Compose([]) chains a sequence of image transforms; note that they are passed as a list.
transform = transforms.Compose(
    [transforms.ToTensor(),  # converts the image to a tensor with values in [0, 1]
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])  # normalizes the tensor to [-1, 1]
# The first (0.5, 0.5, 0.5) is the mean of the R, G, B channels and the second (0.5, 0.5, 0.5)
# is the per-channel standard deviation. The formula is image = (image - mean) / std, so the
# minimum value 0 becomes (0 - 0.5) / 0.5 = -1.
# Note: these statistics differ between datasets; for your own dataset it is recommended to
# compute the per-channel mean and std (a sketch follows the data-loading code below).
# Build the training dataset. root is the dataset path; train=True selects the training split;
# download=True downloads the data (it can be set to False afterwards);
# transform applies the normalization pipeline defined above.
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
# Load the training set. shuffle=True shuffles the data; batch_size=4 sets the batch size to 4.
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)
# Load the test set
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)
# Dataset classes
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
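As noted above, the (0.5, 0.5, 0.5) statistics are only a convenient default. For a custom dataset, a minimal sketch like the following (not part of the original tutorial; it assumes the images are loaded with only ToTensor()) can estimate the per-channel mean and std:
raw_set = torchvision.datasets.CIFAR10(root='./data', train=True,
                                       download=True,
                                       transform=transforms.ToTensor())
raw_loader = torch.utils.data.DataLoader(raw_set, batch_size=1000, num_workers=2)
n_pixels = 0
channel_sum = torch.zeros(3)
channel_sq_sum = torch.zeros(3)
for images, _ in raw_loader:
    # images has shape [B, 3, 32, 32]; accumulate sums over batch, height and width
    n_pixels += images.numel() // 3
    channel_sum += images.sum(dim=[0, 2, 3])
    channel_sq_sum += (images ** 2).sum(dim=[0, 2, 3])
mean = channel_sum / n_pixels
std = (channel_sq_sum / n_pixels - mean ** 2).sqrt()
print(mean, std)  # pass these to transforms.Normalize(mean, std)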
Display some training images
import matplotlib.pyplot as plt
import numpy as np
def imshow(img):
    img = img / 2 + 0.5     # unnormalize: the inverse of the Normalize transform above
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()
# get some random training images
dataiter = iter(trainloader)
images, labels = next(dataiter)  # use next(dataiter); the .next() method is not available in recent PyTorch versions
# show images
imshow(torchvision.utils.make_grid(images))
# print labels
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
# Output (your labels will differ because shuffle=True)
deer ship truck deer
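Since batch_size=4 and CIFAR10 images are 3-channel 32x32, each batch drawn from trainloader is a tensor of shape [4, 3, 32, 32] with a label tensor of shape [4]; a quick check (illustrative, not from the tutorial):
print(images.shape)   # torch.Size([4, 3, 32, 32])
print(labels.shape)   # torch.Size([4])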
(2) Define a convolutional neural network
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
net = Net()
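The 16 * 5 * 5 input size of fc1 follows from the layer arithmetic on a 32x32 image: conv1 (5x5 kernel) gives 28x28, pooling halves it to 14x14, conv2 gives 10x10, and the second pooling gives 5x5 with 16 channels. A quick sketch (an illustrative check, not part of the original) confirms this with a dummy input:
with torch.no_grad():
    dummy = torch.zeros(1, 3, 32, 32)       # one fake CIFAR10-sized image
    x = net.pool(F.relu(net.conv1(dummy)))
    print(x.shape)                           # torch.Size([1, 6, 14, 14])
    x = net.pool(F.relu(net.conv2(x)))
    print(x.shape)                           # torch.Size([1, 16, 5, 5]) -> fc1 takes 16 * 5 * 5 inputs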
(3) Define a loss function and optimizer
import torch.optim as optim
# Define the cross-entropy loss
criterion = nn.CrossEntropyLoss()
# Optimizer: stochastic gradient descent; lr is the learning rate, momentum is the momentum term.
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
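nn.CrossEntropyLoss combines LogSoftmax and NLLLoss, so it expects raw logits of shape [batch, 10] straight from the network and integer class labels of shape [batch], with no one-hot encoding. A tiny illustration (not from the tutorial):
dummy_logits = torch.randn(4, 10)             # raw outputs for a batch of 4
dummy_labels = torch.tensor([3, 8, 0, 5])     # integer class indices
print(criterion(dummy_logits, dummy_labels))  # a scalar tensor, averaged over the batch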
(4) Train the network on the training data
for epoch in range(2):  # train for 2 epochs
    running_loss = 0.0  # initialize the loss to 0
    # enumerate() wraps an iterable into an index sequence, yielding both the index and the data.
    # The second argument 0 means the index starts at 0 (with 1 it would start at 1).
    for i, data in enumerate(trainloader, 0):  # i is the batch index; data is one batch from trainloader
        inputs, labels = data  # each batch consists of images (inputs) and labels
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward pass to get the outputs
        outputs = net(inputs)
        # compute the loss
        loss = criterion(outputs, labels)
        # backpropagate the loss
        loss.backward()
        # optimizer update step
        optimizer.step()
        # accumulate the loss
        running_loss += loss.item()  # loss.item() extracts the Python number from the loss tensor
        if i % 2000 == 1999:  # print statistics every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')
# Output
[1, 2000] loss: 2.187
[1, 4000] loss: 1.852
[1, 6000] loss: 1.672
[1, 8000] loss: 1.566
[1, 10000] loss: 1.490
[1, 12000] loss: 1.461
[2, 2000] loss: 1.389
[2, 4000] loss: 1.364
[2, 6000] loss: 1.343
[2, 8000] loss: 1.318
[2, 10000] loss: 1.282
[2, 12000] loss: 1.286
Finished Training
(5) Test the network on the test data
correct = 0
total = 0
with torch.no_grad():  # disable gradient tracking
    for data in testloader:  # iterate over the test set
        images, labels = data
        outputs = net(images)
        # torch.max returns (values, indices); the values are not needed, hence the conventional _.
        # predicted holds the index of the maximum in each row, i.e. the predicted class.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
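For reference, torch.max(tensor, 1) returns a (values, indices) pair computed along dimension 1, one entry per row; a tiny illustration (not from the tutorial):
scores = torch.tensor([[0.1, 2.0, -1.0],
                       [3.0, 0.5, 0.2]])
values, indices = torch.max(scores, 1)
print(values)   # tensor([2., 3.])
print(indices)  # tensor([1, 0]) -> the predicted class for each row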
Full program
import torch
import torchvision  # torchvision provides common datasets, models, transforms, etc.
import torchvision.transforms as transforms
# Image normalization
transform = transforms.Compose(
    [transforms.ToTensor(),  # converts the image to a tensor with values in [0, 1]
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
    # maps the [0, 1] tensor to [-1, 1]: the first (0.5, 0.5, 0.5) is the mean of the
    # R, G, B channels and the second is the per-channel std. The formula is
    # image = (image - mean) / std, so the minimum value 0 becomes (0 - 0.5) / 0.5 = -1.
    # Note: these statistics differ between datasets; for your own dataset it is
    # recommended to compute the per-channel mean and std.
)
# Read the training set with torchvision
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=False, transform=transform)
# Wrap the training set in a DataLoader and set the batch size and other parameters
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)
# Read the test set
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=False, transform=transform)
# Wrap it in a DataLoader
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)
# Dataset classes
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
import matplotlib.pyplot as plt
import numpy as np
def imshow(img):
    img = img/2+0.5  # unnormalize
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    plt.show()
# if __name__ == '__main__':
# # get some random training images
# dataiter = iter(trainloader)
# images, labels = next(dataiter)
#
# # show images
# imshow(torchvision.utils.make_grid(images))
# # print labels
# print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
net = Net()
def train(trainloader):
    import torch.optim as optim
    # Define the cross-entropy loss
    criterion = nn.CrossEntropyLoss()
    # Build the optimizer: stochastic gradient descent over the network parameters net.parameters()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    # Train the network
    for epoch in range(2):  # loop over the dataset multiple times (epochs)
        # initialize the loss
        running_loss = 0.0
        # Loop over trainloader. enumerate() wraps an iterable into an index sequence,
        # yielding both the index and the data; the 0 means the index starts at 0.
        for i, data in enumerate(trainloader, 0):  # i is the iteration index
            # data contains the image tensors (inputs) and the label tensors (labels)
            inputs, labels = data
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward pass
            outputs = net(inputs)
            # compute the loss
            loss = criterion(outputs, labels)
            # backward pass
            loss.backward()
            # update the parameters
            optimizer.step()
            # accumulate the loss
            running_loss += loss.item()  # loss.item() extracts the value of the loss tensor; += sums the batch losses
            if i % 2000 == 1999:  # print every 2000 mini-batches
                print('[%d, %5d] loss: %.3f' %  # %d integer, %5d integer padded to width 5, %.3f three decimal places
                      (epoch + 1, i + 1, running_loss / 2000))  # +1 because the indices start at 0
                # reset the loss and continue with the next block of batches
                running_loss = 0.0
    print('Finished Training')
# if __name__ == '__main__':
#     dataiter = iter(testloader)  # create an iterator over the test data
#     images, labels = next(dataiter)
#     # imshow(torchvision.utils.make_grid(images))
#     # print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
#     outputs = net(images)
#     # print(outputs.size())
#     _, predicted = torch.max(outputs, 1)  # _ holds the row maxima; predicted holds the index of the maximum in each row
#
#     # print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
#     #                               for j in range(4)))
def test10000(testloader):
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 test images: %d %%' % (
        100 * correct / total))
def test_performed(testloader):
    # Create two lists of ten zeros, [0., 0., ..., 0.]
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    # During testing, disable gradient tracking to save memory (GPU memory)
    with torch.no_grad():
        # Read images and labels from testloader
        for data in testloader:
            # one batch (batch size 4) of images and labels
            images, labels = data
            outputs = net(images)
            # torch.max returns (values, indices) along dimension 1 (the class dimension);
            # predicted holds the index of the maximum in each row, i.e. the predicted label.
            # print(outputs.size())
            _, predicted = torch.max(outputs.data, 1)
            # element-wise comparison of predictions and labels; squeeze drops extra dimensions
            c = (predicted == labels).squeeze()
            # within each batch of 4, count the correct predictions per class
            for i in range(4):
                label = labels[i]
                # add 1 to the correct count for this class when the prediction matches
                class_correct[label] += c[i].item()
                class_total[label] += 1
    for i in range(10):
        print('Accuracy of %5s : %2d %%' % (
            # print each class name and its prediction accuracy
            classes[i], 100 * class_correct[i] / class_total[i]
        ))
if __name__ == '__main__':
    train(trainloader)
    test10000(testloader)
Reference
http://pytorch123.com/SecondSection/training_a_classifier/