These are notes from my own process of learning PyTorch. They are not meant as a summary; I only record the operations that are commonly used and important in practice, so I can look them up later.
import torch
import torchvision
import torchvision.transforms as transforms
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])  # torchvision.transforms.Normalize(mean, std)
# This transform operates on a torch.*Tensor: given a per-channel mean (R, G, B) and
# std (R, G, B), it normalizes each channel with channel = (channel - mean) / std.
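# With mean = std = 0.5 this maps pixel values from [0, 1] (the range produced by
# ToTensor) to [-1, 1]. A minimal check of the arithmetic on a made-up 3x1x1 "image":
example = torch.tensor([[[0.0]], [[0.5]], [[1.0]]])                     # shape (3, 1, 1)
print(transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(example))  # -> -1., 0., 1.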
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=50,
                                          shuffle=True, num_workers=1)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=50,
                                         shuffle=False, num_workers=1)
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# each CIFAR-10 image is 3*32*32 (channels * height * width)
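# A quick shape check on one batch (guarded so DataLoader worker processes
# don't re-run it when they import this module):
if __name__ == '__main__':
    sample_images, sample_labels = next(iter(trainloader))
    print(sample_images.shape)   # torch.Size([50, 3, 32, 32]) -- batch_size=50 images of 3x32x32
    print(sample_labels.shape)   # torch.Size([50]) -- one integer class label per image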
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Sequential(          # input shape (3, 32, 32)
            nn.Conv2d(
                in_channels=3,               # number of input channels (RGB)
                out_channels=16,             # n_filters
                kernel_size=5,               # filter size
                stride=1,                    # filter movement/step
                padding=2,                   # to keep the same width and height after Conv2d: padding = (kernel_size - 1) / 2 when stride = 1
            ),                               # output shape (16, 32, 32)
            nn.ReLU(),                       # activation
            nn.MaxPool2d(kernel_size=2),     # take the max value in each 2x2 area, output shape (16, 16, 16)
        )
        self.conv2 = nn.Sequential(          # input shape (16, 16, 16)
            nn.Conv2d(16, 32, 5, 1, 2),      # output shape (32, 16, 16)
            nn.ReLU(),                       # activation
            nn.MaxPool2d(2),                 # output shape (32, 8, 8)
        )
        self.out = nn.Linear(32 * 8 * 8, 10) # fully connected layer, output 10 classes

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = x.view(-1, 32 * 8 * 8)           # flatten the output of conv2 to (batch_size, 32 * 8 * 8)
        output = self.out(x)
        return output                        # raw class scores (logits)
# our model
net = Net()
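# A minimal sanity check of the architecture above: a dummy batch of two
# 3x32x32 images should come out as two rows of 10 class scores.
dummy = torch.randn(2, 3, 32, 32)
print(net(dummy).shape)   # torch.Size([2, 10])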
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)
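# nn.CrossEntropyLoss expects raw logits and integer class indices, and applies
# log-softmax internally -- which is why Net has no softmax layer. A tiny
# illustration with made-up numbers (3 classes instead of 10):
toy_logits = torch.tensor([[2.0, 0.1, -1.0]])
toy_target = torch.tensor([0])
print(criterion(toy_logits, toy_target))   # small loss, since class 0 already has the largest logit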
if __name__ == '__main__':
    # Train the network
    for epoch in range(1):
        for i, data in enumerate(trainloader):
            inputs, labels = data
            inputs, labels = Variable(inputs), Variable(labels)
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            optimizer.zero_grad()   # clear gradients left over from the previous step
            loss.backward()         # backpropagate
            optimizer.step()        # apply the gradients
    print("Finished Training")

    print("Beginning Testing")
    correct = 0
    total = 0
    for data in testloader:
        images, labels = data
        outputs = net(Variable(images))
        predicted = torch.max(outputs, 1)[1].data.numpy()   # index of the largest score = predicted class
        total += labels.size(0)
        correct += (predicted == labels.data.numpy()).sum()
    print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
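    # To avoid retraining next time, the learned weights can be saved and reloaded
    # later. A brief sketch, still inside the __main__ guard; the filename is arbitrary:
    torch.save(net.state_dict(), './cifar_net.pth')
    # net.load_state_dict(torch.load('./cifar_net.pth'))   # reload into a fresh Net()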