import torch as t
import torchvision as tv
import torchvision.transforms as transforms
from torchvision.transforms import ToPILImage
import matplotlib.pyplot as plt
show=ToPILImage() # converts a Tensor back to a PIL Image for easy visualization
# On first run torchvision downloads CIFAR-10 automatically; if it is already
# downloaded, point `root` at the existing copy.
# Preprocessing applied to every image:
transform=transforms.Compose([
transforms.ToTensor(), # PIL image -> float tensor with values in [0,1]
transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5)) # per channel: (x-0.5)/0.5 maps [0,1] -> [-1,1]
])
# Training split of CIFAR-10 (downloaded on first use), with the
# ToTensor + Normalize preprocessing defined above.
trainset = tv.datasets.CIFAR10(
    root='/home/cy/data',
    train=True,
    download=True,
    transform=transform,
)
trainloader = t.utils.data.DataLoader(
    trainset, batch_size=4, shuffle=True, num_workers=0)

# Test split: same preprocessing, never shuffled.
testset = tv.datasets.CIFAR10(
    '/home/cy/data/',
    train=False,
    download=True,
    transform=transform,
)
testloader = t.utils.data.DataLoader(
    testset, batch_size=4, shuffle=False, num_workers=2)

# Human-readable names for the ten CIFAR-10 class indices.
classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')
# Peek at one training sample: trainset[i] returns (image_tensor, class_index).
(data,label)=trainset[100]
print(classes[label])
# (data+1)/2 undoes the Normalize((0.5,...),(0.5,...)) step above,
# mapping values from [-1,1] back to [0,1] for display
img=show((data+1)/2).resize((100,100))
plt.imshow(img)
# NOTE(review): the triple-quoted string below is disabled demo code that
# shows one batch; if re-enabled, `dataiter.next()` must become
# `next(dataiter)` — the .next() method no longer exists.
'''
dataiter=iter(trainloader)
images,labels=dataiter.next()#返回4张图片及标签
print(''.join('%11s'%classes[labels[j]] for j in range(4)))
show(tv.utils.make_grid((images+1)/2)).resize((400,100))
'''
import torch as t
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Net(nn.Module):
    """LeNet-style CNN classifying 3x32x32 images into 10 classes."""

    def __init__(self):
        # nn.Module subclasses must run the parent constructor first.
        super().__init__()
        # Two convolution layers with 5x5 kernels:
        # 3 input channels (RGB) -> 6 feature maps -> 16 feature maps.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Fully connected head (y = Wx + b): after two conv+pool stages a
        # 32x32 input shrinks to 16 maps of 5x5 -> 16*5*5 features.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return raw class scores (logits) of shape (batch, 10).

        Only forward needs defining; backward is derived by autograd.
        """
        # conv -> ReLU -> 2x2 max-pool, twice
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        # flatten every sample's feature maps into a single vector
        x = x.view(x.size(0), -1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)
# Instantiate the network and print its layer summary.
net = Net()
print(net)

from torch import optim

# Multi-class classification: cross-entropy loss, optimized with
# SGD + momentum.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
# Train the network.
# Each step: fetch a batch, forward pass, compute loss, backward pass,
# update parameters.
# The __main__ guard keeps the training loop from running on import
# (and is required when DataLoader worker processes re-import the file).
if __name__ == '__main__':
    for epoch in range(2):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # unpack the batch; the old Variable(...) wrapper was removed —
            # it has been a no-op since PyTorch 0.4 (tensors carry autograd
            # state directly, as the loss.item() call below already assumes)
            inputs, labels = data
            # clear gradients accumulated from the previous step
            optimizer.zero_grad()
            # forward + backward
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            # update parameters
            optimizer.step()
            # accumulate the loss and log every 2000 mini-batches
            running_loss += loss.item()
            if i % 2000 == 1999:
                print('[%d,%5d] loss:%.3f'
                      % (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0
    print('Finished Training')
if __name__ == '__main__':
    # Show one test batch with the net's predictions, then measure
    # accuracy over the whole test set.
    dataiter = iter(testloader)
    # BUGFIX: dataiter.next() raises AttributeError on Python 3 /
    # current PyTorch — the iterator protocol is next(dataiter).
    images, labels = next(dataiter)  # one batch of 4 images
    print('实际的label: ', ' '.join(
        '%08s' % classes[labels[j]] for j in range(4)))
    # BUGFIX: undoing Normalize((0.5,...),(0.5,...)) is x/2 + 0.5
    # (inverse of (x-0.5)/0.5), not x/2 - 0.5.
    plt.imshow(show(tv.utils.make_grid(images / 2 + 0.5)).resize((400, 100)))
    # Inference needs no gradient tracking; no_grad saves memory/time.
    with t.no_grad():
        outputs = net(images)
    # index of the highest-scoring class for each sample
    _, predicted = t.max(outputs, 1)
    print('预测结果:', ' '.join(
        '%5s' % classes[predicted[j]] for j in range(4)))
    correct = 0  # number of correctly classified images
    total = 0    # number of images seen
    with t.no_grad():
        for data in testloader:
            images, labels = data
            outputs = net(images)
            _, predicted = t.max(outputs, 1)
            total += labels.size(0)
            # .item() converts the 0-dim count tensor to a Python int,
            # so the percentage below is plain Python arithmetic
            correct += (predicted == labels).sum().item()
    print('10000张测试集中的准确率为:%d %%' % (100 * correct / total))
'''
在GPU上训练
if t.cuda.is_available():
net.cuda()
images=images.cuda()
labels=labels.cuda()
output=net(Variable(images))
loss=criterion(output,Variable(labels))