PyTorch implementation of DenseNet on Fashion-MNIST
from __future__ import print_function
import time
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
from torch import optim
from torchvision.transforms import ToPILImage
show = ToPILImage()
import numpy as np
import matplotlib.pyplot as plt
batchSize = 32

## load data
# Fashion-MNIST images are single-channel, so Normalize takes one mean/std pair
transform = transforms.Compose([transforms.Resize(96),
                                transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])
trainset = torchvision.datasets.FashionMNIST(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batchSize, shuffle=True, num_workers=0)
testset = torchvision.datasets.FashionMNIST(root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batchSize, shuffle=False, num_workers=0)
# Fashion-MNIST class names (the original listed the CIFAR-10 classes by mistake)
classes = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
           'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot')
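# Sanity check (a sketch, not in the original script): after Resize(96), one
# batch from trainloader should be 32 single-channel 96x96 images.
images, labels = next(iter(trainloader))
print(images.shape, labels.shape)  # expect: torch.Size([32, 1, 96, 96]) torch.Size([32])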
def imshow(img):
    img = img / 2 + 0.5     # undo the normalization
    npimg = img.numpy()
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
#### network
class conv_blk(nn.Module):
    def __init__(self, in_channel, num_channel):
        super(conv_blk, self).__init__()
        # BN -> ReLU -> 3x3 conv: the basic unit of a dense block
        self.blk = nn.Sequential(nn.BatchNorm2d(in_channel, eps=1e-3),
                                 nn.ReLU(),
                                 nn.Conv2d(in_channels=in_channel, out_channels=num_channel,
                                           kernel_size=3, padding=1))

    def forward(self, x):
        return self.blk(x)


class DenseBlock(nn.Module):
    def __init__(self, in_channel, num_convs, num_channels):
        super(DenseBlock, self).__init__()
        layers = []
        for i in range(num_convs):
            layers += [conv_blk(in_channel, num_channels)]
            # concatenation grows the input of each later conv_blk
            in_channel = in_channel + num_channels
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        for blk in self.net:
            y = blk(x)
            x = torch.cat((x, y), dim=1)  # concatenate along the channel dimension
        return x
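# A minimal shape check (ours, not from the original): a DenseBlock with 2
# conv_blks of 10 channels turns a 3-channel input into 3 + 2*10 = 23 channels
# while keeping the spatial size, since every conv output is concatenated.
blk = DenseBlock(3, 2, 10)
X = torch.rand(4, 3, 8, 8)
Y = blk(X)
print(Y.shape)  # expect: torch.Size([4, 23, 8, 8])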
def transition_blk(in_channel, num_channels):
    # transition layer: a 1x1 conv shrinks the channel count and
    # stride-2 average pooling halves the spatial size
    blk = nn.Sequential(nn.BatchNorm2d(in_channel, eps=1e-3),
                        nn.ReLU(),
                        nn.Conv2d(in_channels=in_channel, out_channels=num_channels, kernel_size=1),
                        nn.AvgPool2d(kernel_size=2, stride=2))
    return blk
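# Matching sketch for the transition layer: the 1x1 conv cuts the 23 channels
# to 10 and the stride-2 average pooling halves the 8x8 feature map to 4x4.
trans = transition_blk(23, 10)
print(trans(Y).shape)  # expect: torch.Size([4, 10, 4, 4])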
class DenseNet(nn.Module):
    def __init__(self, in_channel, num_classes):
        super(DenseNet, self).__init__()
        self.block1 = nn.Sequential(nn.Conv2d(in_channels=in_channel, out_channels=64,
                                              kernel_size=7, stride=2, padding=3),
                                    nn.BatchNorm2d(64, eps=1e-3),
                                    nn.ReLU(),
                                    nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
        num_channels, growth_rate = 64, 32  # num_channels: the current channel count
        num_convs_in_dense_blocks = [4, 4, 4, 4]
        layers = []
        for i, num_convs in enumerate(num_convs_in_dense_blocks):
            layers += [DenseBlock(num_channels, num_convs, growth_rate)]
            num_channels += num_convs * growth_rate
            # a transition layer between dense blocks halves the channel count
            if i != len(num_convs_in_dense_blocks) - 1:
                layers += [transition_blk(num_channels, num_channels // 2)]
                num_channels = num_channels // 2
        layers += [nn.BatchNorm2d(num_channels), nn.ReLU(), nn.AvgPool2d(kernel_size=3)]
        self.block2 = nn.Sequential(*layers)
        self.dense = nn.Linear(248, num_classes)

    def forward(self, x):
        y = self.block1(x)
        y = self.block2(y)
        y = y.view(-1, 248)  # flatten the 248x1x1 feature map
        y = self.dense(y)
        return y
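# Where the hard-coded 248 comes from (a sketch, not in the original script):
# channels grow 64 -> 64+4*32=192 -> 96 -> 224 -> 112 -> 240 -> 120 -> 248,
# while the 96x96 input shrinks 96 -> 48 -> 24 -> 12 -> 6 -> 3 -> 1.
tmp_net = DenseNet(1, 10)           # throwaway CPU copy, just for the shape trace
tmp_x = torch.rand(2, 1, 96, 96)    # two fake single-channel 96x96 images
tmp_y = tmp_net.block1(tmp_x)
print(tmp_y.shape)                  # expect: torch.Size([2, 64, 24, 24])
print(tmp_net.block2(tmp_y).shape)  # expect: torch.Size([2, 248, 1, 1])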
net = DenseNet(1, 10).cuda()
print(net)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
# train
print("training begin")
for epoch in range(3):
    start = time.time()
    running_loss = 0
    for i, data in enumerate(trainloader, 0):
        image, label = data
        image = image.cuda()
        label = label.cuda()
        # imshow(torchvision.utils.make_grid(image.cpu()))
        # plt.show()

        optimizer.zero_grad()
        outputs = net(image)
        loss = criterion(outputs, label)
        loss.backward()
        optimizer.step()

        # .item() extracts a Python number instead of holding a tensor
        running_loss += loss.item()
        if i % 100 == 99:
            end = time.time()
            print('[epoch %d,imgs %5d] loss: %.7f time: %0.3f s' % (epoch + 1, (i + 1) * batchSize, running_loss / 100, (end - start)))
            start = time.time()
            running_loss = 0
print("finish training")
# test
net.eval()
correct = 0
total = 0
with torch.no_grad():  # no gradients needed during evaluation
    for data in testloader:
        images, labels = data
        images = images.cuda()
        labels = labels.cuda()
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the %d test images: %d %%' % (total, 100 * correct / total))
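# Hedged extension (ours, not part of the original run): per-class accuracy,
# which also puts the classes tuple defined above to use.
class_correct = [0] * 10
class_total = [0] * 10
with torch.no_grad():
    for data in testloader:
        images, labels = data
        images, labels = images.cuda(), labels.cuda()
        _, predicted = torch.max(net(images), 1)
        for lbl, pred in zip(labels, predicted):
            class_total[lbl.item()] += 1
            class_correct[lbl.item()] += int(lbl == pred)
for i in range(10):
    print('%-12s: %.1f %%' % (classes[i], 100.0 * class_correct[i] / max(class_total[i], 1)))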
Run output:
DenseNet(
  (block1): Sequential(
    (0): Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3))
    (1): BatchNorm2d(64, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
    (2): ReLU()
    (3): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
  )
  (block2): Sequential(
    (0): DenseBlock(
      (net): Sequential(
        (0): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(64, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(64, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
        (1): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(96, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(96, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
        (2): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(128, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
        (3): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(160, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(160, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
      )
    )
    (1): Sequential(
      (0): BatchNorm2d(192, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
      (1): ReLU()
      (2): Conv2d(192, 96, kernel_size=(1, 1), stride=(1, 1))
      (3): AvgPool2d(kernel_size=2, stride=2, padding=0)
    )
    (2): DenseBlock(
      (net): Sequential(
        (0): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(96, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(96, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
        (1): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(128, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(128, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
        (2): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(160, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(160, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
        (3): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(192, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(192, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
      )
    )
    (3): Sequential(
      (0): BatchNorm2d(224, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
      (1): ReLU()
      (2): Conv2d(224, 112, kernel_size=(1, 1), stride=(1, 1))
      (3): AvgPool2d(kernel_size=2, stride=2, padding=0)
    )
    (4): DenseBlock(
      (net): Sequential(
        (0): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(112, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(112, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
        (1): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(144, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(144, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
        (2): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(176, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(176, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
        (3): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(208, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(208, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
      )
    )
    (5): Sequential(
      (0): BatchNorm2d(240, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
      (1): ReLU()
      (2): Conv2d(240, 120, kernel_size=(1, 1), stride=(1, 1))
      (3): AvgPool2d(kernel_size=2, stride=2, padding=0)
    )
    (6): DenseBlock(
      (net): Sequential(
        (0): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(120, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(120, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
        (1): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(152, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(152, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
        (2): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(184, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(184, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
        (3): conv_blk(
          (blk): Sequential(
            (0): BatchNorm2d(216, eps=0.001, momentum=0.1, affine=True, track_running_stats=True)
            (1): ReLU()
            (2): Conv2d(216, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
          )
        )
      )
    )
    (7): BatchNorm2d(248, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (8): ReLU()
    (9): AvgPool2d(kernel_size=3, stride=3, padding=0)
  )
  (dense): Linear(in_features=248, out_features=10, bias=True)
)
training begin
[epoch 1,imgs 12800] loss: 0.8031437 time: 3.746 s
[epoch 1,imgs 25600] loss: 0.4905535 time: 3.741 s
[epoch 1,imgs 38400] loss: 0.4159799 time: 3.723 s
[epoch 1,imgs 51200] loss: 0.3700477 time: 3.730 s
[epoch 2,imgs 12800] loss: 0.3134363 time: 3.735 s
[epoch 2,imgs 25600] loss: 0.3039782 time: 3.712 s
[epoch 2,imgs 38400] loss: 0.2954054 time: 3.730 s
[epoch 2,imgs 51200] loss: 0.2753612 time: 3.740 s
[epoch 3,imgs 12800] loss: 0.2466343 time: 3.724 s
[epoch 3,imgs 25600] loss: 0.2562811 time: 3.729 s
[epoch 3,imgs 38400] loss: 0.2449950 time: 3.736 s
[epoch 3,imgs 51200] loss: 0.2470676 time: 3.724 s
finish training
Accuracy of the network on the 10000 test images: 91 %