PyTorch learning: ResNet for CIFAR10 classification

# -*- coding: utf-8 -*-
"""
Created on Tue Sep  4 21:17:05 2018

@author: www
"""

import sys
sys.path.append("...")

import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torchvision.datasets import CIFAR10

def conv3x3(in_channel, out_channel, stride=1):
     return nn.Conv2d(in_channel, out_channel, 3, stride=stride, padding=1, bias=False)
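
# With kernel size 3 and padding 1, this convolution keeps the spatial size
# unchanged at stride 1 and halves it at stride 2:
# out = floor((in + 2*padding - kernel) / stride) + 1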
     
class residual_block(nn.Module):
     def __init__(self, in_channel, out_channel, same_shape=True):
          super(residual_block, self).__init__()
          self.same_shape = same_shape
          stride = 1 if self.same_shape else 2
          
          self.conv1 = conv3x3(in_channel, out_channel, stride=stride)
          self.bn1 = nn.BatchNorm2d(out_channel)
          
          self.conv2 = conv3x3(out_channel, out_channel)
          self.bn2 = nn.BatchNorm2d(out_channel)
          if not self.same_shape:
               self.conv3 = nn.Conv2d(in_channel, out_channel, 1, stride=stride)
          
     def forward(self, x):
          out = self.conv1(x)
          out = F.relu(self.bn1(out), True)
          out = self.conv2(out)
          out = F.relu(self.bn2(out), True)
          
          if not self.same_shape:
               x = self.conv3(x)
          return F.relu(x+out, True)
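
# In the block above, the shortcut adds the input x to the output of the
# residual branch. When the shapes differ (stride 2 and/or a different
# channel count), the 1x1 convolution conv3 first projects x to match.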
          
# Test the input and output of a single residual block
# Input and output shapes are the same
test_net = residual_block(32, 32)
test_x = torch.zeros(1, 32, 96, 96)
print('input: {}'.format(test_x.shape))
test_y = test_net(test_x)
print('output:{}'.format(test_y.shape))

## Input and output shapes differ
test_net = residual_block(32, 32, False)
test_x = torch.zeros(1, 32, 96, 96)
print('input: {}'.format(test_x.shape))
test_y = test_net(test_x)
print('output: {}'.format(test_y.shape))
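
# Expected prints, from the output-size formula above:
# same shape:      input and output are both torch.Size([1, 32, 96, 96])
# different shape: output becomes torch.Size([1, 32, 48, 48])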

# Now let's implement a ResNet: it is just a stack of residual blocks
class resnet(nn.Module):
     def __init__(self, in_channel, num_classes, verbose=False):
          super(resnet, self).__init__()
          self.verbose = verbose
          
          self.block1 = nn.Conv2d(in_channel, 64, 7, 2)
          
          self.block2 = nn.Sequential(
               nn.MaxPool2d(3, 2),
               residual_block(64, 64),
               residual_block(64, 64)
          )
          self.block3 = nn.Sequential(
               residual_block(64, 128, False),
               residual_block(128, 128)
          )
          self.block4 = nn.Sequential(
               residual_block(128, 256, False),
               residual_block(256, 256)
          )
          self.block5 = nn.Sequential(
               residual_block(256, 512, False),
               residual_block(512, 512),
               nn.AvgPool2d(3)
          )
          self.classifier = nn.Linear(512, num_classes)
          
     def forward(self, x):
          x = self.block1(x)
          if self.verbose:
               print('block 1 output: {}'.format(x.shape))
          x = self.block2(x)
          if self.verbose:
               print('block 2 output: {}'.format(x.shape))
          x = self.block3(x)
          if self.verbose:
               print('block 3 output: {}'.format(x.shape))
          x = self.block4(x)
          if self.verbose:
               print('block 4 output: {}'.format(x.shape))
          x = self.block5(x)
          if self.verbose:
               print('block 5 output: {}'.format(x.shape))
          x = x.view(x.shape[0], -1)
          x = self.classifier(x)
          return x
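
# With two residual blocks in each of the four stages (64, 128, 256, 512
# channels), this layout roughly mirrors ResNet-18, minus the BatchNorm and
# ReLU that usually follow the first 7x7 convolution.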
          
# Print the output size after each block
test_net = resnet(3, 10, True)
test_x = torch.zeros(1, 3, 96, 96)
test_y = test_net(test_x)
print('output: {}'.format(test_y.shape))      
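
# For a 1 x 3 x 96 x 96 input, the prints above should read:
# block 1 output: torch.Size([1, 64, 45, 45])
# block 2 output: torch.Size([1, 64, 22, 22])
# block 3 output: torch.Size([1, 128, 11, 11])
# block 4 output: torch.Size([1, 256, 6, 6])
# block 5 output: torch.Size([1, 512, 1, 1])
# output: torch.Size([1, 10])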

def data_tf(x):
    x = x.resize((96, 96), 2) # upsample the image to 96 x 96 (2 = PIL bilinear)
    x = np.array(x, dtype='float32') / 255
    x = (x - 0.5) / 0.5 # normalization; this trick will be covered later
    x = x.transpose((2, 0, 1)) # put the channel dimension first, as PyTorch expects
    x = torch.from_numpy(x)
    return x
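
# The same preprocessing can also be expressed with torchvision.transforms;
# a minimal equivalent sketch (data_tf_alt is just an illustrative name, and
# transforms.Resize defaults to bilinear interpolation):
from torchvision import transforms

data_tf_alt = transforms.Compose([
    transforms.Resize((96, 96)),
    transforms.ToTensor(),  # HWC [0, 255] -> CHW float [0, 1]
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # to [-1, 1]
])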
     
train_set = CIFAR10('./data', train=True, transform=data_tf, download=True)
train_data = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)
test_set = CIFAR10('./data', train=False, transform=data_tf, download=True)
test_data = torch.utils.data.DataLoader(test_set, batch_size=128, shuffle=False)

net = resnet(3, 10)
optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
criterion = nn.CrossEntropyLoss()
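
# A common refinement (not used in the original run above) is SGD with
# momentum and a little weight decay, e.g.:
# optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)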

from datetime import datetime

def get_acc(output, label):
     total = output.shape[0]
     _, pred_label = output.max(1)
     num_correct = (pred_label == label).sum().item()
     return num_correct / total

def train(net, train_data, valid_data, num_epochs, optimizer, criterion):
     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
     net = net.to(device)
     prev_time = datetime.now()
     for epoch in range(num_epochs):
          train_loss = 0
          train_acc = 0
          net = net.train()
          for im, label in train_data:
               im = im.to(device)
               label = label.to(device)
               # forward
               output = net(im)
               loss = criterion(output, label)
               # backward
               optimizer.zero_grad()
               loss.backward()
               optimizer.step()
               
               train_loss += loss.item()
               train_acc += get_acc(output, label)
          cur_time = datetime.now()
          h, remainder = divmod((cur_time-prev_time).seconds, 3600)
          m, s = divmod(remainder, 60)
          time_str = "Time %02d:%02d:%02d" % (h, m, s)
          if valid_data is not None:
               valid_loss = 0
               valid_acc = 0
               net = net.eval()
               with torch.no_grad():
                    for im, label in valid_data:
                         im = im.to(device)
                         label = label.to(device)
                         output = net(im)
                         loss = criterion(output, label)
                         valid_loss += loss.item()
                         valid_acc += get_acc(output, label)
               epoch_str = (
                "Epoch %d. Train Loss: %f, Train Acc: %f, Valid Loss: %f, Valid Acc: %f, "
                % (epoch, train_loss / len(train_data),
                   train_acc / len(train_data), valid_loss / len(valid_data),
                   valid_acc / len(valid_data)))
          else:
               epoch_str = ("Epoch %d. Train Loss: %f, Train Acc: %f, " %
                         (epoch, train_loss / len(train_data),
                          train_acc / len(train_data)))
               
          prev_time = cur_time
          print(epoch_str + time_str)
               
                                                  
# Note: the test set doubles as the validation set here.
train(net, train_data, test_data, 20, optimizer, criterion)
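
# After training, the weights can be saved and reused for inference; a
# minimal sketch (the checkpoint filename is an arbitrary choice):
torch.save(net.state_dict(), 'resnet_cifar10.pth')

net.eval()
with torch.no_grad():
     im, label = next(iter(test_data))
     if torch.cuda.is_available():
          im = im.cuda()  # train() moved the net to the GPU when available
     pred = net(im).argmax(1)  # predicted class index for each image
     print(pred[:10])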
          
# ResNet's cross-layer shortcut connections make it possible to train very
# deep convolutional networks. Its simple, uniform convolution configuration
# also makes the architecture easy to extend.
          
          

 
