PyTorch Learning Diary (4)

Continuing from the previous post: this time I learned how to train an image classification model in PyTorch based on a classic network architecture.

1. Data Preprocessing

        1.1 Data augmentation and preprocessing: built-in functionality of the transforms module in torchvision

import os
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
import torch.optim as optim
import torchvision
from torchvision import transforms,models,datasets
import imageio
import time
import warnings
import random
import sys
import copy
import json
from PIL import Image

# Data loading and preprocessing
data_dir = './flower_data'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'

data_transforms = {                 # data_transforms specifies all image preprocessing operations
    'train': transforms.Compose([transforms.RandomRotation(45),    # random rotation, angle chosen between -45 and 45 degrees
        transforms.CenterCrop(224),         # crop from the center
        transforms.RandomHorizontalFlip(p=0.5),# random horizontal flip with the given probability
        transforms.ColorJitter(brightness=0.2,contrast=0.1,saturation=0.1,hue=0.1),# the four parameters are brightness, contrast, saturation, hue
        transforms.RandomGrayscale(p=0.025),# convert to grayscale with the given probability (still 3 channels, R=G=B)
        transforms.ToTensor(),
        transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])# mean, standard deviation
    ]),
    'valid': transforms.Compose([transforms.Resize(256),
                                 transforms.CenterCrop(224),
                                 transforms.ToTensor(),
                                 transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])]),
}
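
A quick way to confirm the pipeline behaves as expected is to push a dummy image through it. This is a small sketch of my own (the dummy image is not part of the tutorial):

# Sanity check (sketch): run the train pipeline on a dummy 256x256 RGB image
# and confirm the output is a normalized 3x224x224 tensor
dummy = Image.new('RGB', (256, 256))
out = data_transforms['train'](dummy)
print(out.shape)    # torch.Size([3, 224, 224])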

        1.2 Reading batch data directly with the DataLoader module

batch_size = 8
# Load with datasets.ImageFolder: the first argument is the folder path, the second is the preprocessing pipeline
image_datasets = {x : datasets.ImageFolder(os.path.join(data_dir,x),data_transforms[x]) for x in ['train','valid']}
print(image_datasets)
dataloaders = {x:torch.utils.data.DataLoader(image_datasets[x],batch_size = batch_size,shuffle = True) for x in ['train','valid']}
dataset_sizes = {x : len(image_datasets[x]) for x in ['train','valid']}
class_names = image_datasets['train'].classes
print(dataloaders)
print(dataset_sizes)
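
Before going further, it helps to pull one batch and check the shapes. A quick sketch; the shapes follow from batch_size=8 and the 224x224 crops above:

# Fetch one batch to verify shapes
inputs, labels = next(iter(dataloaders['train']))
print(inputs.shape, labels.shape)   # torch.Size([8, 3, 224, 224]) torch.Size([8])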


        1.3 Visualizing the data

# Read the actual names corresponding to the labels
with open('cat_to_name.json','r') as f:     # the json file maps class IDs to actual class names
    cat_to_name = json.load(f)
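
To see what the mapping looks like, print a few entries (the actual names depend on your cat_to_name.json):

# Inspect a few id -> name entries
for k in list(cat_to_name)[:3]:
    print(k, '->', cat_to_name[k])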

# Visualizing the data
# Note: the tensor must be converted to numpy format, and the normalization must be undone
def im_convert(tensor):
    image = tensor.to("cpu").clone().detach()
    image = image.numpy().squeeze()
    image = image.transpose(1,2,0)  # C,H,W -> H,W,C for matplotlib
    image = image * np.array((0.229,0.224,0.225)) + np.array((0.485,0.456,0.406))   # undo Normalize: x * std + mean
    image = image.clip(0,1)

    return image
fig = plt.figure(figsize=(20,12))
columns = 4
rows = 2

dataiter = iter(dataloaders['valid'])
inputs, classes = next(dataiter)    # use next(); the .next() method was removed from newer PyTorch iterators

for idx in range(columns * rows):
    ax = fig.add_subplot(rows,columns,idx+1,xticks = [],yticks = [])
    # classes[idx] is a label index; class_names maps it to the folder name, and cat_to_name to the real name
    ax.set_title(cat_to_name[str(int(class_names[classes[idx]]))])
    plt.imshow(im_convert(inputs[idx]))
plt.show()


2. Network Module Setup

        2.1 Loading a pretrained model:

        torchvision includes many classic network architectures that can be called directly, and you can continue training from their pretrained weights, i.e. transfer learning. However, the tasks those models were trained on differ from ours, so the head of the network must be changed, typically by replacing the final fully connected layer with one that fits your own task. During training you can either train from scratch or train only your own task layer, i.e. the fully connected layer (after first replacing it to fit your needs), because the earlier layers do feature extraction, and that underlying task is the same across datasets. (Note: you can "freeze" the convolutional layers during training, i.e. keep their parameters and weights fixed; in theory, the smaller the dataset, the more convolutional layers should be frozen.)
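
As a minimal sketch of what freezing means (my own helper, not part of the tutorial code): parameters with requires_grad=False receive no gradients and are skipped by the optimizer, and you can count how much of the model remains trainable:

def count_trainable(model):
    # Parameters with requires_grad=False get no gradients and are never updated
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    total = sum(p.numel() for p in model.parameters())
    print('trainable: {} / total: {}'.format(trainable, total))

After freezing a resnet18 backbone and replacing its fc layer as done below, only the new fc parameters should show up as trainable.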

# Load a model from torchvision.models and use its pretrained parameters as the initialization
model_name = 'resnet'
# Whether to use the pretrained features
feature_extract = True

# Whether to train on GPU
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
    print('CUDA is not available. Training on CPU...')
else:
    print('CUDA is available. Training on GPU')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Choose whether to freeze parameters
def set_parameter_requires_grad(model,feature_extracting):
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False

model_ft = models.resnet18()    # instantiate once to inspect the architecture


def initialize_model(model_name, num_class, feature_extract, use_pretrained=True):
    '''
    :param model_name: name of the model to use
    :param num_class: number of target classes
    :param feature_extract: whether to freeze the parameters
    :param use_pretrained: whether to download pretrained weights (cached in the user's torch cache directory)
    :return:
    model: the newly built model
    '''
    model_ft = None
    input_size = 0

    if model_name == 'resnet':
        # Load the model; if use_pretrained is True, the pretrained weights are downloaded
        # (newer torchvision versions use the weights= argument instead of pretrained=)
        model_ft = models.resnet18(pretrained=use_pretrained)

        # Transfer learning
        # model_ft: the chosen model
        # feature_extract: if True, freeze the parameters; otherwise leave them trainable
        set_parameter_requires_grad(model_ft,feature_extract)
        # Get the input size of the last layer (512 for resnet18) and replace the fully connected layer: 512 -> num_class
        num_ftrs = model_ft.fc.in_features
        # Replace the final layer
        model_ft.fc = torch.nn.Sequential(
            torch.nn.Linear(num_ftrs, num_class),
            torch.nn.LogSoftmax(dim=1)
        )
        input_size = 224
    else:
        print('Other model selected; its code has not been written yet...')

    return model_ft, input_size

model_ft,input_size = initialize_model(model_name,102,feature_extract,use_pretrained=True)

# GPU computation
model_ft = model_ft.to(device)
# Filename for saving the model
filename = 'checkpoint.pth'

# Decide whether to train all layers or only the new ones
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract :
    params_to_update = []
    for name,param in model_ft.named_parameters():
        if param.requires_grad == True:
            params_to_update.append(param)
            print("\t",name)
else:
    for name,param in model_ft.named_parameters():
        if param.requires_grad == True:
            print("\t",name)    # params_to_update is already a generator over all parameters, so only print here

# Optimizer setup
optimizer_ft = optim.Adam(params_to_update,lr=1e-2) # a relatively high learning rate, since a decay schedule is set below
scheduler = optim.lr_scheduler.StepLR(optimizer_ft,step_size=7,gamma=0.1)   # decay the learning rate to 1/10 every 7 epochs
# The last layer is already LogSoftmax(), so nn.CrossEntropyLoss() cannot be used here
# nn.CrossEntropyLoss() is equivalent to LogSoftmax() combined with nn.NLLLoss()
criterion = nn.NLLLoss()
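
The equivalence mentioned in the comment can be verified numerically. This small check is my own addition:

# Verify: CrossEntropyLoss(logits) == NLLLoss(LogSoftmax(logits))
logits = torch.randn(4, 102)
targets = torch.randint(0, 102, (4,))
ce = nn.CrossEntropyLoss()(logits, targets)
nll = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), targets)
print(torch.allclose(ce, nll))  # True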

# Training module
def train_model(model,dataloaders,criterion,optimizer,num_epochs = 25,is_inception = False,filename = filename):
    since = time.time()
    best_acc = 0
    model.to(device)

    val_acc_history = []
    train_acc_history = []
    train_losses = []
    val_losses = []
    LRs = [optimizer.param_groups[0]['lr']]
    # Keep a copy of the best model weights seen so far
    best_model_wts = copy.deepcopy(model.state_dict())

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch,num_epochs-1))
        print('-'*10)
        # Training and validation phases
        for phase in ['train','valid']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_corrects = 0

            # Iterate over all the data
            for inputs,labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Zero the gradients
                optimizer.zero_grad()
                # Compute and update gradients only during training
                with torch.set_grad_enabled(phase == 'train'):
                    if is_inception and phase == 'train':
                        outputs,aux_outputs = model(inputs)
                        loss1 = criterion(outputs,labels)
                        loss2 = criterion(aux_outputs,labels)
                        loss = loss1 + loss2
                    else:   # resnet takes this branch
                        outputs = model(inputs)
                        loss = criterion(outputs,labels)
                    # _ is the max value; preds is its index, which is the predicted class
                    _, preds = torch.max(outputs,1)

                    # Update the weights during the training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # Accumulate loss and the number of correct predictions
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)

            time_elapsed = time.time() - since
            print('Time elapsed {:.0f}m {:.0f}s'.format(time_elapsed // 60,time_elapsed % 60))
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase,epoch_loss,epoch_acc))

            # Save the best model so far
            if phase == 'valid' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                state = {
                    'state_dict': model.state_dict(),
                    'best_acc': best_acc,
                    'optimizer': optimizer.state_dict()
                }
                torch.save(state, filename)
            if phase == 'valid':
                val_acc_history.append(epoch_acc)
                val_losses.append(epoch_loss)
                scheduler.step()    # StepLR steps once per epoch and takes no metric argument
            if phase == 'train':
                train_acc_history.append(epoch_acc)
                train_losses.append(epoch_loss)

        print('Optimizer learning rate: {:.7f}'.format(optimizer.param_groups[0]['lr']))
        LRs.append((optimizer.param_groups[0]['lr']))
        print()
    time_elapsed = time.time() - since
    print('Train complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60,time_elapsed % 60))
    print('Best val Acc: {:.4f}'.format(best_acc))

    # After training, load the best weights as the final model
    model.load_state_dict(best_model_wts)
    return model, val_acc_history, train_acc_history, val_losses,train_losses,LRs

# Train
model, val_acc_history, train_acc_history, val_losses,train_losses,LRs = train_model(model_ft,dataloaders,criterion,optimizer_ft,num_epochs=30,is_inception=(model_name == 'inception'))
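
Since train_model returns the loss and accuracy histories, a quick plot shows whether the frozen-backbone stage has converged. A sketch using the variables returned above:

# Plot train/valid loss per epoch from the histories returned by train_model
epochs = range(1, len(train_losses) + 1)
plt.figure()
plt.plot(epochs, train_losses, label='train loss')
plt.plot(epochs, val_losses, label='valid loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()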

        Then continue training all layers:

# Continue training all layers
for param in model_ft.parameters():
    param.requires_grad = True
# Now train all parameters, with a smaller learning rate
optimizer = optim.Adam(model_ft.parameters(),lr=1e-4) # all parameters now, not just the fc head in params_to_update
scheduler = optim.lr_scheduler.StepLR(optimizer,step_size=7,gamma=0.1)

# Loss function
criterion = nn.NLLLoss()

# Load the checkpoint saved during the first stage
checkpoint = torch.load(filename)   # pass in the checkpoint path
best_acc = checkpoint['best_acc']
model_ft.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
# Train
model, val_acc_history, train_acc_history, val_losses,train_losses,LRs = train_model(model_ft,dataloaders,criterion,optimizer,num_epochs=30,is_inception=(model_name == 'inception'))
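
Finally, a sketch of single-image inference with the best checkpoint; the image path here is hypothetical, so substitute one from your own valid folder:

# Inference sketch: classify one image with the best saved weights
model_ft.load_state_dict(torch.load(filename)['state_dict'])
model_ft.eval()
img = Image.open('./flower_data/valid/1/image_06739.jpg')   # hypothetical path
x = data_transforms['valid'](img).unsqueeze(0).to(device)
with torch.no_grad():
    pred = model_ft(x).argmax(dim=1).item()
print(cat_to_name[str(int(class_names[pred]))])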
