目录
数据预处理部分:
网络模块设置:
网络模型保存与测试
数据读取与预处理操作
制作好数据源:
读取标签对应的实际名字
加载models中提供的模型,并且直接用训练的好权重当做初始化参数
模型参数更新
把模型输出层改成自己的
设置哪些层需要训练
优化器设置
- 数据增强:torchvision中transforms模块自带功能,比较实用
- 数据预处理:torchvision中transforms也帮我们实现好了,直接调用即可
- DataLoader模块直接读取batch数据
- 加载预训练模型,torchvision中有很多经典网络架构,调用起来十分方便,并且可以用人家训练好的权重参数来继续训练,也就是所谓的迁移学习
- 需要注意的是别人训练好的任务跟咱们的可不是完全一样,需要把最后的head层改一改,一般也就是最后的全连接层,改成咱们自己的任务
- 训练时可以全部重头训练,也可以只训练最后咱们任务的层,因为前几层都是做特征提取的,本质任务目标是一致的
- 模型保存的时候可以带有选择性,例如在验证集中如果当前效果好则保存
- 读取模型进行实际测试
import os
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import torch
from torch import nn
import torch.optim as optim
import torchvision
#pip install torchvision
from torchvision import transforms, models, datasets
#https://pytorch.org/docs/stable/torchvision/index.html
import imageio
import time
import warnings
warnings.filterwarnings("ignore")
import random
import sys
import copy
import json
from PIL import Image
# Dataset root and per-split directories. os.path.join is used instead of
# string concatenation, which produced a doubled slash ('./flower_data//train').
data_dir = './flower_data/'
train_dir = os.path.join(data_dir, 'train')
valid_dir = os.path.join(data_dir, 'valid')
- data_transforms中指定了所有图像预处理操作
- ImageFolder假设所有的文件按文件夹保存好,每个文件夹下面存储同一类别的图片,文件夹的名字为分类的名字
# ImageNet channel statistics, shared by both preprocessing pipelines.
_imagenet_mean = [0.485, 0.456, 0.406]
_imagenet_std = [0.229, 0.224, 0.225]

# Training pipeline: resize, then a stack of random augmentations, then
# tensor conversion and normalization.
_train_pipeline = transforms.Compose([
    transforms.Resize([96, 96]),
    transforms.RandomRotation(45),           # random rotation in [-45, 45] degrees
    transforms.CenterCrop(64),               # crop a 64x64 patch from the center
    transforms.RandomHorizontalFlip(p=0.5),  # 50% chance of a horizontal flip
    transforms.RandomVerticalFlip(p=0.5),    # 50% chance of a vertical flip
    transforms.ColorJitter(brightness=0.2, contrast=0.1, saturation=0.1, hue=0.1),
    transforms.RandomGrayscale(p=0.025),     # rare grayscale conversion (R=G=B, still 3 channels)
    transforms.ToTensor(),
    transforms.Normalize(_imagenet_mean, _imagenet_std),
])

# Validation pipeline: deterministic resize + normalization only (no augmentation).
_valid_pipeline = transforms.Compose([
    transforms.Resize([64, 64]),
    transforms.ToTensor(),
    transforms.Normalize(_imagenet_mean, _imagenet_std),
])

data_transforms = {'train': _train_pipeline, 'valid': _valid_pipeline}
batch_size = 128

# One ImageFolder dataset and one shuffling DataLoader per split; ImageFolder
# infers the class label from each image's parent-directory name.
image_datasets = {
    split: datasets.ImageFolder(os.path.join(data_dir, split), data_transforms[split])
    for split in ('train', 'valid')
}
dataloaders = {
    split: torch.utils.data.DataLoader(image_datasets[split], batch_size=batch_size, shuffle=True)
    for split in ('train', 'valid')
}
dataset_sizes = {split: len(image_datasets[split]) for split in ('train', 'valid')}
class_names = image_datasets['train'].classes
# Bare expression: shows the dataset summary as notebook cell output.
image_datasets
{'train': Dataset ImageFolder Number of datapoints: 6552 Root location: ./flower_data/train StandardTransform Transform: Compose( Resize(size=[96, 96], interpolation=bilinear, max_size=None, antialias=None) RandomRotation(degrees=[-45.0, 45.0], interpolation=nearest, expand=False, fill=0) CenterCrop(size=(64, 64)) RandomHorizontalFlip(p=0.5) RandomVerticalFlip(p=0.5) ColorJitter(brightness=[0.8, 1.2], contrast=[0.9, 1.1], saturation=[0.9, 1.1], hue=[-0.1, 0.1]) RandomGrayscale(p=0.025) ToTensor() Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ), 'valid': Dataset ImageFolder Number of datapoints: 818 Root location: ./flower_data/valid StandardTransform Transform: Compose( Resize(size=[64, 64], interpolation=bilinear, max_size=None, antialias=None) ToTensor() Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) )}
# Load the mapping from class-folder id (e.g. '1') to the flower's
# human-readable name. encoding='utf-8' is given explicitly so the read
# does not depend on the platform's default locale encoding.
with open('cat_to_name.json', 'r', encoding='utf-8') as f:
    cat_to_name = json.load(f)
{'1': 'pink primrose',
'10': 'globe thistle',
'100': 'blanket flower',
'101': 'trumpet creeper',
'102': 'blackberry lily',
'11': 'snapdragon',
'12': "colt's foot",
'13': 'king protea',
'14': 'spear thistle',
'15': 'yellow iris',
'16': 'globe-flower',
'17': 'purple coneflower',
'18': 'peruvian lily',
'19': 'balloon flower',
'2': 'hard-leaved pocket orchid',
'20': 'giant white arum lily',
'21': 'fire lily',
'22': 'pincushion flower',
'23': 'fritillary',
'24': 'red ginger',
'25': 'grape hyacinth',
'26': 'corn poppy',
'27': 'prince of wales feathers',
'28': 'stemless gentian',
'29': 'artichoke',
...
'95': 'bougainvillea',
'96': 'camellia',
'97': 'mallow',
'98': 'mexican petunia',
'99': 'bromelia'}
# Backbone architecture to use for transfer learning.
model_name = 'resnet'  # options: ['resnet', 'alexnet', 'vgg', 'squeezenet', 'densenet', 'inception']
# Feature-extract mode: keep the pretrained backbone frozen and train only the new head.
feature_extract = True

# Detect CUDA once, report it, and select the compute device accordingly.
train_on_gpu = torch.cuda.is_available()
if train_on_gpu:
    print('CUDA is available! Training on GPU ...')
else:
    print('CUDA is not available. Training on CPU ...')
device = torch.device("cuda:0" if train_on_gpu else "cpu")
def set_parameter_requires_grad(model, feature_extracting):
    """Freeze every parameter of *model* when feature_extracting is truthy.

    Frozen parameters keep their pretrained weights and receive no gradients,
    so only layers added afterwards (e.g. a new classifier head) get trained.
    When feature_extracting is falsy the model is left untouched.
    """
    if not feature_extracting:
        return
    for weight in model.parameters():
        weight.requires_grad = False
# Instantiate an 18-layer ResNet (randomly initialized here; the pretrained
# variant is built inside initialize_model below). resnet18 is the lightest
# option; with more compute a deeper variant such as resnet152 works too.
model_ft = models.resnet18()
# Bare expression: prints the layer structure as notebook cell output.
model_ft
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    """Build a ResNet-18 adapted to a num_classes-way classification task.

    Args:
        model_name: accepted for interface compatibility, but currently
            ignored — only 'resnet' (resnet18) is implemented here.
        num_classes: output size of the replacement fc head.
        feature_extract: if True, freeze all pretrained parameters so only
            the new head is trainable.
        use_pretrained: load ImageNet weights when True.

    Returns:
        (model, input_size) where input_size is the expected square input
        resolution (64, matching the preprocessing pipelines above).
    """
    model_ft = models.resnet18(pretrained=use_pretrained)
    set_parameter_requires_grad(model_ft, feature_extract)
    num_ftrs = model_ft.fc.in_features
    # Bug fix: the head size was hard-coded to 102; honor num_classes instead.
    model_ft.fc = nn.Linear(num_ftrs, num_classes)
    input_size = 64
    return model_ft, input_size
# Build the pretrained model with a 102-way head (the flower dataset has 102 classes).
model_ft, input_size = initialize_model(model_name, 102, feature_extract, use_pretrained=True)
# Run on GPU if available, otherwise CPU.
model_ft = model_ft.to(device)
# Path where the best checkpoint will be saved later.
filename='checkpoint.pth'
# 是否训练所有层
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract:
params_to_update = []
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
print("\t",name)
else:
for name,param in model_ft.named_parameters():
if param.requires_grad == True:
print("\t",name)
# Optimizer: Adam over only the parameters collected above (just the new
# fc head in feature-extract mode).
optimizer_ft = optim.Adam(params_to_update, lr=1e-2)
# Decay the learning rate to 1/10 every 10 epochs (step_size=10; the original
# comment claimed 7, which did not match the code).
scheduler = optim.lr_scheduler.StepLR(optimizer_ft, step_size=10, gamma=0.1)
# Multi-class classification loss; expects raw (unnormalized) logits.
criterion = nn.CrossEntropyLoss()