Video: https://www.bilibili.com/video/BV1z64y1o7iz?spm_id_from=333.999.0.0&vd_source=00412a0a94ad6b00d46be19dfdd12b51
Competition: https://www.kaggle.com/c/classify-leaves
Dataset: 176 leaf species, 27,153 images in total: 18,353 for training and 8,800 for testing.
Environment: Windows 11 + PyTorch 1.12.0 + Jupyter Notebook
Package setup:
! pip install ttach ## test-time augmentation (TTA) transforms
! pip install pandas
! pip install matplotlib
! pip install tqdm ## progress bars, used during training
Import the packages:
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from torch.utils.data import DataLoader,Dataset
from torchvision import transforms
from PIL import Image
import ttach as tta
import os
import matplotlib.pyplot as plt
import torchvision.models as models
from tqdm import tqdm
Download the dataset from the Kaggle competition page, then open the training CSV to take a look.
labels_dataframe = pd.read_csv('path to your train.csv file')
len(labels_dataframe)
18353
# Remove duplicate labels from the training set: set(labels_dataframe['label'])
# Convert the deduplicated labels to a list and sort it, giving the unique labels in alphabetical order: sorted(list(set(labels_dataframe['label'])))
leaves_labels = sorted(list(set(labels_dataframe['label'])))  # set removes duplicates, sorted orders them
n_classes = len(leaves_labels)  # number of label classes
n_classes
176
class_to_num = dict(zip(leaves_labels, range(n_classes)))
num_to_class = {v: k for k, v in class_to_num.items()}
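As a quick sanity check that the two mappings invert each other (illustrative; any label from leaves_labels works):
name = leaves_labels[0]           # first label in alphabetical order
idx = class_to_num[name]          # -> 0
assert num_to_class[idx] == name  # the two dicts are inverses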
When reading your own dataset, or data that does not fit the stock Dataset class, you need to override the methods of the Dataset class.
For a detailed walkthrough of overriding them, see: https://blog.csdn.net/XX_123_1_RJ/article/details/98879565
Subclass PyTorch's Dataset to define how this particular dataset is read.
# Subclass PyTorch's Dataset to create our own data class
class LeavesData(Dataset):
    def __init__(self, csv_path, file_path, mode='train', valid_ratio=0.2,
                 resize_height=256, resize_width=256):
        """
        csv_path: path to the csv file
        file_path: path to the image folder
        mode (string): train/valid/test
        valid_ratio: fraction of the data used for validation
        resize_height/width: desired input size for training
        """
        # Initialize parameters
        self.resize_height = resize_height
        self.resize_width = resize_width
        self.file_path = file_path
        self.mode = mode
        # The other arguments are not stored as attributes because no other method of this class needs them
        # Read the csv file with pandas
        self.data_info = pd.read_csv(csv_path, header=None)  # header=None reads the header row as data row 0 (hence the offsets below)
        # Compute the length
        self.data_len = len(self.data_info.index) - 1  # subtract 1 because the header row was counted as data
        self.train_len = int(self.data_len * (1 - valid_ratio))
        if mode == 'train':
            # Image file names are in the first column of the csv
            self.train_image = self.data_info.iloc[1:self.train_len, 0]
            self.train_label = self.data_info.iloc[1:self.train_len, 1]
            self.image_arr = np.asarray(self.train_image)
            self.label_arr = np.asarray(self.train_label)
        elif mode == 'valid':
            self.valid_image = self.data_info.iloc[self.train_len:, 0]
            self.valid_label = self.data_info.iloc[self.train_len:, 1]
            self.image_arr = np.asarray(self.valid_image)
            self.label_arr = np.asarray(self.valid_label)
        elif mode == 'test':
            self.test_image = np.asarray(self.data_info.iloc[1:, 0])
            self.image_arr = self.test_image
        # Record the actual number of images
        self.real_len = len(self.image_arr)
        print('Finished reading the {} set of Leaves Dataset ({} samples found)'
              .format(mode, self.real_len))

    def __getitem__(self, index):
        # Look up the file name for this index in image_arr
        single_image_name = self.image_arr[index]
        # Read the image file
        img_as_img = Image.open(self.file_path + single_image_name)
        # To convert an RGB image to grayscale, uncomment the next two lines:
        # if img_as_img.mode != 'L':
        #     img_as_img = img_as_img.convert('L')
        # Data augmentation
        # torchvision.transforms is PyTorch's image preprocessing package; Compose chains several steps together:
        if self.mode == 'train':
            transform = transforms.Compose([
                transforms.Resize(224),
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.RandomVerticalFlip(p=0.5),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
        else:
            # No augmentation for validation and test: just resize, convert to a Tensor and normalize
            transform = transforms.Compose([
                transforms.Resize(224),
                transforms.ToTensor(),
                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ])
        img_as_img = transform(img_as_img)
        if self.mode == 'test':
            return img_as_img  # the test set has no labels
        label = self.label_arr[index]
        number_label = class_to_num[label]
        return img_as_img, number_label

    def __len__(self):
        return self.real_len
train_path = 'classify-leaves/train.csv'
test_path = 'classify-leaves/test.csv'
img_path = 'classify-leaves/'
train_dataset = LeavesData(train_path,img_path,mode='train')
val_dataset = LeavesData(train_path,img_path,mode='valid')
test_dataset = LeavesData(test_path,img_path,mode='test')
train_dataset,val_dataset,test_dataset
Finished reading the train set of Leaves Dataset (14681 samples found)
Finished reading the valid set of Leaves Dataset (3672 samples found)
Finished reading the test set of Leaves Dataset (8800 samples found)
(<__main__.LeavesData at 0x7f8a6168f290>,
 <__main__.LeavesData at 0x7f8a63697890>,
 <__main__.LeavesData at 0x7f8a6369b4d0>)
# Define the data loaders
train_loader = torch.utils.data.DataLoader(
    dataset=train_dataset, batch_size=90,
    shuffle=True, num_workers=0)
val_loader = torch.utils.data.DataLoader(
    dataset=val_dataset, batch_size=90,
    shuffle=False, num_workers=0)
test_loader = torch.utils.data.DataLoader(
    dataset=test_dataset, batch_size=90,
    shuffle=False, num_workers=0)
# Use the GPU if one is available
def get_device():
    return 'cuda' if torch.cuda.is_available() else 'cpu'
device = get_device()
device
'cuda'
# Hyperparameters
learning_rate = 1e-4
weight_decay = 1e-3
num_epoch = 50
beta = 1  # CutMix parameter; Beta(1, 1) is the uniform distribution on [0, 1]
model_path = './pre_res_model.ckpt'  # checkpoint path for the best model, reloaded after training
# Fine-tuning: give the freshly initialized fc head a 10x larger learning rate than the pretrained layers
def train_fine_tuning(net, learning_rate, param_group=True):
    if param_group:
        params_1x = [
            param for name, param in net.named_parameters()
            if name not in ["fc.weight", "fc.bias"]]
        optimizer = torch.optim.Adam([
            {'params': params_1x},
            {'params': net.fc.parameters(), 'lr': learning_rate * 10}],
            lr=learning_rate, weight_decay=0.001)
    else:
        optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate * 0.2,
                                     weight_decay=0.001)
    return optimizer
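A quick way to verify the two learning rates, sketched with a throwaway ResNet-18 (not part of the pipeline; any model with an fc head works):
net = models.resnet18()
net.fc = nn.Linear(net.fc.in_features, 176)
opt = train_fine_tuning(net, 1e-4)
print([g['lr'] for g in opt.param_groups])  # [0.0001, 0.001]: the new head trains 10x faster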
!pip install timm
!pip install torchinfo
import timm  # timm offers a much richer selection of pretrained models
from torchinfo import summary
model_1 = timm.create_model('seresnext50_32x4d', pretrained=True)
model_1.fc = nn.Linear(model_1.fc.in_features, 176)
nn.init.xavier_uniform_(model_1.fc.weight);
model_1 = model_1.to(device)  # move to GPU
model_1.device = device
model_2 = models.resnext50_32x4d(pretrained=True)
model_2.fc = nn.Linear(model_2.fc.in_features, 176)
nn.init.xavier_uniform_(model_2.fc.weight);
model_2 = model_2.to(device)  # move to GPU
model_2.device = device
model_3 = models.resnext50_32x4d(pretrained=True)
model_3.fc = nn.Linear(model_3.fc.in_features, 176)
nn.init.xavier_uniform_(model_3.fc.weight);
model_3 = model_3.to(device)  # move to GPU
model_3.device = device
# CutMix: a data augmentation that pastes a randomly sized patch from another sample onto a random location in the current one
def rand_bbox(size, lamb):  # compute the crop region
    W = size[2]
    H = size[3]
    cut_rat = np.sqrt(1. - lamb)
    cut_w = int(W * cut_rat)  # use the builtin int: np.int was removed in NumPy 1.24
    cut_h = int(H * cut_rat)
    # sample the patch center uniformly
    cx = np.random.randint(W)
    cy = np.random.randint(H)
    bbx1 = np.clip(cx - cut_w // 2, 0, W)
    bby1 = np.clip(cy - cut_h // 2, 0, H)
    bbx2 = np.clip(cx + cut_w // 2, 0, W)
    bby2 = np.clip(cy + cut_h // 2, 0, H)
    return bbx1, bby1, bbx2, bby2
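A minimal standalone sketch of how rand_bbox feeds into CutMix, on a dummy batch (shapes and the Beta parameters are illustrative; the training loop below does the same thing on real data):
x = torch.randn(4, 3, 224, 224)            # dummy batch
lam = np.random.beta(1.0, 1.0)             # mixing ratio from Beta(1, 1)
perm = torch.randperm(x.size(0))           # shuffled partner for each sample
x1, y1, x2, y2 = rand_bbox(x.size(), lam)
x[:, :, x1:x2, y1:y2] = x[perm, :, x1:x2, y1:y2]  # paste the partner's patch
# recompute lam from the actual patch area (clipping at the border may shrink it)
lam = 1 - ((x2 - x1) * (y2 - y1) / (x.size(-1) * x.size(-2)))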
The training function uses an lr_scheduler to adjust the learning rate dynamically.
Personally I find ReduceLROnPlateau the more convenient choice.
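As a toy illustration (the optimizer and loss values are made up), ReduceLROnPlateau waits patience epochs without improvement before multiplying the lr by factor, and its step() takes the monitored metric, unlike epoch-count schedulers such as StepLR:
opt = torch.optim.Adam([torch.zeros(1, requires_grad=True)], lr=1e-4)
sched = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, mode='min', factor=0.5, patience=3)
for epoch in range(10):
    stalled_loss = 1.0                       # a metric that never improves
    sched.step(stalled_loss)                 # pass the metric being monitored
    print(epoch, opt.param_groups[0]['lr'])  # halves once patience is exhausted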
def train_2(model):
    optimizer = train_fine_tuning(model, learning_rate)
    criterion = nn.CrossEntropyLoss()
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=3, verbose=True, min_lr=0.0000001)
    num_epochs = num_epoch
    best_acc = 0.0
    for epoch in range(num_epochs):
        model.train()
        train_loss = []
        train_accs = []
        # Iterate over the training set in batches, with a tqdm progress bar
        for batch in tqdm(train_loader):
            imgs, labels = batch
            imgs = imgs.to(device)  # move to GPU
            labels = labels.to(device)
            # Sample the mixing ratio from a Beta distribution
            lam = np.random.beta(beta, beta)
            # Random permutation of 0..n-1 to pair each sample with a partner
            rand_index = torch.randperm(imgs.size()[0]).to(device)
            labels_a = labels               # labels of the original samples
            labels_b = labels[rand_index]   # labels of the shuffled samples
            bbx1, bby1, bbx2, bby2 = rand_bbox(imgs.size(), lam)  # generate the crop region
            # Replace the region bbx1:bbx2, bby1:bby2 of each sample with the same region from its shuffled partner
            imgs[:, :, bbx1:bbx2, bby1:bby2] = imgs[rand_index, :, bbx1:bbx2, bby1:bby2]
            # Recompute lambda to match the actual pixel ratio (the crop may have been clipped at the border)
            lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (imgs.size()[-1] * imgs.size()[-2]))
            # Forward the data. (Make sure data and model are on the same device.)
            logits = model(imgs)
            # Cross-entropy loss: the losses for the two label sets are weighted by the mixing ratio
            loss = criterion(logits, labels_a) * lam + criterion(logits, labels_b) * (1. - lam)
            # Clear the gradients stored in the parameters from the previous step
            optimizer.zero_grad()
            # Compute the gradients
            loss.backward()
            # Update the parameters with the computed gradients
            optimizer.step()
            # Accuracy of the current batch
            acc = (logits.argmax(dim=-1) == labels).float().mean()
            # Record loss and accuracy
            train_loss.append(loss.item())
            train_accs.append(acc)
        # The training loss and accuracy are the averages of the recorded values
        train_loss = sum(train_loss) / len(train_loss)
        train_acc = sum(train_accs) / len(train_accs)
        # Update the learning rate
        print("Epoch %d learning rate: %f" % (epoch, optimizer.param_groups[0]['lr']))
        scheduler.step(train_loss)
        # Print the info
        print(f"[ Train | {epoch + 1:03d}/{num_epochs:03d} ] loss = {train_loss:.5f}, acc = {train_acc:.5f}")
        # ---------- Validation ----------
        # Applying TTA here as well might be more reasonable: since the training images were transformed,
        # applying the corresponding transforms during validation could extract more features.
        # Put the model in eval mode so that modules like dropout are disabled and behave correctly.
        model.eval()
        # These record the validation statistics
        valid_loss = []
        valid_accs = []
        # Iterate over the validation set in batches
        for batch in tqdm(val_loader):
            imgs, labels = batch
            # No gradients are needed for validation.
            # Using torch.no_grad() accelerates the forward pass.
            with torch.no_grad():
                logits = model(imgs.to(device))
            # We can still compute the loss (but not the gradients)
            loss = criterion(logits, labels.to(device))
            # Accuracy of the current batch
            acc = (logits.argmax(dim=-1) == labels.to(device)).float().mean()
            # Record loss and accuracy
            valid_loss.append(loss.item())
            valid_accs.append(acc)
        # The validation loss and accuracy are the averages of the recorded values
        valid_loss = sum(valid_loss) / len(valid_loss)
        valid_acc = sum(valid_accs) / len(valid_accs)
        # Print the info
        print(f"[ Valid | {epoch + 1:03d}/{num_epochs:03d} ] loss = {valid_loss:.5f}, acc = {valid_acc:.5f}")
        # Save a checkpoint whenever the model improves
        if valid_acc > best_acc:
            best_acc = valid_acc
            torch.save(model.state_dict(), model_path)
            print('saving model with acc {:.3f}'.format(best_acc))
saveFileName = './submission.csv'
# Train the three models in turn and load the best weights into each.
# All three runs share one checkpoint path, so each model must load its
# checkpoint immediately after its own run, before the next run overwrites the file.
train_2(model_1)
model_1.load_state_dict(torch.load(model_path))  # load the best checkpoint from this run
train_2(model_2)
model_2.load_state_dict(torch.load(model_path))
train_2(model_3)
model_3.load_state_dict(torch.load(model_path))
Epoch 0 learning rate: 0.000100
[ Train | 001/050 ] loss = 3.93691, acc = 0.21181
100%|██████████████████████████████████████████████████████████████████████████████████| 41/41 [00:23<00:00, 1.78it/s]
[ Valid | 001/050 ] loss = 1.35277, acc = 0.68184
saving model with acc 0.682
Wait patiently for training to finish.
# Make sure the models are in eval mode.
# Modules such as Dropout and BatchNorm behave differently in training mode and would hurt inference.
model_1.eval()
model_2.eval()
model_3.eval()
# Wrap the models with TTA
# tta_model = tta.ClassificationTTAWrapper(model, tta.aliases.d4_transform(), merge_mode='mean')
tta_model_1 = tta.ClassificationTTAWrapper(model_1, tta.aliases.flip_transform(), merge_mode='mean')
tta_model_2 = tta.ClassificationTTAWrapper(model_2, tta.aliases.flip_transform(), merge_mode='mean')
tta_model_3 = tta.ClassificationTTAWrapper(model_3, tta.aliases.flip_transform(), merge_mode='mean')
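For reference, tta.aliases.flip_transform() should be equivalent to the explicit composition below (a sketch based on my reading of ttach; flip_tta is just an illustrative name). Compose enumerates every on/off combination of the listed flips, so each image is scored on four views and the logits are merged by mean:
flip_tta = tta.Compose([
    tta.HorizontalFlip(),
    tta.VerticalFlip(),
])
tta_model_1 = tta.ClassificationTTAWrapper(model_1, flip_tta, merge_mode='mean')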
# List that accumulates the predictions
predictions = []
# Iterate over the test set in batches
for batch in tqdm(test_loader):
    imgs = batch
    with torch.no_grad():
        logits_1 = tta_model_1(imgs.to(device))  # predict
        logits_2 = tta_model_2(imgs.to(device))
        logits_3 = tta_model_3(imgs.to(device))
        logits = 0.5 * logits_1 + 0.5 * logits_2 + 0.5 * logits_3  # equal-weight sum; the overall scale does not change the argmax
        # logits = logits_2 + logits_3
    # Take the class with the largest logit as the prediction and record it
    predictions.extend(logits.argmax(dim=-1).cpu().numpy().tolist())
preds = []
for i in predictions:
    preds.append(num_to_class[i])  # map the numeric predictions back to class names
test_data = pd.read_csv(test_path)  # read the test csv
test_data['label'] = pd.Series(preds)  # attach the predicted class names as a new column
submission = pd.concat([test_data['image'], test_data['label']], axis=1)
submission.to_csv(saveFileName, index=False)
print("Done!!!!!!!!!!!!!!!!!!!!!!!!!!!")
Training is now complete. The predictions are saved in submission.csv; submit that file to Kaggle.