Hung-yi Lee Machine Learning Course HW03 Code Walkthrough

The task in Homework 3 is to classify food images into 11 categories.

Download the data from the course website into a data folder. The code is organized into three modules: dataset, model, and main.


1. The dataset module

This module reads the image data from disk.

Key function: os.path.join(path, x), which joins the path components path and x into a single path.
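
A quick illustration (the file name 0_0.jpg is just a made-up example in the food11 naming style):

import os

# joins the components with the separator appropriate for the operating system
print(os.path.join("./data/food11/training", "0_0.jpg"))  # ./data/food11/training/0_0.jpg on Linux/macOS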

# import the libraries
import os
from PIL import Image
from torch.utils.data import Dataset

# dataset class that reads in the image data
class FoodDataset(Dataset):

    def __init__(self, path, tfm, files=None):
        super().__init__()
        self.path = path  # path of the image folder
        # Collect every .jpg file in the folder; os.path.join combines the folder
        # path with each file name x to form the full path of each image.
        self.files = sorted([os.path.join(path, x) for x in os.listdir(path) if x.endswith(".jpg")])
        if files is not None:
            self.files = files
        print(f"One {path} sample", self.files[0])
        self.transform = tfm  # image transform (e.g. resizing), defined in the main module
    # return the number of images in the dataset
    def __len__(self):
        return len(self.files)

    def __getitem__(self, idx):
        fname = self.files[idx]
        im = Image.open(fname)  # open the image file
        im = self.transform(im)
        try:
            # The class label is encoded at the start of the file name ("label_index.jpg");
            # parse it so classification accuracy can be computed later.
            label = int(os.path.basename(fname).split("_")[0])
        except ValueError:
            label = -1  # the test set has no labels, so return -1
        return im, label
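
A minimal usage sketch, assuming the images have been downloaded into ./data/food11/training (the transform here mirrors the test_tfm defined later in the main module):

import torchvision.transforms as transforms

tfm = transforms.Compose([transforms.Resize((128, 128)), transforms.ToTensor()])
ds = FoodDataset("./data/food11/training", tfm=tfm)
im, label = ds[0]       # __getitem__ returns an (image tensor, label) pair
print(im.shape, label)  # torch.Size([3, 128, 128]) and the class id parsed from the file name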

2. The model module

This module builds the model. Five convolution-plus-pooling blocks each halve the spatial size (128 → 64 → 32 → 16 → 8 → 4), so every image ends up as 512 feature maps of size 4×4. The fully connected layers then map the flattened features to an output of shape [64, 11]: one score per class for each image in a batch of 64.

from torch import nn


class Classifier(nn.Module):
    def __init__(self):
        #  call the parent constructor
        super(Classifier, self).__init__()
        # input shape: [3, 128, 128]
        self.cnn = nn.Sequential(
            # 3: input channels, 64: output channels, 3: kernel size, 1: stride, 1: padding
            nn.Conv2d(3, 64, 3, 1, 1),  # [64, 128, 128]
            nn.BatchNorm2d(64),  # the argument must match the number of output channels
            nn.ReLU(),  # activation function
            nn.MaxPool2d(2, 2, 0),  # [64, 64, 64]; pooling halves the width and height: 128/2 = 64

            nn.Conv2d(64, 128, 3, 1, 1),  # [128, 64, 64]
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(2, 2, 0),  # [128, 32, 32]

            nn.Conv2d(128, 256, 3, 1, 1),  # [256, 32, 32]
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.MaxPool2d(2, 2, 0),  # [256, 16, 16]

            nn.Conv2d(256, 512, 3, 1, 1),  # [512, 16, 16]
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.MaxPool2d(2, 2, 0),  # [512, 8, 8]

            nn.Conv2d(512, 512, 3, 1, 1),  # [512, 8, 8]
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.MaxPool2d(2, 2, 0),  # [512, 4, 4]: 512 feature maps of size 4x4
        )
        self.fc = nn.Sequential(
            nn.Linear(512 * 4 * 4, 1024),  # from the 512*4*4 flattened features down to 1024
            nn.ReLU(),
            nn.Linear(1024, 512),
            nn.ReLU(),
            nn.Linear(512, 11)  # 11 output classes, as the assignment requires
        )

    def forward(self, x):
        out = self.cnn(x)
        out = out.view(out.size()[0], -1)  # flatten: [64, 512, 4, 4] -> [64, 512*4*4]
        return self.fc(out)  # [64, 11]
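
To verify the shapes described above, a random batch can be pushed through the network (a quick sanity check, not part of the original code):

import torch

net = Classifier()
x = torch.randn(64, 3, 128, 128)  # a fake batch of 64 RGB 128x128 images
print(net(x).shape)               # torch.Size([64, 11])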

3. The main module

Import the libraries, including the dataset and model modules defined above.

import os
import torch
from torch import nn
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
from tqdm import tqdm
from dataset import FoodDataset
from model import Classifier

Prepare the test-set transform. All images must be resized to the same size, here 128×128 (other sizes can be specified as well), and then converted to tensors.

test_tfm = transforms.Compose([
    transforms.Resize((128, 128)),  # resize every test image to the same fixed size
    transforms.ToTensor(),
])

Prepare the training-set transform. The training set could additionally be augmented, for example with scaling, flips, or other transforms; here only the resize is applied (see the sketch after this block).

train_tfm = transforms.Compose([
    # Resize the image into a fixed shape (height = width = 128)
    transforms.Resize((128, 128)),  # further augmentations (e.g. random flips) could be added here
    transforms.ToTensor(),
])
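
For instance, a train_tfm with simple augmentations could look like the sketch below; the specific transforms and parameters here are just one reasonable choice, not part of the original code:

train_tfm = transforms.Compose([
    transforms.Resize((128, 128)),
    transforms.RandomHorizontalFlip(p=0.5),  # mirror the image half of the time
    transforms.RandomRotation(15),           # rotate by up to ±15 degrees
    transforms.ColorJitter(brightness=0.2),  # small random brightness changes
    transforms.ToTensor(),
])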

Various definitions come next: the batch size, datasets, and data loaders, then the device, model, loss function, and optimizer. The device is chosen before the model is built, because model.to(device) needs it; a GPU is strongly preferred.

batch_size = 64  # 64 images per batch
_dataset_dir = "./data/food11"  # data path
train_set = FoodDataset(os.path.join(_dataset_dir, "training"), tfm=train_tfm)  # read the training images
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=True)  # serve the training set in shuffled batches
valid_set = FoodDataset(os.path.join(_dataset_dir, "validation"), tfm=test_tfm)  # read the validation images
valid_loader = DataLoader(valid_set, batch_size=batch_size, shuffle=True, num_workers=0, pin_memory=True)  # serve the validation set in batches
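
To confirm what one batch looks like, you can pull a single batch from the loader (a quick sanity check, not part of the original code):

imgs, labels = next(iter(train_loader))
print(imgs.shape)    # torch.Size([64, 3, 128, 128]): a batch of 64 transformed images
print(labels.shape)  # torch.Size([64]): one integer label per image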

device = "cuda" if torch.cuda.is_available() else "cpu"  # prefer the GPU when one is available; CPU training is much slower

n_epochs = 3  # number of training epochs
patience = 300  # stop early if validation accuracy has not improved for 300 consecutive epochs
model = Classifier().to(device)  # device must already be defined, so it is set first above
criterion = nn.CrossEntropyLoss()  # cross-entropy loss for classification
optimizer = torch.optim.Adam(model.parameters(), lr=0.0003, weight_decay=1e-5)  # Adam; other optimizers and learning rates also work
stale = 0  # epochs since the last improvement in validation accuracy
best_acc = 0  # best validation accuracy seen so far
_exp_name = "sample"  # experiment name used for the log and checkpoint files

Start training:

for epoch in range(n_epochs):
    # switch the model to training mode
    model.train()

    # These are used to record information in training.
    train_loss = []
    train_accs = []

    for batch in tqdm(train_loader):
        imgs, labels = batch

        # move the batch to the selected device and run the forward pass
        logits = model(imgs.to(device))

        # compute the cross-entropy loss
        loss = criterion(logits, labels.to(device))

        # zero the gradients; otherwise they would accumulate across iterations
        optimizer.zero_grad()

        # backpropagate to compute the gradients
        loss.backward()

        grad_norm = nn.utils.clip_grad_norm_(model.parameters(), max_norm=10)  # clip gradients to stabilize training
        # update the parameters
        optimizer.step()

        acc = (logits.argmax(dim=-1) == labels.to(device)).float().mean()  # fraction of correct predictions in this batch

        # Record the loss and accuracy.
        train_loss.append(loss.item())
        train_accs.append(acc)

    train_loss = sum(train_loss) / len(train_loss)
    train_acc = sum(train_accs) / len(train_accs)

    print(f"[ Train | {epoch + 1:03d}/{n_epochs:03d} ] loss = {train_loss:.5f}, acc = {train_acc:.5f}")

Then run validation; note that the code below is still inside the epoch loop:

    model.eval()  # switch the model to evaluation mode

    valid_loss = []
    valid_accs = []

    for batch in tqdm(valid_loader):
        # A batch consists of image data and corresponding labels.
        imgs, labels = batch
       
        with torch.no_grad():  # no gradients are needed during validation
            logits = model(imgs.to(device))

        loss = criterion(logits, labels.to(device))
        acc = (logits.argmax(dim=-1) == labels.to(device)).float().mean()

        valid_loss.append(loss.item())
        valid_accs.append(acc)

    valid_loss = sum(valid_loss) / len(valid_loss)
    valid_acc = sum(valid_accs) / len(valid_accs)

    print(f"[ Valid | {epoch + 1:03d}/{n_epochs:03d} ] loss = {valid_loss:.5f}, acc = {valid_acc:.5f}")

    # Log the validation result to the console and to a log file ("-> best" marks a new best accuracy).
    if valid_acc > best_acc:
        log_line = f"[ Valid | {epoch + 1:03d}/{n_epochs:03d} ] loss = {valid_loss:.5f}, acc = {valid_acc:.5f} -> best"
    else:
        log_line = f"[ Valid | {epoch + 1:03d}/{n_epochs:03d} ] loss = {valid_loss:.5f}, acc = {valid_acc:.5f}"
    print(log_line)
    with open(f"./{_exp_name}_log.txt", "a") as f:
        print(log_line, file=f)  # append the same line to the log file

    if valid_acc > best_acc:
        print(f"Best model found at epoch {epoch}, saving model")
        torch.save(model.state_dict(), f"{_exp_name}_best.ckpt")  # save only the best model to keep the output size small
        best_acc = valid_acc
        stale = 0
    else:
        stale += 1
        if stale > patience:
            print(f"No improvment {patience} consecutive epochs, early stopping")
            break
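
After training, the saved best checkpoint can be loaded to predict on the test set. The sketch below assumes a ./data/food11/test folder with the same .jpg naming as the other splits; it is not part of the original post:

model_best = Classifier().to(device)
model_best.load_state_dict(torch.load(f"{_exp_name}_best.ckpt"))  # load the best weights saved above
model_best.eval()

test_set = FoodDataset(os.path.join(_dataset_dir, "test"), tfm=test_tfm)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=0, pin_memory=True)

predictions = []
with torch.no_grad():
    for imgs, _ in tqdm(test_loader):  # test labels are just -1 placeholders
        logits = model_best(imgs.to(device))
        predictions += logits.argmax(dim=-1).cpu().tolist()
print(predictions[:10])  # predicted class ids for the first ten test images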

I am still a beginner in machine learning and there is much I do not yet understand. If you find any mistakes, please do not hesitate to let me know; I would be very grateful.
