An Approach to Building an Image Classification Network with PyTorch

This post covers the following:
An image classification network built on the PyTorch framework using the ResNet18 architecture. After training, it classifies images of Pokemon (Pikachu, Charmander, Bulbasaur, Squirtle, Mewtwo) with good accuracy.
Keywords: PyTorch, ResNet18, transfer learning, image classification
This post summarizes Liangqu Long's PyTorch deep learning code.
Tips:
1. The code follows lesson 63 (transfer learning) of Liangqu Long's GitHub repository
https://github.com/dragen1860/Deep-Learning-with-PyTorch-Tutorials
with some modifications; the detection part was written by me.
2. This post uses visdom to monitor training in real time, so start the visdom server in a terminal before running:

python -m visdom.server
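
By default the Visdom server listens on port 8097, so the training plots below can be viewed in a browser at http://localhost:8097.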

1. The dataset

import torch
import os, glob
import random, csv

from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image


class Pokemon(Dataset):     # define the Pokemon dataset by subclassing torch's Dataset

    def __init__(self, root, resize, mode):
        super(Pokemon, self).__init__()

        self.root = root
        self.resize = resize

        self.name2label = {}

        for name in sorted(os.listdir(os.path.join(root))):   # build the class-name -> label dict from the sorted sub-directory names
            if not os.path.isdir(os.path.join(root, name)):
                continue

            self.name2label[name] = len(self.name2label.keys())

        # print(self.name2label)
        # print(self.name2label.keys())

        self.images, self.labels = self.load_csv('images.csv')   # read the csv file to get image paths and their labels

        if mode == 'train':       # train / val / test split 6:2:2
            self.images = self.images[:int(0.6*len(self.images))]
            self.labels = self.labels[:int(0.6*len(self.labels))]
        elif mode == 'val':
            self.images = self.images[int(0.6*len(self.images)):int(0.8*len(self.images))]
            self.labels = self.labels[int(0.6*len(self.labels)):int(0.8*len(self.labels))]
        else:
            self.images = self.images[int(0.8*len(self.images)):]
            self.labels = self.labels[int(0.8*len(self.labels)):]



    def load_csv(self, filename):  # input: name of the csv file to read; output: lists of image paths and labels

        if not os.path.exists(os.path.join(self.root,filename)):  # if the csv file does not exist, create it

            images = []
            for name in self.name2label.keys():  # use glob.glob() to collect images in three formats under root
                images += glob.glob(os.path.join(self.root, name, '*.png'))
                images += glob.glob(os.path.join(self.root, name, '*.jpg'))
                images += glob.glob(os.path.join(self.root, name, '*.jpeg'))

            random.shuffle(images)  # shuffle the images so they are no longer grouped by class name
            with open(os.path.join(self.root,filename),mode='w',newline='') as f:
                writer = csv.writer(f)
                for img in images:
                    name = img.split(os.sep)[-2]
                    label = self.name2label[name]

                    writer.writerow([img,label])
                print('written into csv file',filename)   # write to the csv; the label comes from the class name in the path via the name2label dict

        images, labels = [],[]      # if the csv file exists, read it; images and labels are initialized as empty lists so they are unaffected by the write step above
        with open(os.path.join(self.root,filename)) as f:
            reader = csv.reader(f)
            for row in reader:
                img, label = row
                label = int(label)

                images.append(img)
                labels.append(label)

        assert len(images) == len(labels)

        return images , labels    # read image paths and labels row by row and return them

    def __len__(self):
        return len(self.images)


    def denormalize(self,x_hat):

        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]

        # x_hat = (x-mean)/std
        # x= x_hat*std+mean
        mean = torch.tensor(mean).unsqueeze(1).unsqueeze(1)
        std = torch.tensor(std).unsqueeze(1).unsqueeze(1)

        x = x_hat * std + mean

        return x



    def __getitem__(self, idx):
        # idx~[0~len(images)]
        # self.images, self.labels
        # img: 'pokemon\\bulbasaur\\00000000.png'
        # label: 0
        img, label = self.images[idx], self.labels[idx]

        tf = transforms.Compose([
            lambda x:Image.open(x).convert('RGB'), # string path => image data
            transforms.Resize((int(self.resize*1.25), int(self.resize*1.25))),
            transforms.RandomRotation(15),
            transforms.CenterCrop(self.resize),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

        img = tf(img)
        label = torch.tensor(label)


        return img, label

def main():
    import visdom
    import time

    viz = visdom.Visdom()
    db = Pokemon('pokemon', 64, 'train')
    x,y = next(iter(db))
    print('sample:',x.shape,y.shape,y)

    viz.image(db.denormalize(x),win='sample_x',opts=dict(title='sample_x'))

    loader = DataLoader(db,batch_size=32,shuffle=True,num_workers=4)

    for x,y in loader:
        viz.images(db.denormalize(x),nrow=8,win='batch',opts=dict(title='batch'))
        viz.text(str(y.numpy()),win='label',opts=dict(title='batch-y'))
        time.sleep(10)



if __name__ == '__main__':
    main()
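
As a side note, a very similar dataset can also be built with torchvision's ImageFolder, which, like the custom class above, derives labels from the sorted sub-folder names. The minimal sketch below assumes the same 'pokemon' directory layout and reuses the transforms from __getitem__; it does not implement the 6:2:2 split, which is why the custom Dataset is kept for the rest of the post.

import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader

tf = transforms.Compose([
    transforms.Resize((int(224 * 1.25), int(224 * 1.25))),
    transforms.RandomRotation(15),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

db = torchvision.datasets.ImageFolder(root='pokemon', transform=tf)  # labels follow sorted folder names
loader = DataLoader(db, batch_size=32, shuffle=True)
print(db.class_to_idx)  # same idea as the name2label dict above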

2. Network training

import  torch
from    torch import optim, nn
import  visdom
import  torchvision
from    torch.utils.data import DataLoader

from    pokemon import Pokemon
# from    resnet import ResNet18
from    torchvision.models import resnet18

from    utils import Flatten

batchsz = 32
lr = 5e-4
epochs = 20

device = torch.device('cuda')
torch.manual_seed(1234)


train_db = Pokemon('pokemon', 224, mode='train')
val_db = Pokemon('pokemon', 224, mode='val')
test_db = Pokemon('pokemon', 224, mode='test')
train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True,
                          num_workers=4)
val_loader = DataLoader(val_db, batch_size=batchsz, num_workers=2)
test_loader = DataLoader(test_db, batch_size=batchsz, num_workers=2)


viz = visdom.Visdom()

def evalute(model, loader):
    model.eval()
    
    correct = 0
    total = len(loader.dataset)

    for x,y in loader:
        x,y = x.to(device), y.to(device)
        with torch.no_grad():
            logits = model(x)
            pred = logits.argmax(dim=1)
        correct += torch.eq(pred, y).sum().float().item()

    return correct / total

def main():

    # model = ResNet18(5).to(device)
    trained_model = resnet18(pretrained=True)
    model = nn.Sequential(*list(trained_model.children())[:-1], #[b, 512, 1, 1]
                          Flatten(), # [b, 512, 1, 1] => [b, 512]
                          nn.Linear(512, 5)
                          ).to(device)
    # x = torch.randn(2, 3, 224, 224)
    # print(model(x).shape)

    optimizer = optim.Adam(model.parameters(), lr=lr)
    criteon = nn.CrossEntropyLoss()


    best_acc, best_epoch = 0, 0
    global_step = 0
    viz.line([0], [-1], win='loss', opts=dict(title='loss'))
    viz.line([0], [-1], win='val_acc', opts=dict(title='val_acc'))
    for epoch in range(epochs):

        for step, (x,y) in enumerate(train_loader):

            # x: [b, 3, 224, 224], y: [b]
            x, y = x.to(device), y.to(device)

            model.train()
            # print(x.size())
            logits = model(x)
            loss = criteon(logits, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            viz.line([loss.item()], [global_step], win='loss', update='append')
            global_step += 1

        if epoch % 1 == 0:

            val_acc = evalute(model, val_loader)
            if val_acc> best_acc:
                best_epoch = epoch
                best_acc = val_acc

                torch.save(model.state_dict(), 'best.mdl')

                viz.line([val_acc], [global_step], win='val_acc', update='append')


    print('best acc:', best_acc, 'best epoch:', best_epoch)

    model.load_state_dict(torch.load('best.mdl'))
    print('loaded from ckpt!')

    test_acc = evalute(model, test_loader)
    print('test acc:', test_acc)





if __name__ == '__main__':
    main()
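
The Flatten module imported from utils is not listed in this post; a minimal sketch of what the usage above assumes (flattening [b, 512, 1, 1] into [b, 512]) could be:

# utils.py (sketch; the tutorial's utils.Flatten is assumed to behave like this)
import torch
from torch import nn

class Flatten(nn.Module):
    def forward(self, x):
        return x.view(x.size(0), -1)  # [b, 512, 1, 1] => [b, 512]

On recent PyTorch versions, nn.Flatten() does the same job, which is what the inference code in section 3 uses.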

3. Detection code

import torch
from torch import optim, nn
from PIL import Image, ImageDraw, ImageFont
from torchvision import transforms
from torchvision.models import resnet18
import os

resize = 224  # keep the resize consistent with the network input size used during training; ResNet18 takes 3*224*224 images
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # fall back to CPU if CUDA is not installed on this machine
weights = 'F:/Deep-Learning-with-PyTorch-Tutorials-master/lesson63-transfer learning/best.mdl'  # path to the weights obtained from the transfer learning above
path = 'images'  # folder containing the images to detect


# imgsz=224

def label2name(root, label):  # input: dataset root, predicted class index; output: predicted class name
    namedic = {}
    for name in sorted(os.listdir(os.path.join(root))):   # build the index -> name dict from the layout of the pokemon folder
        if not os.path.isdir(os.path.join(root, name)):
            continue
        namedic[len(namedic.keys())] = name
    # namedic = {0: 'bulbasaur', 1: 'charmander', 2: 'mewtwo', 3: 'pikachu', 4: 'squirtle'}
    # alternatively, hard-code the index -> name dict with the line above, in which case the root argument is not needed
    # print(namedic)
    pokemon_name = namedic[label]
    return pokemon_name


def creatmodel():   # output: the network model
    trained_model = resnet18(pretrained=True) # use torchvision's pretrained resnet18
    model = nn.Sequential(*list(trained_model.children())[:-1],  # [b, 512, 1, 1]  use .children() to keep all resnet18 layers except the final fc layer
                          nn.Flatten(),  # [b, 512, 1, 1] => [b, 512]
                          nn.Linear(512, 5)  # fully connected layer; outputs the 5 pokemon classes
                          ).to(device)
    return model


def getimagelist(source):   # input: path of the folder to detect; output: list of paths of all files in it
    rlist = []
    for dir, folder, file in os.walk(source):
        for i in file:
            t = os.path.join(dir, i)
            rlist.append(t)
    # print(rlist)
    return rlist


def drawimagetext(image, context):   # input: the image to write on and the text to write; output: the image with the text drawn
    # get an image
    # make a blank image for the text, initialized to transparent text color
    img = Image.open(image)
    # get a font
    fnt = ImageFont.truetype("arial.ttf", 40)
    # get a drawing context
    d = ImageDraw.Draw(img)
    context = str(context)
    d.text((10, 10), context, font=fnt, fill=(0, 0, 0))

    return img


def main():
    model = creatmodel() # build the model
    model.load_state_dict(torch.load(weights, map_location=device))  # load the trained parameters
    data_transform = transforms.Compose([  # preprocessing for detection
        lambda x: Image.open(x).convert('RGB'),  # string path => image data
        transforms.Resize((resize, resize)),   # resize to the network input size
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])     # the network was trained with Normalize, so normalize here as well before feeding the image
    ])

    for image in getimagelist(path):  # loop over the images and predict
        img = data_transform(image)   # load the image and preprocess it
        img = torch.unsqueeze(img, dim=0)  # add the batch dimension expected by the network
        img = img.to(device)           # move to the device
        # print(img.size())
        model.eval()                    # switch to eval mode
        with torch.no_grad():
            logits = model(img)  # feed the image tensor through the model to get scores for the five pokemon classes
            pred = logits.argmax(dim=1)   # the prediction is the index of the largest score
            # print(int(pred[0]))
            image_afterdraw = drawimagetext(image, label2name('pokemon', int(pred[0])))  # draw the predicted name onto the image
        image_afterdraw.show()  # show the image
    # optional image-saving block (initialize i, e.g. i = 0, before the loop if enabled)
    #     image_afterdraw.save('./imagesave/{}.png'.format(i))
    #     i +=1

if __name__ == '__main__':
    main()
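
If class probabilities are wanted rather than just the argmax, a softmax over the logits can be applied inside the no_grad block; a small sketch (not part of the original script):

import torch.nn.functional as F

probs = F.softmax(logits, dim=1)   # [1, 5] probabilities that sum to 1
conf, pred = probs.max(dim=1)      # confidence and predicted class index
print(float(conf[0]), int(pred[0]))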
