Reproducing a GitHub Project: Image Classification

GitHub link: https://github.com/lxztju/pytorch_classification
Remote sensing classification dataset: http://www.lmars.whu.edu.cn/prof_web/zhongyanfei/e-code.html
The code works pretty much out of the box and needs very few changes. Below is a quick walkthrough of how to use it.

1. Data preparation
Run the code on your own data: split it into three folders named train, val, and test.
The top-level directory looks like this; the other files are generated automatically, as explained below.
[Screenshot: top-level directory layout]
Second-level directories: there are 12 classes here, each placed in its own folder, i.e. one folder per class.
[Screenshot: class folders, one per class]
The test folder simply contains the images directly.
[Screenshot: contents of the test folder]
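
Before touching the config, a quick sanity check can confirm the folder layout. The snippet below is a hypothetical helper (not part of the repo; the BASE path matches the one used in the config later) that counts the images per class:

import os

# hypothetical sanity check: count images per class under train/ and val/,
# plus the number of images placed directly in test/
BASE = 'D:/csdn/pytorch_classification/dataset/rs/'  # adjust to your own dataset root
for split in ('train', 'val'):
    split_dir = os.path.join(BASE, split)
    for cls in sorted(os.listdir(split_dir)):
        n_imgs = len(os.listdir(os.path.join(split_dir, cls)))
        print(split, cls, n_imgs)
print('test images:', len(os.listdir(os.path.join(BASE, 'test'))))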

2. Edit the config file
Once the config file is updated, the code is basically ready to run.

import os
# home = os.path.expanduser('~')
home = 'D:/csdn/pytorch_classification'  # project root; give the full path here, otherwise it will throw an error
## number of classes in the dataset
NUM_CLASSES = 12

# batch size during training
BATCH_SIZE = 6

# default input image size for the network
INPUT_SIZE = 224
# maximum number of training epochs
MAX_EPOCH = 200
# number of GPUs to use
GPUS = 1
# epoch to resume training from; if 0, train from scratch
RESUME_EPOCH = 0

WEIGHT_DECAY = 5e-4
MOMENTUM = 0.9
# initial learning rate
LR = 1e-2

# name of the model to use
model_name = 'resnet50'

from models import Resnet50, Resnet101, Resnext101_32x8d,Resnext101_32x16d, Densenet121, Densenet169, Mobilenetv2, Efficientnet, Resnext101_32x32d, Resnext101_32x48d
MODEL_NAMES = {
    'resnext101_32x8d': Resnext101_32x8d,
    'resnext101_32x16d': Resnext101_32x16d,
    'resnext101_32x48d': Resnext101_32x48d,
    'resnext101_32x32d': Resnext101_32x32d,
    'resnet50': Resnet50,
    'resnet101': Resnet101,
    'densenet121': Densenet121,
    'densenet169': Densenet169,
    'moblienetv2': Mobilenetv2,
    'efficientnet-b7': Efficientnet,
    'efficientnet-b8': Efficientnet
}

BASE = home + '/dataset/rs/'  # remember to adjust this so the concatenated path is correct

# where trained models are saved
SAVE_FOLDER = BASE + 'weights/'  # the weights folder is created automatically and stores the models

# locations of the dataset label files
TRAIN_LABEL_DIR = BASE + 'train.txt'
VAL_LABEL_DIR = BASE + 'val.txt'
TEST_LABEL_DIR = BASE + 'test.txt'


## path where the weight file is saved after training
TRAINED_MODEL = BASE + 'weights/resnet50/epoch_200.pth'  # update this path after training finishes

3. Use /data/preprocess.py to generate the label .txt files (I modified it slightly)

import os
import glob
import sys 
sys.path.append("..") 
import cfg
import random

if __name__ == '__main__':
    traindata_path = cfg.BASE + 'train'
    labels = os.listdir(traindata_path)
    valdata_path = cfg.BASE + 'val'
    test_path = cfg.BASE + 'test'
    ## write the train.txt and val.txt files
    txtpath = cfg.BASE
    # print(labels)
    for index, label in enumerate(labels):
        imglist = glob.glob(os.path.join(traindata_path,label, '*.png'))
        # print(imglist)
        random.shuffle(imglist)
        print(len(imglist))
        trainlist = imglist[:int(0.8*len(imglist))]
        vallist = imglist[int(0.8*len(imglist)):]  # the remaining images go to validation
        with open(txtpath + 'train.txt', 'a') as f:
            for img in trainlist:
                # print(img + ' ' + str(index))
                f.write(img + ' ' + str(index))
                f.write('\n')

        with open(txtpath + 'val.txt', 'a') as f:
            for img in vallist:
                # print(img + ' ' + str(index))
                f.write(img + ' ' + str(index))
                f.write('\n')

    imglist = glob.glob(os.path.join(test_path, '*.png'))
    with open(txtpath + 'test.txt', 'a') as f:
        for img in imglist:
            f.write(img)
            f.write('\n')

Running it generates the label files:
[Screenshot: generated train.txt, val.txt, and test.txt]
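For reference, each line of train.txt and val.txt is the image path followed by a space and the class index (the index comes from the enumeration order of os.listdir, so which number a class gets may vary); test.txt contains only the paths. With hypothetical file names the contents look like this:

D:/csdn/pytorch_classification/dataset/rs/train/harbor/0001.png 0
D:/csdn/pytorch_classification/dataset/rs/train/harbor/0002.png 0
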
4. Training. In my tests so far, combining the two options shown below gives slightly better results:
[Screenshot: training options used]
After training finishes, it produces:
[Screenshot: files generated by training]
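
Since cfg.TRAINED_MODEL has to point at the checkpoint you actually want to evaluate, a small helper like the sketch below can save some typing. It assumes checkpoints are written as epoch_*.pth under SAVE_FOLDER/<model_name>, matching the TRAINED_MODEL path in the config above; it is not part of the repo.

import glob
import os
import cfg

# hypothetical helper: find the newest checkpoint saved under weights/<model_name>/
# and print the path to paste into cfg.TRAINED_MODEL
ckpt_dir = os.path.join(cfg.SAVE_FOLDER, cfg.model_name)
ckpts = glob.glob(os.path.join(ckpt_dir, 'epoch_*.pth'))
if ckpts:
    latest = max(ckpts, key=os.path.getmtime)
    print('point cfg.TRAINED_MODEL at:', latest)
else:
    print('no checkpoints found in', ckpt_dir)
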
5. Prediction and accuracy evaluation; this part needed quite a few code changes.

import os
from PIL import Image, ImageDraw, ImageFont
import pandas as pd
from tqdm import tqdm
import numpy as np
from collections import Counter
import cfg
from data import tta_test_transform, get_test_transform, Resize, SelfCustomDataset
from utils import LabelSmoothingCrossEntropy

import torch
import torch.nn as nn
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F

def load_checkpoint(filepath):
    checkpoint = torch.load(filepath)
    model = checkpoint['model']  # extract the network architecture
    model.load_state_dict(checkpoint['model_state_dict'])  # load the network weights
    for parameter in model.parameters():
        parameter.requires_grad = False
    model.eval()
    return model


def predict(model):
    # load the model
    model = load_checkpoint(model)
    print('..... Finished loading model! ......')
    ## run the model on the GPU if one is available
    if torch.cuda.is_available():
        model.cuda()
    pred_list, _id = [], []
    for i in tqdm(range(len(imgs))):
        img_path = imgs[i].strip()
        # print(img_path)
        _id.append(os.path.basename(img_path).split('.')[0])
        img = Image.open(img_path).convert('RGB')
        # print(type(img))
        img = get_test_transform(size=cfg.INPUT_SIZE)(img).unsqueeze(0)

        if torch.cuda.is_available():
            img = img.cuda()
        with torch.no_grad():
            out = model(img)
            out = F.log_softmax(out, dim=1)
        prediction = torch.argmax(out, dim=1).cpu().item()
        pred_list.append(prediction)
    return _id, pred_list


def tta_predict(model):
    # load the model
    model = load_checkpoint(model)
    print('..... Finished loading model! ......')
    ## run the model on the GPU if one is available
    if torch.cuda.is_available():
        model.cuda()
    pred_list, _id = [], []
    for i in tqdm(range(len(imgs))):
        img_path = imgs[i].strip()
        # print(img_path)
        _id.append(int(os.path.basename(img_path).split('.')[0]))
        img1 = Image.open(img_path).convert('RGB')
        # print(type(img))
        pred = []
        for _ in range(8):
            img = tta_test_transform(size=cfg.INPUT_SIZE)(img1).unsqueeze(0)

            if torch.cuda.is_available():
                img = img.cuda()
            with torch.no_grad():
                out = model(img)
            prediction = torch.argmax(out, dim=1).cpu().item()
            pred.append(prediction)
        res = Counter(pred).most_common(1)[0][0]
        pred_list.append(res)
    return _id, pred_list

def computeTestSetAccuracy(model, loss_function):
    batch_size = 4
    val_label_dir = cfg.VAL_LABEL_DIR
    val_datasets = SelfCustomDataset(val_label_dir, imageset='val')
    val_dataloader = torch.utils.data.DataLoader(val_datasets, batch_size=batch_size, shuffle=True, num_workers=0)

    model = load_checkpoint(model)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)  # make sure the model is on the same device as the inputs
    test_acc = 0.0
    test_loss = 0.0
    test_data_size = val_datasets.__len__()
    
    correct = 0
    
    with torch.no_grad():
        # model.eval()
        for j, (inputs, labels) in enumerate(val_dataloader):
            inputs = inputs.to(device)
            labels = labels.to(device)
            labels = labels.long()
 
            outputs = model(inputs)
            outputs = F.log_softmax(outputs, dim=1)
            loss = loss_function(outputs, labels)
            test_loss += loss.item() * inputs.size(0)
            ret, predictions = torch.max(outputs.data, 1)
            correct_counts = predictions.eq(labels.data.view_as(predictions))
            acc = torch.mean(correct_counts.type(torch.FloatTensor))
            test_acc += acc.item() * inputs.size(0)
            print("Test Batch Number: {:03d}, Test: Loss: {:.4f}, Accuracy: {:.4f}".format(
                j, loss.item(), acc.item()
            ))

            # pred = outputs.data.max(1, keepdim=True)[1]
            # correct += pred.eq(labels.data.view_as(pred)).cpu().sum()
            pred = torch.argmax(outputs, 1)
            correct += (pred == labels).sum().float()

        avg_test_loss = test_loss/test_data_size
        avg_test_acc = test_acc/test_data_size 
        print("Test loss : " + str(avg_test_loss))
        print("Test accuracy : " + str(avg_test_acc))

        test_loss /= test_data_size
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            test_loss, correct, test_data_size,
            100. * correct / test_data_size))

def predict_single(model, test_image_name):
    
    image_transforms = {
        'test': transforms.Compose([
            Resize((int(cfg.INPUT_SIZE * (256 / 224)), int(cfg.INPUT_SIZE * (256 / 224)))),
            transforms.CenterCrop(cfg.INPUT_SIZE),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    }
    
    test_directory = os.path.join(cfg.BASE, 'val')
    data = {
        'test': datasets.ImageFolder(root=test_directory, transform=image_transforms['test'])
    }
    idx_to_class = {v: k for k, v in data['test'].class_to_idx.items()}

    model = load_checkpoint(model)

    transform = image_transforms['test']
 
    test_image = Image.open(test_image_name).convert('RGB')
    # test_image = test_image.resize((224, 224),Image.ANTIALIAS)
    draw = ImageDraw.Draw(test_image)
 
    test_image_tensor = transform(test_image)
 
    if torch.cuda.is_available():
        test_image_tensor = test_image_tensor.view(1, 3, 224, 224).cuda()
    else:
        test_image_tensor = test_image_tensor.view(1, 3, 224, 224)
 
    with torch.no_grad():
        # model.eval()
 
        out = model(test_image_tensor)
        out = F.log_softmax(out, dim=1)
        ps = torch.exp(out)
        print(ps.topk(12, dim=1))                    # don't forget to change this to your own number of classes
        topk, topclass = ps.topk(1, dim=1)
        print("Prediction : ", idx_to_class[topclass.cpu().numpy()[0][0]], ", Score: ", topk.cpu().numpy()[0][0])
        text = idx_to_class[topclass.cpu().numpy()[0][0]] + " " + str(topk.cpu().numpy()[0][0])
        font = ImageFont.truetype('arial.ttf', 36)
        draw.text((0, 0), text, (255, 0, 0), font=font)
        test_image.show()

def accuracy(output, target, topk=(1,)):
    """
    Computes the accuracy over the k top predictions for the specified values of k
    :param output: tensor, output of model
    :param target: tensor, label of input data
    :param topk: tuple, the k top predictions
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True).item()
            # number of correct predictions within the top-k
            res.append(correct_k)
        return res

"""
correct_k = accuracy(out, label, topk=(1, 5))
acc1, acc5 = correct_k / len(dataset)
"""


if __name__ == "__main__":

    trained_model = cfg.TRAINED_MODEL
    model_name = cfg.model_name
    with open(cfg.TEST_LABEL_DIR, 'r') as f:
        imgs = f.readlines()

    # _id, pred_list = tta_predict(trained_model)
    _id, pred_list = predict(trained_model)

    submission = pd.DataFrame({"ID": _id, "Label": pred_list})
    submission.to_csv(cfg.BASE + '{}_submission.csv'
                      .format(model_name), index=False, header=False)

    # loss_func = nn.CrossEntropyLoss()
    loss_func = LabelSmoothingCrossEntropy()
    computeTestSetAccuracy(trained_model, loss_func)
    predict_single(trained_model, './dataset/rs/val/harbor/0027.tif')

Running it prints:
[Screenshot: prediction and accuracy output]
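
For reference, the {model_name}_submission.csv written by the script has one row per test image: the file name stem as ID and the predicted class index as Label, with no header row. With hypothetical values it looks like this:

0001,3
0002,7
0003,3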
