PyTorch in Practice | P5: Sneaker Image Recognition (Deep Learning with PyTorch)

I. My Environment

● Language: Python 3.8
● IDE: PyCharm
● Deep learning framework: PyTorch
● Dataset: https://pan.baidu.com/s/1gA3TXAWpil9l39wJMjwuRA (extraction code: zj37); the folder layout the code expects is sketched below
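
datasets.ImageFolder infers class labels from sub-directory names, so the download is assumed to be unpacked like this (one folder per class; the file names themselves don't matter):

data/
├── train/
│   ├── adidas/
│   └── nike/
└── test/
    ├── adidas/
    └── nike/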

II. Main Code Implementation

1. main.py

# -*- coding: utf-8 -*-
import torch.utils.data
from torchvision import datasets, transforms
from model import *

# 1. Load and preprocess the data
train_data_path = './data/train/'
test_data_path = './data/test/'
# Transforms applied to the images loaded from the folders
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # resize the input image to a uniform size
    transforms.RandomHorizontalFlip(),  # random horizontal flip (data augmentation)
    transforms.ToTensor(),  # convert a PIL Image or numpy.ndarray to a tensor scaled to [0, 1]
    transforms.Normalize(  # normalize towards a standard normal (Gaussian) distribution, which helps the model converge
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])  # the standard ImageNet channel statistics, commonly reused for custom datasets
])

test_transforms = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(  # normalize towards a standard normal (Gaussian) distribution
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])
])
train_dataset = datasets.ImageFolder(train_data_path, transform=train_transforms)
test_dataset = datasets.ImageFolder(test_data_path, transform=test_transforms)

# Batch the data with DataLoaders
batch_size = 32
train_dl = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dl = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size)
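
# Optional sanity check (a sketch, not part of the original script): ImageFolder
# assigns labels alphabetically, so 'adidas' -> 0 and 'nike' -> 1 here.
# print(train_dataset.class_to_idx)    # {'adidas': 0, 'nike': 1}
# imgs, labels = next(iter(train_dl))
# print(imgs.shape, labels.shape)      # torch.Size([32, 3, 224, 224]) torch.Size([32])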

# 2. Build the network (see Network_fn.__init__ / forward in model.py)

# 3. Train the model
# Instantiate the model
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} device'.format(device))

model = Network_fn().to(device)


# Set up the training parameters
def adjust_learning_rate(optimizer, epoch, start_lr):
    # decay the learning rate to 0.92 of its previous value every two epochs
    lr = start_lr * (0.92 ** (epoch // 2))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


learn_rate = 1e-4  # initial learning rate
optimizer = torch.optim.SGD(model.parameters(), lr=learn_rate)
loss_fn = nn.CrossEntropyLoss()
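
# Note (an alternative sketch, not part of the original code): the manual schedule
# above is essentially torch.optim.lr_scheduler.StepLR and could be replaced by:
#   scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.92)
# with scheduler.step() called once at the end of each epoch instead of
# adjust_learning_rate(optimizer, epoch, learn_rate).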

# Training loop
epochs = 50

train_loss = []
train_acc = []
test_loss = []
test_acc = []

for epoch in range(epochs):
    # update the learning rate
    adjust_learning_rate(optimizer, epoch, learn_rate)

    model.train()
    epoch_train_acc, epoch_train_loss = model.train1(train_dl, model, loss_fn, optimizer, device)

    model.eval()
    epoch_test_acc, epoch_test_loss = model.test1(test_dl, model, loss_fn, device)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    # read back the current learning rate
    lr = optimizer.state_dict()['param_groups'][0]['lr']

    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f}, Lr:{:.2E}')
    print(template.format(epoch + 1, epoch_train_acc * 100, epoch_train_loss,
                          epoch_test_acc * 100, epoch_test_loss, lr))
print('Done')

# Save the model weights
torch.save(model.state_dict(), './model/model.pkl')
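# Note: only the parameters (state_dict) are saved, not the module itself, so
# Network_fn must be re-instantiated before load_state_dict() at inference time
# (as test.py does below). The .pkl extension is arbitrary; .pth/.pt is the more
# common convention.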

# 4. Evaluate and visualize the results
import matplotlib.pyplot as plt
# suppress warnings
import warnings

epochs_range = range(epochs)

warnings.filterwarnings("ignore")  # ignore warning messages
plt.rcParams['font.sans-serif'] = ['SimHei']  # display CJK labels correctly
plt.rcParams['axes.unicode_minus'] = False  # display the minus sign correctly
plt.rcParams['figure.dpi'] = 100  # figure resolution

plt.figure(figsize=(20, 5))

plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')

plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')

plt.show()

2. model.py

# -*- coding: utf-8 -*-
import torch
import torch.nn as nn


class Network_fn(nn.Module):
    def __init__(self):
        super(Network_fn, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 12, kernel_size=5, padding=0),
            nn.BatchNorm2d(12),
            nn.ReLU()
        )

        self.conv2 = nn.Sequential(
            nn.Conv2d(12, 12, kernel_size=5, padding=0),
            nn.BatchNorm2d(12),
            nn.ReLU()
        )

        self.pool3 = nn.Sequential(
            nn.MaxPool2d(2)
        )

        self.conv4 = nn.Sequential(
            nn.Conv2d(12, 24, kernel_size=5, padding=0),
            nn.BatchNorm2d(24),
            nn.ReLU()
        )

        self.conv5 = nn.Sequential(
            nn.Conv2d(24, 24, kernel_size=5, padding=0),
            nn.BatchNorm2d(24),
            nn.ReLU()
        )

        self.pool6 = nn.Sequential(
            nn.MaxPool2d(2)
        )

        self.dropout = nn.Sequential(
            nn.Dropout(0.2)
        )

        self.fc = nn.Sequential(
            nn.Linear(24 * 50 * 50, 2)
        )
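
        # Shape trace for a 224x224 input, justifying the 24 * 50 * 50 above:
        # 224 -> conv1 (k=5, p=0) -> 220 -> conv2 -> 216 -> pool3 -> 108
        #     -> conv4 -> 104 -> conv5 -> 100 -> pool6 -> 50, with 24 channels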

    def forward(self, x):
        batch_size = x.size(0)
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.pool3(x)
        x = self.conv4(x)
        x = self.conv5(x)
        x = self.pool6(x)
        x = self.dropout(x)

        x = x.view(batch_size, -1)  # flatten for the fully connected layer: (batch, 24, 50, 50) -> (batch, 24*50*50); -1 lets PyTorch infer the size
        x = self.fc(x)
        return x

    def train1(self, dataloader, model, loss_fn, optimizer, device):
        train_acc, train_loss = 0, 0
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)

            # forward pass
            pred = model(X)
            loss = loss_fn(pred, y)

            # backward pass
            optimizer.zero_grad()  # reset the gradients
            loss.backward()
            optimizer.step()  # update the parameters

            train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
            train_loss += loss.item()  # accumulate the batch loss

        size = len(dataloader.dataset)
        num_batches = len(dataloader)

        train_acc /= size
        train_loss /= num_batches

        return train_acc, train_loss

    def test1(self, dataloader, model, loss_fn, device):
        size = len(dataloader.dataset)  # size of the test set
        num_batches = len(dataloader)  # number of batches, i.e. ceil(size / batch_size)
        test_loss, test_acc = 0, 0
        with torch.no_grad():
            for imgs, target in dataloader:
                imgs, target = imgs.to(device), target.to(device)

                # compute the loss
                target_pred = model(imgs)
                loss = loss_fn(target_pred, target)

                test_loss += loss.item()
                test_acc += (target_pred.argmax(1) == target).type(torch.float).sum().item()

            test_acc /= size
            test_loss /= num_batches
        return test_acc, test_loss
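
A quick way to double-check the architecture (a standalone sketch, not part of the original files) is to push a dummy batch through the network and confirm the output shape:

# shape check for Network_fn (sketch)
import torch
from model import Network_fn

net = Network_fn()
dummy = torch.randn(4, 3, 224, 224)  # a fake batch of 4 RGB 224x224 images
out = net(dummy)
print(out.shape)  # expected: torch.Size([4, 2])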

3. test.py

# -*- coding: utf-8 -*-
import torch
from PIL import Image
from matplotlib import pyplot as plt
from torchvision import transforms

from model import Network_fn

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Network_fn()

model.load_state_dict(torch.load('./model/model.pkl', map_location=torch.device('cpu')))

path = './data/test/adidas/2.jpg'
test_img = Image.open(path).convert('RGB')

plt.imshow(test_img)
plt.show()

# Earlier transform kept for reference; it is unused now (see Section III below)
t_transforms = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.RandomHorizontalFlip(),  # random horizontal flip
    transforms.ToTensor(),
])

train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # resize the input image to a uniform size
    transforms.RandomHorizontalFlip(),  # random horizontal flip
    transforms.ToTensor(),  # convert a PIL Image or numpy.ndarray to a tensor scaled to [0, 1]
    transforms.Normalize(  # normalize towards a standard normal (Gaussian) distribution
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])  # the standard ImageNet channel statistics
])


test_img = train_transforms(test_img)
test_img = test_img.unsqueeze(0)

model.eval()
with torch.no_grad():  # no gradients are needed for inference
    output = model(test_img)

index = output.argmax(1).item()

classes = ['adidas', 'nike']

print(classes[index])
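
To read out a confidence score alongside the label, the logits can be turned into probabilities with softmax (a small sketch that could be appended to the script above):

# optional: probability readout via softmax (sketch)
probs = torch.softmax(output, dim=1)
print('{}: {:.1%}'.format(classes[index], probs[0, index].item()))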

III. Problems Encountered

When predicting a single image with test.py, the prediction came out wrong no matter what; the fix was to switch the transform to the same train_transforms used during training, i.e., to add the Normalize step back in.
My original transform looked like this; since it was only for testing, I figured there was no need for the extra steps:

t_transforms = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
])

I finally changed it to:

train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # resize the input image to a uniform size
    # transforms.RandomHorizontalFlip(),  # random horizontal flip (not needed at inference)
    transforms.ToTensor(),  # convert a PIL Image or numpy.ndarray to a tensor scaled to [0, 1]
    transforms.Normalize(  # normalize towards a standard normal (Gaussian) distribution
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225])  # the standard ImageNet channel statistics
])
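
A quick way to see the effect (an illustrative sketch; the image path is taken from test.py) is to compare pixel statistics with and without Normalize, since the model was trained on the zero-centered version:

# sketch: pixel statistics with and without Normalize
from PIL import Image
from torchvision import transforms

img = Image.open('./data/test/adidas/2.jpg').convert('RGB')
raw = transforms.ToTensor()(transforms.Resize([224, 224])(img))
norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                            std=[0.229, 0.224, 0.225])(raw)
print(raw.mean().item(), norm.mean().item())  # raw lies in [0, 1]; normalized is roughly zero-centered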

Problem solved. The takeaway: inference preprocessing must match training preprocessing (minus the random augmentations), because the model only ever saw normalized inputs.
