This post uses the VGG network to classify cats vs. dogs; it can serve as a baseline for image classification tasks.
Data: download the corresponding dataset directly from Kaggle.
# data
import torch
from torchvision.datasets import ImageFolder
import torch.utils.data as Data
from torchvision import transforms
# model (nn also provides the loss function used below)
import torch.nn as nn
from torchvision.models import vgg16
# optimizer
import torch.optim as optim
# training
import numpy as np
from torch.utils.tensorboard import SummaryWriter
import matplotlib.pyplot as plt
# utilities
from torchsummary import summary
import gc
# prediction
from PIL import Image
import cv2
import glob
If a GPU is available, use it and clear any cached GPU memory; otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if device == torch.device('cuda'):
    gc.collect()
    torch.cuda.empty_cache()
The data part is about constructing the "specific data" a deep learning model needs. Why "specific"? Because models are picky: they usually have requirements on the size, format, and so on of input images, so we have to build the dataset to match those requirements.
# data preprocessing
transform = transforms.Compose([
    transforms.Resize([224, 224]),  # resize the image to the expected input size
    transforms.ToTensor(),          # convert the image to a tensor
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # normalize with the ImageNet mean and std
])
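As a quick sanity check, the transform should map any RGB input image to a 3×224×224 float tensor. A minimal sketch (the file name below is hypothetical):

img = Image.open('./1-cat-dog/train/cat/example.jpg').convert('RGB')  # hypothetical file name
x = transform(img)
print(x.shape)  # torch.Size([3, 224, 224])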
# training set
trainset = ImageFolder(root='./1-cat-dog/train/', transform=transform)  # Dataset for the training set
train_loader = Data.DataLoader(trainset, batch_size=16, shuffle=True, num_workers=0)  # DataLoader for the training set
# validation set
valset = ImageFolder(root='./1-cat-dog/val/', transform=transform)  # Dataset for the validation set, built the same way
val_loader = Data.DataLoader(valset, batch_size=2, num_workers=0)  # DataLoader for the validation set
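A minimal sanity check on the loader (assuming the training folder holds at least 16 images): one batch should be 16 images of shape 3×224×224 with 16 integer labels.

images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # torch.Size([16, 3, 224, 224]) torch.Size([16])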
# check the mapping between class names and label indices
valset.class_to_idx
{'cat': 0, 'dog': 1}
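ImageFolder derives the class labels from the subdirectory names, sorted alphabetically, which is why cat maps to 0 and dog to 1. The directory layout assumed by the paths above is roughly:

# ./1-cat-dog/
# ├── train/
# │   ├── cat/   <- cat training images
# │   └── dog/   <- dog training images
# └── val/
#     ├── cat/
#     └── dog/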
This is the model part. We use the VGG16 model that ships with torchvision and fine-tune it: the original VGG16 predicts 1000 classes, while our task has only two (cat and dog), so we change the output of the last layer to 2. Training starts from the pretrained weights, which speeds up convergence and works remarkably well.
model = vgg16(pretrained=True).to(device)  # note: newer torchvision versions use the weights= argument instead of pretrained=True
# load the pretrained weights from a local checkpoint (redundant if pretrained=True already downloaded them)
weights_path = './vgg16-397923af.pth'
model.load_state_dict(torch.load(weights_path, map_location=device))
# freeze all pretrained parameters; only the new head will be trained
for param in model.parameters():
    param.requires_grad = False
# replace the last classifier layer: 1000 ImageNet classes -> 2 classes (cat, dog)
model.classifier[6] = nn.Linear(4096, 2)
model.to(device)  # as the last expression in a notebook cell, this also prints the modified architecture below
VGG(
(features): Sequential(
(0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(1): ReLU(inplace=True)
(2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(3): ReLU(inplace=True)
(4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(6): ReLU(inplace=True)
(7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(8): ReLU(inplace=True)
(9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(11): ReLU(inplace=True)
(12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(13): ReLU(inplace=True)
(14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(15): ReLU(inplace=True)
(16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(18): ReLU(inplace=True)
(19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(20): ReLU(inplace=True)
(21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(22): ReLU(inplace=True)
(23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
(24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(25): ReLU(inplace=True)
(26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(27): ReLU(inplace=True)
(28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
(29): ReLU(inplace=True)
(30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
)
(avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
(classifier): Sequential(
(0): Linear(in_features=25088, out_features=4096, bias=True)
(1): ReLU(inplace=True)
(2): Dropout(p=0.5, inplace=False)
(3): Linear(in_features=4096, out_features=4096, bias=True)
(4): ReLU(inplace=True)
(5): Dropout(p=0.5, inplace=False)
(6): Linear(in_features=4096, out_features=2, bias=True)
)
)
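To confirm the freezing worked, we can count the trainable parameters; only the new 4096×2 head plus its bias should remain trainable. A minimal sketch:

n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(n_trainable)  # 8194 = 4096*2 weights + 2 biases

Alternatively, instead of fine-tuning the torchvision model, we can write the VGG16 architecture out by hand. Note that instantiating the class below (model = MY_VGG16(...) further down) replaces the fine-tuned model, so in practice you would pick one of the two approaches: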
class MY_VGG16(nn.Module):
    def __init__(self, init_weights=True):
        super(MY_VGG16, self).__init__()
        # blocks 1-5 mirror the feature extractor of torchvision's VGG16
        self.block1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
        )
        self.block2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
        )
        self.block3 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
        )
        self.block4 = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
        )
        self.block5 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),
        )
        # classifier head; note it omits the Dropout layers that torchvision's VGG16 uses
        self.block6 = nn.Sequential(
            nn.Linear(in_features=512*7*7, out_features=4096),
            nn.ReLU(),
            nn.Linear(in_features=4096, out_features=4096),
            nn.ReLU(),
            nn.Linear(in_features=4096, out_features=2)
        )
        if init_weights:
            self._initialize_weights()

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = torch.flatten(x, start_dim=1)  # flatten to (batch, 512*7*7) for the fully connected head
        x = self.block6(x)
        return x
model = MY_VGG16(init_weights=True).to(device)
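A quick shape check with a random dummy batch (a minimal sketch) confirms the network maps a 224×224 image to 2 logits:

x = torch.randn(1, 3, 224, 224).to(device)
print(model(x).shape)  # torch.Size([1, 2])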
This part defines the loss function and the optimizer.
# 损失函数和优化器
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
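If you go with the fine-tuning route instead, it is slightly cleaner to hand the optimizer only the trainable parameters (a minimal sketch; SGD also tolerates frozen parameters, it simply skips those that never receive gradients):

trainable_params = [p for p in model.parameters() if p.requires_grad]
optimizer = optim.SGD(trainable_params, lr=1e-4, momentum=0.9)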
With the data, model, loss function, and optimizer in place, we can start training. Training is essentially a series of iterative steps through which the model learns the mapping from "input" to "output".
Here we define two functions, train_one_epoch and valid_one_epoch:
def train_one_epoch(train_loader, model, criterion, optimizer):
    model.train()
    running_loss = 0.0
    metric = 0.0
    total = 0.0
    for i, data in enumerate(train_loader):
        images, labels = data
        images = images.to(device)
        labels = labels.to(device)
        outs = model(images)
        optimizer.zero_grad()
        loss = criterion(outs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        _, preds = torch.max(outs, 1)
        metric += (preds == labels).sum().item()
        total += labels.shape[0]
    train_loss = running_loss/total  # sum of per-batch mean losses, averaged over samples
    train_accuracy = 100*metric/total  # convert to a percentage
    return train_loss, train_accuracy
def valid_one_epoch(valid_loader, model):
    model.eval()
    total = 0.0
    metric = 0.0
    with torch.no_grad():  # no gradients needed during validation
        for i, data in enumerate(valid_loader):
            images, labels = data
            images = images.to(device)
            labels = labels.to(device)
            outs = model(images)
            _, preds = torch.max(outs, 1)
            total += labels.shape[0]
            metric += (preds == labels).sum().item()
    valid_accuracy = 100*metric/total
    return valid_accuracy
Now we can train the model, saving the final model once training is done.
epochs = 20
train_losses = []
train_accuracys = []
valid_accuracys = []
for epoch in range(epochs):
    train_loss, train_accuracy = train_one_epoch(train_loader=train_loader, model=model, criterion=criterion, optimizer=optimizer)
    print(f"epoch:{epoch}, loss:{train_loss}, accuracy:{train_accuracy}")
    train_losses.append(train_loss)
    train_accuracys.append(train_accuracy)
    valid_accuracy = valid_one_epoch(valid_loader=val_loader, model=model)
    print(f"valid accuracy:{valid_accuracy}")
    valid_accuracys.append(valid_accuracy)
torch.save(model, 'test.pth')
epoch:0, loss:0.020580440697570642, accuracy:87.91666666666667
valid accuracy:100.0
epoch:1, loss:0.004143970409252991, accuracy:99.58333333333333
valid accuracy:100.0
epoch:2, loss:0.0024526457864946375, accuracy:100.0
valid accuracy:100.0
epoch:3, loss:0.0018572715828971316, accuracy:99.79166666666667
valid accuracy:100.0
epoch:4, loss:0.001885829681608205, accuracy:99.79166666666667
valid accuracy:100.0
epoch:5, loss:0.0012724352748288462, accuracy:100.0
valid accuracy:100.0
epoch:6, loss:0.0012308882627015312, accuracy:99.58333333333333
valid accuracy:100.0
epoch:7, loss:0.000938531729237487, accuracy:100.0
valid accuracy:100.0
epoch:8, loss:0.0008737363134666035, accuracy:100.0
valid accuracy:100.0
epoch:9, loss:0.0007390241080429405, accuracy:100.0
valid accuracy:100.0
epoch:10, loss:0.000971730300807394, accuracy:99.79166666666667
valid accuracy:100.0
epoch:11, loss:0.0006812206517982607, accuracy:100.0
valid accuracy:100.0
epoch:12, loss:0.000887786266199934, accuracy:100.0
valid accuracy:100.0
epoch:13, loss:0.0006422517117850173, accuracy:100.0
valid accuracy:100.0
epoch:14, loss:0.0005755920026179714, accuracy:100.0
valid accuracy:100.0
epoch:15, loss:0.0005269992495110879, accuracy:100.0
valid accuracy:100.0
epoch:16, loss:0.0006754447094863281, accuracy:100.0
valid accuracy:100.0
epoch:17, loss:0.0004891647525558559, accuracy:100.0
valid accuracy:100.0
epoch:18, loss:0.0004677536303158073, accuracy:100.0
valid accuracy:100.0
epoch:19, loss:0.00048221670779942843, accuracy:100.0
valid accuracy:100.0
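A side note: torch.save(model, ...) pickles the entire module, which ties the checkpoint to the exact class definition. Saving just the weights is the more portable pattern; a minimal sketch (the file name is illustrative):

torch.save(model.state_dict(), 'test_state.pth')
# later: rebuild the architecture, then load the weights
# model = MY_VGG16(init_weights=False).to(device)
# model.load_state_dict(torch.load('test_state.pth', map_location=device))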
After training finishes, let's visualize how the loss and accuracy changed over the course of training.
plt.figure(figsize=(13,8))
plt.subplot(1,3,1)
plt.plot(range(epochs), train_losses)
plt.title('Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.subplot(1,3,2)
plt.plot(range(epochs),train_accuracys)
plt.title('Train Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.subplot(1,3,3)
plt.plot(range(epochs),valid_accuracys)
plt.title('Valid Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.savefig('loss_accuracy_changing_with_epoch.png')
plt.show()
With the final model in hand, let's make some predictions; first, wrap the steps in a helper function.
classes = list(valset.class_to_idx)
model = torch.load('./test.pth')
def predict_one_image(image_path, model, transform, classes):
    test_img = cv2.imread(image_path)
    test_img = cv2.cvtColor(test_img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; convert to RGB before display and inference
    plt.imshow(test_img)
    test_img = Image.fromarray(test_img)
    test_img = transform(test_img)
    img = test_img.to(device).unsqueeze(0)  # add the batch dimension
    model.eval()
    with torch.no_grad():
        output = model(img)
    _, pred = torch.max(output, 1)
    pred_class = classes[pred.item()]
    print(f'Prediction: {pred_class}')
    return pred_class
# predict an image from the training set
predict_one_image(image_path='./1-cat-dog/train/dog/flickr_dog_000003.jpg', model=model, transform=transform, classes=classes)
Prediction: dog
'dog'
# just for fun, let's try a photo of Biden, haha
predict_one_image(image_path='./1-cat-dog/OIP-C.jpg', model=model, transform=transform, classes=classes)
Prediction: dog
'dog'
# predict a photo I took with my own phone
predict_one_image(image_path='./1-cat-dog/gou.jpg', model=model, transform=transform, classes=classes)
Prediction: dog
'dog'
# predict an image downloaded from Bing
predict_one_image(image_path='./1-cat-dog/cat_with_hat.jpg', model=model, transform=transform, classes=classes)
Prediction: cat
'cat'
# predict an image from the validation set
predict_one_image(image_path='./1-cat-dog/val/cat/flickr_cat_000003.jpg', model=model, transform=transform, classes=classes)
Prediction: cat
'cat'
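The glob import from the top of the post also lets us batch this over a whole directory; a minimal sketch (assuming the files there are .jpg):

for path in glob.glob('./1-cat-dog/val/cat/*.jpg'):
    predict_one_image(image_path=path, model=model, transform=transform, classes=classes)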
As the architecture printed above shows, VGG16 consists of six sub-blocks in total; it is called VGG16 because it has 16 layers with learnable parameters (13 convolutional layers plus 3 fully connected layers).
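We can verify that count directly on the model; a minimal sketch:

n_layers = sum(1 for m in model.modules() if isinstance(m, (nn.Conv2d, nn.Linear)))
print(n_layers)  # 16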