torch.utils.data.DataLoader
Purpose: builds an iterable data loader over a Dataset.
Epoch: one complete pass in which every training sample has been fed to the model once.
Iteration: one batch of samples fed to the model.
Batchsize: the batch size; it determines how many Iterations make up one Epoch.
Total samples: 80, Batchsize = 8 → 1 Epoch = 10 Iterations.
With 87 total samples and Batchsize = 8 (the DataLoader default is drop_last=False):
drop_last=True | 1 Epoch = 10 Iterations; the 7 leftover samples are dropped |
drop_last=False | 1 Epoch = 11 Iterations; the last batch holds only the 7 leftover samples |
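This is easy to verify, since len() on a DataLoader gives the number of iterations per epoch. A minimal sketch, assuming a throwaway TensorDataset of 87 random samples (not part of the RMB data below):

import torch
from torch.utils.data import DataLoader, TensorDataset

# Hypothetical toy dataset: 87 samples with one feature each
dummy = TensorDataset(torch.randn(87, 1))
print(len(DataLoader(dummy, batch_size=8, drop_last=True)))   # 10 -> the last 7 samples are dropped
print(len(DataLoader(dummy, batch_size=8, drop_last=False)))  # 11 -> the last batch holds only 7 samples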
torch.utils.data.Dataset
Purpose: Dataset is an abstract class; every custom dataset must subclass it and override __getitem__().
__getitem__(): receives an index and returns the corresponding sample.
The dataset contains two classes, 1-yuan and 100-yuan notes, with each class stored in its own folder.
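For reference, here is a minimal sketch of such a custom Dataset, modeled on how tools/my_dataset.py's RMBDataset is used later in the training script (the source does not show that file, so the internal helper layout here is an assumption):

import os
from PIL import Image
from torch.utils.data import Dataset

class RMBDataset(Dataset):
    def __init__(self, data_dir, transform=None):
        # sub-folder name -> class index, matching rmb_label in the training script
        self.label_name = {"1": 0, "100": 1}
        self.data_info = self._get_img_info(data_dir)  # list of (img_path, label)
        self.transform = transform

    def __getitem__(self, index):
        # receives an index, returns one (sample, label) pair
        path_img, label = self.data_info[index]
        img = Image.open(path_img).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        return img, label

    def __len__(self):
        return len(self.data_info)

    def _get_img_info(self, data_dir):
        # walk the class sub-folders and collect (path, label) pairs
        data_info = list()
        for root, dirs, _ in os.walk(data_dir):
            for sub_dir in dirs:
                img_names = os.listdir(os.path.join(root, sub_dir))
                img_names = list(filter(lambda x: x.endswith('.jpg'), img_names))
                for name in img_names:
                    path_img = os.path.join(root, sub_dir, name)
                    data_info.append((path_img, self.label_name[sub_dir]))
        return data_info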
The program proceeds in the following steps:
Split the dataset (the raw data holds 100 images per class):
train | 80 |
valid | 10 |
test | 10 |
import torch
import os
# shutil: high-level operations on files, folders, and archives
import shutil
import random

# Show the current working directory
# BASE_DIR = os.path.dirname(os.path.abspath('__file__'))
# print(BASE_DIR)

# Create a directory if it does not already exist
def makedir(new_dir):
    if not os.path.exists(new_dir):
        os.makedirs(new_dir)

# The block below runs only when this module is executed directly, not when it is imported
if __name__ == '__main__':
    dataset_dir = os.path.join("data", "RMB_data")
    split_dir = os.path.join("data", "rmb_split")
    # Split every class into train/valid/test
    train_dir = os.path.join(split_dir, "train")
    valid_dir = os.path.join(split_dir, "valid")
    test_dir = os.path.join(split_dir, "test")

    # Optional: check that the source directory exists
    # if not os.path.exists(dataset_dir):
    #     raise Exception("\n{} not found; download it again, place it under {}, and unzip it".format(
    #         dataset_dir, os.path.dirname(dataset_dir)))

    # Split ratios
    train_pct = 0.8
    valid_pct = 0.1
    test_pct = 0.1

    for root, dirs, files in os.walk(dataset_dir):
        for sub_dir in dirs:
            imgs = os.listdir(os.path.join(root, sub_dir))
            imgs = list(filter(lambda x: x.endswith('.jpg'), imgs))
            random.shuffle(imgs)
            img_count = len(imgs)
            train_point = int(img_count * train_pct)
            valid_point = int(img_count * (train_pct + valid_pct))

            for i in range(img_count):
                if i < train_point:
                    out_dir = os.path.join(train_dir, sub_dir)
                elif i < valid_point:
                    out_dir = os.path.join(valid_dir, sub_dir)
                else:
                    out_dir = os.path.join(test_dir, sub_dir)
                makedir(out_dir)
                target_path = os.path.join(out_dir, imgs[i])
                src_path = os.path.join(dataset_dir, sub_dir, imgs[i])
                # Copy the file from source to target
                shutil.copy(src_path, target_path)

            print('Class:{}, train:{}, valid:{}, test:{}'.format(sub_dir, train_point, valid_point - train_point,
                                                                 img_count - valid_point))
            print("Split data created under {}\n".format(out_dir))
Class:1, train:80, valid:10, test:10
Split data created under data\rmb_split\test\1

Class:100, train:80, valid:10, test:10
Split data created under data\rmb_split\test\100
import os
# BASE_DIR = os.path.dirname(os.path.abspath('__file__'))
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torch.optim as optim
from matplotlib import pyplot as plt
# lenet.py lives in the model folder
path_lenet = os.path.join("model", "lenet.py")
# print(path_lenet)
# Path to common_tools.py
path_tools = os.path.join("tools", "common_tools.py")
# print(path_tools)
# Optional sanity checks; can be omitted
# assert os.path.exists(path_lenet), "{} not found; please put lenet.py under {}".format(path_lenet, os.path.dirname(path_lenet))
# assert os.path.exists(path_tools), "{} not found; please put common_tools.py under {}".format(path_tools, os.path.dirname(path_tools))
# import sys
# hello_pytorch_DIR = os.path.abspath(os.path.dirname('__file__')+os.path.sep+".."+os.path.sep+"..")
# print(hello_pytorch_DIR)
# sys.path.append(hello_pytorch_DIR)
from model.lenet import LeNet
from tools.my_dataset import RMBDataset
from tools.common_tools import set_seed
set_seed()  # fix the random seeds for reproducibility
rmb_label = {"1": 0, "100": 1}

# Hyper-parameters
MAX_EPOCH = 10
BATCH_SIZE = 16
LR = 0.01
log_interval = 10
val_interval = 1

# Build the data paths
split_dir = os.path.join("data", "rmb_split")
# print(split_dir)
# Training set directory
train_dir = os.path.join(split_dir, "train")
# Validation set directory
valid_dir = os.path.join(split_dir, "valid")
# print(train_dir)
# print(valid_dir)

# Data augmentation and normalization (ImageNet channel statistics)
norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]
train_transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])
valid_transform = transforms.Compose([
    transforms.Resize((32, 32)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])
# Build the RMBDataset instances
train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)
# Build the DataLoaders
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)
# Model
net = LeNet(classes=2)
# Initialize the weights
net.initialize_weights()
# Loss function
criterion = nn.CrossEntropyLoss()
# Optimizer
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)
# Learning-rate decay schedule
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

train_curve = list()
valid_curve = list()
for epoch in range(MAX_EPOCH):
    loss_mean = 0.
    correct = 0.
    total = 0.
    net.train()
    for i, data in enumerate(train_loader):
        # forward
        inputs, labels = data
        outputs = net(inputs)
        # backward
        optimizer.zero_grad()
        loss = criterion(outputs, labels)
        loss.backward()
        # update the weights
        optimizer.step()
        # Track classification accuracy
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).squeeze().sum().numpy()
        # Log training progress
        loss_mean += loss.item()
        train_curve.append(loss.item())
        if (i + 1) % log_interval == 0:
            loss_mean = loss_mean / log_interval
            print("Training:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, i + 1, len(train_loader), loss_mean, correct / total))
            loss_mean = 0.
    # Update the learning rate once per epoch
    scheduler.step()

    # Validate the model
    if (epoch + 1) % val_interval == 0:
        correct_val = 0.
        total_val = 0.
        loss_val = 0.
        net.eval()
        # No gradient updates are needed during evaluation
        with torch.no_grad():
            for j, data in enumerate(valid_loader):
                inputs, labels = data
                outputs = net(inputs)
                loss = criterion(outputs, labels)
                _, predicted = torch.max(outputs.data, 1)
                total_val += labels.size(0)
                correct_val += (predicted == labels).squeeze().sum().numpy()
                loss_val += loss.item()
            loss_val_epoch = loss_val / len(valid_loader)
            valid_curve.append(loss_val_epoch)
            print("Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                epoch, MAX_EPOCH, j + 1, len(valid_loader), loss_val_epoch, correct_val / total_val))

train_x = range(len(train_curve))
train_y = train_curve
train_iters = len(train_loader)
# valid_curve records one loss per epoch, so map its points onto the iteration axis
valid_x = np.arange(1, len(valid_curve) + 1) * train_iters * val_interval
valid_y = valid_curve
plt.plot(train_x, train_y, label='Train')
plt.plot(valid_x, valid_y, label='Valid')
plt.legend(loc='upper right')
plt.ylabel('loss value')
plt.xlabel('Iteration')
plt.show()
Training:Epoch[000/010] Iteration[010/013] Loss: 0.6107 Acc:69.38%
Valid:   Epoch[000/010] Iteration[003/003] Loss: 0.7187 Acc:57.89%
Training:Epoch[001/010] Iteration[010/013] Loss: 0.7202 Acc:65.00%
Valid:   Epoch[001/010] Iteration[003/003] Loss: 0.3015 Acc:100.00%
Training:Epoch[002/010] Iteration[010/013] Loss: 0.1356 Acc:100.00%
Valid:   Epoch[002/010] Iteration[003/003] Loss: 0.0002 Acc:100.00%
Training:Epoch[003/010] Iteration[010/013] Loss: 0.0214 Acc:99.38%
Valid:   Epoch[003/010] Iteration[003/003] Loss: 0.0001 Acc:100.00%
Training:Epoch[004/010] Iteration[010/013] Loss: 0.0008 Acc:100.00%
Valid:   Epoch[004/010] Iteration[003/003] Loss: 0.0000 Acc:100.00%
Training:Epoch[005/010] Iteration[010/013] Loss: 0.0000 Acc:100.00%
Valid:   Epoch[005/010] Iteration[003/003] Loss: 0.0000 Acc:100.00%
Training:Epoch[006/010] Iteration[010/013] Loss: 0.0001 Acc:100.00%
Valid:   Epoch[006/010] Iteration[003/003] Loss: 0.0000 Acc:100.00%
Training:Epoch[007/010] Iteration[010/013] Loss: 0.0001 Acc:100.00%
Valid:   Epoch[007/010] Iteration[003/003] Loss: 0.0000 Acc:100.00%
Training:Epoch[008/010] Iteration[010/013] Loss: 0.0001 Acc:100.00%
Valid:   Epoch[008/010] Iteration[003/003] Loss: 0.0000 Acc:100.00%
Training:Epoch[009/010] Iteration[010/013] Loss: 0.0000 Acc:100.00%
Valid:   Epoch[009/010] Iteration[003/003] Loss: 0.0000 Acc:100.00%
# Test on a single image
# BASE_DIR = os.path.dirname(os.path.abspath('__file__'))
test_dir = os.path.join("test_data")
test_data = RMBDataset(data_dir=test_dir, transform=valid_transform)
test_loader = DataLoader(dataset=test_data, batch_size=1)
# Run inference
for i, data in enumerate(test_loader):
    # forward
    inputs, labels = data
    outputs = net(inputs)
    _, predicted = torch.max(outputs.data, 1)
    rmb = 1 if predicted.numpy()[0] == 0 else 100
    print("The model predicts a {}-yuan note".format(rmb))
Result: The model predicts a 100-yuan note
Appendix files:
(1) lenet.py, stored in the model folder
import torch.nn as nn
import torch.nn.functional as F


class LeNet(nn.Module):
    """LeNet for 32x32 RGB inputs."""
    def __init__(self, classes):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, classes)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = F.max_pool2d(out, 2)
        out = F.relu(self.conv2(out))
        out = F.max_pool2d(out, 2)
        out = out.view(out.size(0), -1)  # flatten to (N, 16*5*5)
        out = F.relu(self.fc1(out))
        out = F.relu(self.fc2(out))
        out = self.fc3(out)
        return out

    def initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight.data, 0, 0.1)
                m.bias.data.zero_()


class LeNet2(nn.Module):
    """Same architecture as LeNet, built with nn.Sequential."""
    def __init__(self, classes):
        super(LeNet2, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 6, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),
            nn.Conv2d(6, 16, 5),
            nn.ReLU(),
            nn.MaxPool2d(2, 2)
        )
        self.classifier = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, classes)
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size()[0], -1)
        x = self.classifier(x)
        return x


class LeNet_bn(nn.Module):
    """LeNet with batch normalization after conv1, conv2, and fc1."""
    def __init__(self, classes):
        super(LeNet_bn, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.bn1 = nn.BatchNorm2d(num_features=6)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.bn2 = nn.BatchNorm2d(num_features=16)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.bn3 = nn.BatchNorm1d(num_features=120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, classes)

    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = F.relu(out)
        out = F.max_pool2d(out, 2)
        out = self.conv2(out)
        out = self.bn2(out)
        out = F.relu(out)
        out = F.max_pool2d(out, 2)
        out = out.view(out.size(0), -1)
        out = self.fc1(out)
        out = self.bn3(out)
        out = F.relu(out)
        out = F.relu(self.fc2(out))
        out = self.fc3(out)
        return out

    def initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight.data, 0, 1)
                m.bias.data.zero_()
(2) common_tools.py, stored in the tools folder
import torch
import random
import psutil
import numpy as np
from PIL import Image
import torchvision.transforms as transforms


def transform_invert(img_, transform_train):
    """
    Invert the transforms applied to img_ to recover a viewable image
    :param img_: tensor (C x H x W)
    :param transform_train: torchvision.transforms
    :return: PIL image
    """
    if 'Normalize' in str(transform_train):
        norm_transform = list(filter(lambda x: isinstance(x, transforms.Normalize), transform_train.transforms))
        mean = torch.tensor(norm_transform[0].mean, dtype=img_.dtype, device=img_.device)
        std = torch.tensor(norm_transform[0].std, dtype=img_.dtype, device=img_.device)
        img_.mul_(std[:, None, None]).add_(mean[:, None, None])

    img_ = img_.transpose(0, 2).transpose(0, 1)  # C*H*W --> H*W*C
    if 'ToTensor' in str(transform_train):
        img_ = img_.detach().numpy() * 255

    if img_.shape[2] == 3:
        img_ = Image.fromarray(img_.astype('uint8')).convert('RGB')
    elif img_.shape[2] == 1:
        img_ = Image.fromarray(img_.astype('uint8').squeeze())
    else:
        raise Exception("Invalid img shape, expected 1 or 3 in axis 2, but got {}!".format(img_.shape[2]))
    return img_


def set_seed(seed=1):
    """Fix all random seeds for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)


def get_memory_info():
    """Report current system memory usage via psutil."""
    virtual_memory = psutil.virtual_memory()
    used_memory = virtual_memory.used / 1024 / 1024 / 1024
    free_memory = virtual_memory.free / 1024 / 1024 / 1024
    memory_percent = virtual_memory.percent
    memory_info = "Usage Memory:{:.2f} G, Percentage: {:.1f}%, Free Memory:{:.2f} G".format(
        used_memory, memory_percent, free_memory)
    return memory_info
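As a usage illustration (assuming the train_loader, train_transform, and plt from the training script above), transform_invert recovers a viewable PIL image from one augmented tensor:

# Take one augmented batch and undo Normalize/ToTensor on its first image
# (note: transform_invert modifies the tensor in place)
inputs, labels = next(iter(train_loader))
img = transform_invert(inputs[0], train_transform)
plt.imshow(img)
plt.show()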