PyTorch model training and testing

Contents

1 Loading the dataset

2 Using TensorBoard to visualize image data after each layer

3 Complete model training and testing workflow

Two ways to train on the GPU

Displaying the model with TensorBoard

Model training and testing

The L1Loss function

Saving an untrained model or a trained model

4 Loading a trained model for testing


1 Loading the dataset

import torch
from torch.utils.data import DataLoader
import torchvision
from torchvision import transforms
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
# Prepare the datasets
train_data = torchvision.datasets.CIFAR10('./data', train=True, transform=transforms.ToTensor(), download=True)
print("train dataset", train_data)
print(f"length of train_data: {len(train_data)}")
# Wrap the dataset in a DataLoader (the same variable name is reused for the loader)
train_data = DataLoader(dataset=train_data, batch_size=64, shuffle=True)
print("------------------------------------------")
test_data = torchvision.datasets.CIFAR10('./data', train=False, transform=transforms.ToTensor(), download=True)
print("test dataset", test_data)
print(f"length of test_data: {len(test_data)}")
print("first sample", test_data[0])
test_data = DataLoader(dataset=test_data, batch_size=64, shuffle=True)

train dataset Dataset CIFAR10
    Number of datapoints: 50000
    Root location: ./data
    Split: Train
    StandardTransform
Transform: ToTensor()
length of train_data: 50000
------------------------------------------
Files already downloaded and verified
test dataset Dataset CIFAR10
    Number of datapoints: 10000
    Root location: ./data
    Split: Test
    StandardTransform
Transform: ToTensor()
length of test_data: 10000
first sample (tensor([[[0.6196, 0.6235, 0.6471,  ..., 0.5373, 0.4941, 0.4549],
         [0.5961, 0.5922, 0.6235,  ..., 0.5333, 0.4902, 0.4667],
         [0.5922, 0.5922, 0.6196,  ..., 0.5451, 0.5098, 0.4706],
         ...,
         [0.2667, 0.1647, 0.1216,  ..., 0.1490, 0.0510, 0.1569],
         [0.2392, 0.1922, 0.1373,  ..., 0.1020, 0.1137, 0.0784],
         [0.2118, 0.2196, 0.1765,  ..., 0.0941, 0.1333, 0.0824]],

        [[0.4392, 0.4353, 0.4549,  ..., 0.3725, 0.3569, 0.3333],
         [0.4392, 0.4314, 0.4471,  ..., 0.3725, 0.3569, 0.3451],
         [0.4314, 0.4275, 0.4353,  ..., 0.3843, 0.3725, 0.3490],
 
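As a quick sanity check (a minimal sketch, not part of the original script), you can pull one batch from the test DataLoader and confirm its shape before building any layers:

images, labels = next(iter(test_data))   # test_data is the DataLoader at this point
print(images.shape)                      # torch.Size([64, 3, 32, 32])
print(labels.shape)                      # torch.Size([64])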

2 Using TensorBoard to visualize image data after each layer

class convModel(nn.Module):
    def __init__(self):
        super(convModel, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0, bias=True, padding_mode='zeros')
    def forward(self, input_data):
        return self.conv1(input_data)
write = SummaryWriter('convModel')
model = convModel()
for batch_id, data in enumerate(test_data):
    write.add_images('original images', data[0], dataformats='NCHW', global_step=batch_id)
    input_data, label = data[0], data[1]
    print("batchSize", input_data.size(0))
    output_data = model(input_data)
    # After the convolution the tensor has 6 channels, but add_images expects an image-like
    # channel count (e.g. 3), so reshape the 6 channels into 3; the extra channels are folded
    # into the batch dimension, which is why the batch grows from 64 to 128.
    output_shape_data = torch.reshape(output_data, (-1, 3, 30, 30))
    write.add_images('images after the conv layer', output_shape_data, global_step=batch_id)
    if batch_id % 300 == 0:
        print("shape of the original data", input_data.shape)
        print("shape after the conv layer", output_data.shape)
        print("shape after reshaping", output_shape_data.shape)
write.close()

batchSize 64
shape of the original data torch.Size([64, 3, 32, 32])
shape after the conv layer torch.Size([64, 6, 30, 30])
shape after reshaping torch.Size([128, 3, 30, 30])
batchSize 64
batchSize 64
batchSize 64
batchSize 64
batchSize 64
batchSize 64
batchSize 64
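The 30×30 spatial size in the printout follows from the standard Conv2d output formula: H_out = (H_in + 2·padding − kernel_size) / stride + 1 = (32 + 0 − 3) / 1 + 1 = 30, and likewise for the width.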

class maxPoolingModel(nn.Module):
    def __init__(self):
        super(maxPoolingModel, self).__init__()
        self.MaxPool2d = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=False)
    def forward(self, input_data):
        return self.MaxPool2d(input_data)
write = SummaryWriter('maxPoolingModel')
model = maxPoolingModel()
for batch_id, data in enumerate(test_data):
    write.add_images('original images', data[0], dataformats='NCHW', global_step=batch_id)
    input_data, label = data[0], data[1]
    output_data = model(input_data)
    write.add_images('images after the max-pooling layer', output_data, global_step=batch_id)
    if batch_id % 300 == 0:
        print("shape of the original data", input_data.shape)
        print("shape after the max-pooling layer", output_data.shape)
write.close()

shape of the original data torch.Size([64, 3, 32, 32])
shape after the max-pooling layer torch.Size([64, 3, 16, 16])

class unLineModel(nn.Module):
    def __init__(self):
        super(unLineModel, self).__init__()
        self.sigmoid = nn.Sigmoid()
    def forward(self, input_data):
        return self.sigmoid(input_data)
write = SummaryWriter('unLineModel')
model = unLineModel()
for batch_id, data in enumerate(test_data):
    write.add_images('original images', data[0], dataformats='NCHW', global_step=batch_id)
    input_data, label = data[0], data[1]
    output_data = model(input_data)
    write.add_images('images after the non-linear layer', output_data, global_step=batch_id)
    if batch_id % 300 == 0:
        print("shape of the original data", input_data.shape)
        print("shape after the non-linear layer", output_data.shape)
write.close()

shape of the original data torch.Size([64, 3, 32, 32])
shape after the non-linear layer torch.Size([64, 3, 32, 32])

3 Complete model training and testing workflow

class Model(nn.Module):
    def __init__(self):
        super(Model,self).__init__()
        self.model = nn.Sequential(
                  nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2,stride=1),
                  nn.MaxPool2d(kernel_size=2, stride=2),
                  nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, padding=2,stride=1),
                  nn.MaxPool2d(kernel_size=2, stride=2),
                  nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2,stride=1),
                  nn.MaxPool2d(kernel_size=2, stride=2),
                  nn.Flatten(),
                  nn.Linear(in_features=1024, out_features=64),
                  nn.Linear(in_features=64, out_features=10)
                )
    def forward(self, batch_data):
        return self.model(batch_data)
# Method 1: call .cuda() on the model, the loss function, and the training/test data. These calls must be
# guarded with if torch.cuda.is_available(); without the check the program crashes on machines without a GPU.
# Method 2: create a device ("cuda:0" addresses a single GPU, falling back to 'cpu') and move everything onto it with .to()
device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')
model = Model()
model.to(device=device)
print(model)
batch_data = torch.ones((64,3,32,32)).to(device)
output = model(batch_data)
print("output.shape", output.shape)

Model(
  (model): Sequential(
    (0): Conv2d(3, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (2): Conv2d(32, 32, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (4): Conv2d(32, 64, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    (5): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (6): Flatten(start_dim=1, end_dim=-1)
    (7): Linear(in_features=1024, out_features=64, bias=True)
    (8): Linear(in_features=64, out_features=10, bias=True)
  )
)
output.shape torch.Size([64, 10])
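A note on the Linear(1024, 64) layer: every 5×5 convolution here uses padding=2, so it preserves the 32×32 spatial size, while each MaxPool2d(kernel_size=2, stride=2) halves it (32 → 16 → 8 → 4). After the last block the tensor is 64 channels × 4 × 4 = 1024 features per sample, which is exactly what Flatten hands to the first Linear layer.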

Two ways to train on the GPU

Method 1: call .cuda() on the model, the loss function, and the training/test tensors. The calls must be wrapped in if torch.cuda.is_available() so that the code falls back to the CPU when no GPU exists; calling .cuda() without that check raises an error on machines without CUDA and the program will not run.

Method 2: create a device object, e.g. device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu') ("cuda:0" addresses a single GPU). In this case you only need to move the model, the loss function, and the training/test data onto the device with .to(device).
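For reference, a minimal sketch of method 1 (assuming the Model class from above; the loss-function name is illustrative):

model = Model()
loss_fn = nn.CrossEntropyLoss()
if torch.cuda.is_available():
    model = model.cuda()
    loss_fn = loss_fn.cuda()
# inside the training/test loops the batches are moved the same way:
#     input_data, input_label = input_data.cuda(), input_label.cuda()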
Displaying the model with TensorBoard
from torch.utils.tensorboard import SummaryWriter
write = SummaryWriter('model')
write.add_graph(model=model,input_to_model=batch_data)
write.close()
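To view the resulting graph (and the scalar curves logged later), launch TensorBoard against the matching log directory, e.g. tensorboard --logdir=model, or --logdir=figure for the training/test curves below.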
Model training and testing
# ------------------------- Shape requirements from the CrossEntropyLoss() docstring -------------------
# Shape:
#         - Input: :math:`(N, C)` where `C = number of classes`, or
#           :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`
#           in the case of `K`-dimensional loss.
#         - Target: :math:`(N)` where each value is :math:`0 \leq \text{targets}[i] \leq C-1`, or
#           :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case of
#           K-dimensional loss.
#         - Output: scalar.
#           If :attr:`reduction` is ``'none'``, then the same size as the target:
#           :math:`(N)`, or
#           :math:`(N, d_1, d_2, ..., d_K)` with :math:`K \geq 1` in the case
#           of K-dimensional loss.
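# A minimal shape check (a hedged sketch, separate from the training loop below): logits of shape
# (N, C) = (4 samples, 10 classes) and targets of shape (N,) holding class indices in [0, C-1]
demo_logits = torch.randn(4, 10)
demo_targets = torch.tensor([3, 8, 0, 5])
print(nn.CrossEntropyLoss()(demo_logits, demo_targets))  # prints a single scalar loss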
criteria = nn.CrossEntropyLoss()
criteria.to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
sum_loss_list = []
epoch_list = []
from torch.utils.tensorboard import  SummaryWriter
write = SummaryWriter("figure")
total_train_step = 0
total_test_step = 0
for epoch in range(50):
    ever_epoch_loss_sum = 0.0
    print("---------------------第 {} 轮训练开始---------------------".format(epoch+1))
    model.train()
    for batch_id, data in enumerate(train_data):
        input_data, input_label = data
        input_data = input_data.to(device)
        input_label = input_label.to(device)
        output_data = model(input_data)
        # if batch_id%300 == 0:
        #     print("input_data", input_data.shape)  # input_data torch.Size([64, 3, 32, 32])
        #     print("output_data", output_data.shape) # output_data torch.Size([64, 10])
        #     print("input_label", input_label.shape) # input_label torch.Size([64])
        loss = criteria(output_data, input_label)
        ever_epoch_loss_sum = ever_epoch_loss_sum + loss.item()
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
        if total_train_step % 200 == 0:
            print("当前总的训练次数:{} ,每一次的Loss:{}".format(total_train_step, loss.item()))
            write.add_scalar('train_loss', loss.item(), total_train_step)
        total_train_step = total_train_step + 1
    sum_loss_list.append(ever_epoch_loss_sum)
    epoch_list.append(epoch)
    print("---------------------第 {} 轮测试开始---------------------".format(epoch+1))
    model.eval()
    total_test_loss = 0
    total_accuracy = 0
    with torch.no_grad():
        for batch_id, data in enumerate(test_data):
            images, label = data
            images = images.to(device)
            label = label.to(device)
            output = model(images)
            loss = criteria(output,label)
            total_test_loss = total_test_loss + loss.item()
            accuracy = (output.argmax(1) == label).sum()
            total_accuracy = total_accuracy + accuracy
    print("整体测试集上的Loss: {}".format(total_test_loss))
    print("整体数据集上的正确率:{}".format(total_accuracy/len(test_data)))
    write.add_scalar("test_accuracy",total_accuracy/len(test_data),total_test_step)
    write.add_scalar('test_loss', total_test_loss, total_test_step)
    total_test_step = total_test_step + 1
    torch.save(model,'model_{}.pth'.format(epoch))
    print("保存第 {} 轮模型".format(epoch+1))
write.close()
The L1Loss function
inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
print("original inputs", inputs)
print("inputs.shape", inputs.shape)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)
print("targets", targets)
print("targets.shape", targets.shape)
inputs = torch.reshape(inputs, (1, -1))
print("reshaped inputs", inputs)
print("reshaped inputs.shape", inputs.shape)
targets = torch.reshape(targets, (1, -1))
print("reshaped targets", targets)
print("reshaped targets.shape", targets.shape)
loss = nn.L1Loss()
# From the L1Loss docstring:
#  - Input: :math:`(N, *)` where :math:`*` means, any number of additional
#           dimensions
#  - Target: :math:`(N, *)`, same shape as the input
result = loss(inputs, targets)
print(result)

original inputs tensor([1., 2., 3.])
inputs.shape torch.Size([3])
targets tensor([1., 2., 5.])
targets.shape torch.Size([3])
reshaped inputs tensor([[1., 2., 3.]])
reshaped inputs.shape torch.Size([1, 3])
reshaped targets tensor([[1., 2., 5.]])
reshaped targets.shape torch.Size([1, 3])
tensor(0.6667)
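The result follows from the element-wise absolute differences |1−1| + |2−2| + |5−3| = 2, averaged over the 3 elements (L1Loss defaults to reduction='mean'): 2/3 ≈ 0.6667.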

Saving an untrained model or a trained model
# Saving the model (this saves the whole module object, not just the parameters)
torch.save(model, 'class_model.pth')
# Loading the model: if the loading code does not live in the same file as the model definition,
# the original model class must first be imported into the current file, and only then can torch.load be used
model = torch.load('class_model.pth')
print(model)
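Besides saving the whole module, PyTorch can also save only the parameters (the state dict); a minimal sketch of this second way (the file name here is illustrative):

torch.save(model.state_dict(), 'class_model_state.pth')       # save only the parameters
model2 = Model()                                               # the class definition is still required
model2.load_state_dict(torch.load('class_model_state.pth'))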

4 Loading a trained model for testing

import torch.nn as nn
import torch
class Model(nn.Module):
    def __init__(self):
        super(Model,self).__init__()
        self.model = nn.Sequential(
                  nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, padding=2,stride=1),
                  nn.MaxPool2d(kernel_size=2, stride=2),
                  nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, padding=2,stride=1),
                  nn.MaxPool2d(kernel_size=2, stride=2),
                  nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, padding=2,stride=1),
                  nn.MaxPool2d(kernel_size=2, stride=2),
                  nn.Flatten(),
                  nn.Linear(in_features=1024, out_features=64),
                  nn.Linear(in_features=64, out_features=10)
                )
    def forward(self, batch_data):
        return self.model(batch_data)
load_model = torch.load('G:\python_files\深度学习代码库\model_49.pth')
# Load a model that was saved above as a whole module (not as a state dict). In that case the original
# model definition must be present in (or imported into) the current file before torch.load is called.
# Also note: if the saved model was trained on CUDA, either load it onto the CPU by passing
# map_location=torch.device('cpu') to torch.load, or move the image tensor onto the GPU so that it
# matches the device of the loaded model.
from torchvision import transforms
from PIL import Image
image = Image.open('G:\python_files\深度学习代码库\cats\cat\cat.10.jpg')
trans = transforms.Compose([transforms.Resize((32,32)),transforms.ToTensor()])
trans_image_tensor = trans(image)
# The model was trained with inputs of shape (N, C, H, W), so a single image loaded for testing
# must be reshaped to the shape the model expects, otherwise the forward pass raises an error
tensor_shape = torch.reshape(trans_image_tensor,(1,3,32,32)).to(torch.device("cuda:0"))
load_model.eval()
with torch.no_grad():
    output = load_model(tensor_shape)
    print(output)

Notes:

When loading a model that was saved as a whole module (not as a state dict), the original model definition must be imported into the current file before torch.load is called. If the checkpoint was trained on CUDA, either load it onto the CPU with torch.load(..., map_location=torch.device('cpu')), or move the input data onto the GPU so that it matches the model's device.

Because the model was trained with (N, C, H, W) inputs, a single test image has to be reshaped to that layout (and moved to the same device) before the forward pass:
tensor_shape = torch.reshape(trans_image_tensor, (1, 3, 32, 32)).to(torch.device("cuda:0"))
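A minimal sketch of the CPU-only variant of the two lines above (same checkpoint, assuming no GPU is available):

load_model = torch.load('G:\python_files\深度学习代码库\model_49.pth', map_location=torch.device('cpu'))
tensor_shape = torch.reshape(trans_image_tensor, (1, 3, 32, 32))   # keep the input on the CPU as well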
