PyTorch Getting-Started Notes

3 PyTorch

Notes from the Tudui (土堆) video tutorials

3.1 Reading image data and labels

Data preparation: link

from torch.utils.data import Dataset
import os
from PIL import Image

class MyData(Dataset):
    def __init__(self, root_dir, label_dir):
        self.root_dir = root_dir  # storing it on self makes it available to the other methods of the class
        self.label_dir = label_dir
        self.path = os.path.join(self.root_dir, self.label_dir)  # join root_dir and label_dir into the "ants" folder path; os.path.join uses the correct separator for the OS
        self.img_path = os.listdir(self.path)  # list of the image file names in that folder

    def __getitem__(self, idx):
        img_name = self.img_path[idx]
        img_item_path = os.path.join(self.root_dir, self.label_dir, img_name)
        img = Image.open(img_item_path)
        label = self.label_dir
        return img, label

    def __len__(self):
        return len(self.img_path)
    
root_dir = "D:\PythonWP\MachineLearning\learn_pytorch\hymenoptera_data\\train"  # 复制路径后,可以输出进行检查
ants_label_dir = "ants"
bees_label_dir = "bees"
ant_dataset = MyData(root_dir, ants_label_dir)  # instantiating the class calls __init__(self, root_dir, label_dir) and returns a dataset object
img, label = ant_dataset[0]  # indexing with [0] calls __getitem__(self, idx)
print(img, label)  # output: the PIL image object followed by the label "ants"
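
As a side note (a small sketch beyond the snippet above): the two per-class datasets can be concatenated into one training set, because torch Dataset objects support the + operator:

bee_dataset = MyData(root_dir, bees_label_dir)
train_dataset = ant_dataset + bee_dataset  # a torch.utils.data.ConcatDataset under the hood
print(len(train_dataset), len(ant_dataset), len(bee_dataset))  # total length = ants + bees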

3.2 TensorBoard (data visualizer)

1. Example 1

from torch.utils.tensorboard import SummaryWriter

writer=SummaryWriter("logs")
for i in range(100):
    writer.add_scalar("y=2x",2*i,i)
writer.close()
  • Running this creates a logs folder in the same directory.

  • In the terminal, run the command below; the path can also be relative. The default port is 6006, and a different one can be set with --port:

    tensorboard --logdir=D:\PythonWP\MachineLearning\learn_pytorch\logs --port=6007

  • The command prints a local URL; open it in a browser to see the plotted curve.

2. Example 2

from torch.utils.tensorboard import SummaryWriter
import  numpy as np
from PIL import Image

writer=SummaryWriter("logs")
image_path=r"D:\PythonWP\MachineLearning\learn_pytorch\hymenoptera_data\train\ants\5650366_e22b7e1065.jpg"
# add_image() only accepts certain image types (tensor or numpy.ndarray), so convert the PIL image first
img_PIL=Image.open(image_path)
img_array=np.array(img_PIL)
print(type(img_array))  # <class 'numpy.ndarray'>
print(img_array.shape)  # (375, 500, 3): (height, width, channels)
writer.add_image("test", img_array, 2, dataformats='HWC')  # (tag, image, global step, data layout)
writer.close()

Path issues: a relative path sometimes fails to load here, so an absolute path is recommended. On Windows, however, the backslash "\" in a path acts as an escape character; any of the following works around that (see the short example after this list):

  • double every backslash: "\\"
  • use forward slashes "/"
  • prefix the string with r to make it a raw string, so the backslashes are taken literally
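
A minimal illustration of the three options, using the logs path from above (path1/path2/path3 are just illustrative names):

path1 = "D:\\PythonWP\\MachineLearning\\learn_pytorch\\logs"  # double every backslash
path2 = "D:/PythonWP/MachineLearning/learn_pytorch/logs"      # forward slashes
path3 = r"D:\PythonWP\MachineLearning\learn_pytorch\logs"     # raw string: backslashes are taken literally
print(path1 == path3)  # True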

3.3 transforms (image transformations)

transforms is a package for changing an image's format, size, and data type.

1. The __call__ function

class Person:
    def __call__(self, name):
        print("__call__ says: " + name)

    def sayName(self, name):
        print("sayName says: " + name)

person = Person()
person("别期望")  # __call__ says: 别期望  (calling the instance directly invokes __call__)
person.sayName("贺梦坤")  # sayName says: 贺梦坤

2. Commonly used functions in transforms

from PIL import Image
from torchvision import transforms
import cv2
from torch.utils.tensorboard import SummaryWriter

img_path=r"D:\PythonWP\MachineLearning\learn_pytorch\hymenoptera_data\train\ants\0013035.jpg"

# ToTensor: convert a PIL Image to a tensor (ToTensor implements __call__, so the instance below is applied to the image like a function)
img=Image.open(img_path)
tensor_trans=transforms.ToTensor()
tensor_img=tensor_trans(img)
# print(tensor_img)

# convert an OpenCV numpy.ndarray to a tensor
cv_img = cv2.imread(img_path)  # cv2.imread cannot handle paths containing Chinese characters
cv_to_tensor_img=tensor_trans(cv_img)
# print(cv_to_tensor_img)

# Normalize: per-channel normalization, output = (input - mean) / std
# print(tensor_img[0][0][0])
trans_norm = transforms.Normalize([1, 5, 10], [5, 2, 1])  # per-channel means and standard deviations
img_norm=trans_norm(tensor_img)
# print(img_norm[0][0][0])

# Resize: change the image size
print(img.size)
trans_resize=transforms.Resize((512,512))
img_resize=trans_resize(img)
img_resize = tensor_trans(img_resize)  # ToTensor also rearranges (H, W, C) into (C, H, W); without this step, pass dataformats to add_image() instead

# Compose: chain the transforms above
trans_resize_2 = transforms.Resize(512)
# PIL -> PIL -> tensor: ToTensor must come last inside Compose, so the order matters
trans_compose=transforms.Compose([trans_resize_2,tensor_trans])
img_resize_2=trans_compose(img)

writer=SummaryWriter("logs")
writer.add_image("Compose",img_resize_2,1)
writer.close()

3.4 Using the datasets in torchvision

import torchvision
from torch.utils.tensorboard import SummaryWriter

dataset_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor()
])
# args: (root directory, whether this is the training split, transform pipeline, whether to download)
train_set = torchvision.datasets.CIFAR10(root="./dataset", train=True, transform=dataset_transform, download=True)
test_set = torchvision.datasets.CIFAR10(root="./dataset", train=False, transform=dataset_transform, download=True)

# print(test_set[0])  # (image tensor, 3)
# print(test_set.classes)  # ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# img, target = test_set[0]
# print(img)  # the image tensor
# print(target)  # 3
writer = SummaryWriter("pp10")
for i in range(10):
    img, target = test_set[i]
    writer.add_image("test_set", img, i)
writer.close()

3.5 DataLoader (data loader)

import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

test_data = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor())
# DataLoader arguments: (dataset, images per batch, shuffle or not, number of worker subprocesses, drop the last incomplete batch or not)
test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=True, num_workers=0, drop_last=True)
img,target=test_data[0]

writer = SummaryWriter("dataloader")  # TensorBoard output folder: dataloader
for epoch in range(2):  # go over the whole test set twice
    step = 0
    for data in test_loader:
        imgs, targets = data
        writer.add_images("Epoch: {}".format(epoch), imgs, step)  # .format(epoch) gives each epoch its own tag
        step=step+1
writer.close()
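
A quick check of what one batch from the loader looks like (a small sketch using test_loader from above):

imgs, targets = next(iter(test_loader))
print(imgs.shape)     # torch.Size([64, 3, 32, 32]): a batch of 64 CIFAR10 images
print(targets.shape)  # torch.Size([64]): one class index per image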

3.6 Convolution

Official documentation: torch.nn.functional

1. Processing a matrix with F.conv2d()

import torch
import torch.nn.functional as F

input=torch.tensor([[1,2,0,3,1],
                    [0,1,2,3,1],
                    [1,2,1,0,0],
                    [5,2,3,1,1],
                    [2,1,0,1,1]])
kernel=torch.tensor([[1,2,1],
                     [0,1,0],
                     [2,1,0]])
# reshape to (batch size, channels, height, width), the 4-D format conv2d expects
input=torch.reshape(input,(1,1,5,5))
kernel=torch.reshape(kernel,(1,1,3,3))

print(input.shape)  #torch.Size([1, 1, 5, 5])
print(kernel.shape) #torch.Size([1, 1, 3, 3])
# conv2d(input, kernel, stride)
# padding: 0 by default; padding=1 pads the input with one ring of zeros
# dilation: 1 by default; the spacing between kernel elements
# bias: optional bias added to the output, None by default
output = F.conv2d(input, kernel, stride=1)
print(output)   # tensor([[[[10, 12, 12], [18, 16, 16], [13,  9,  3]]]])

output2 = F.conv2d(input, kernel, stride=2)
print(output2)  # tensor([[[[10, 12], [13,  3]]]])

output3 = F.conv2d(input, kernel, stride=1, padding=1)
print(output3)

2. Processing images with nn.Conv2d()

import torch
import torchvision
from torch.utils.data import DataLoader
from torch import nn
from torch.nn import Conv2d
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)
dataloader = DataLoader(dataset, batch_size=64)

class Test_conv(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0)

    def forward(self, x):
        x = self.conv1(x)
        return x

t_conv = Test_conv()

writer = SummaryWriter("./logs")
step = 0
for data in dataloader:
    imgs, targets = data
    output = t_conv(imgs)
    # print(imgs.shape)  # torch.Size([64, 3, 32, 32])
    # print(output.shape)  # torch.Size([64, 6, 30, 30])
    writer.add_images("input", imgs, step)
    # TensorBoard's add_images can only display 3-channel images
    # reshape the 6-channel output to 3 channels, which effectively spreads it across more batch entries; -1 lets PyTorch infer the new batch size
    output = torch.reshape(output, (-1, 3, 30, 30))

    writer.add_images("output", output, step)
    step += 1
writer.close()

3.7 Pooling

import torchvision
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=64)

class Test_maxpooling(nn.Module):
    def __init__(self):
        super().__init__()
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=False)  # ceil_mode=False (the default) drops the incomplete window at the border

    def forward(self,x):
        output=self.maxpool1(x)
        return output

t_mp=Test_maxpooling()
writer=SummaryWriter("logs_maxpooling")

step=0
for data in dataloader:
    imgs,targets=data
    writer.add_images("input2",imgs,step)
    output=t_mp(imgs)
    writer.add_images("output2",output,step)
    step+=1
writer.close()

The ceil_mode parameter: when the pooling window runs past the edge of the input, ceil_mode=True keeps the partial window (the output size is rounded up), while the default ceil_mode=False discards it.
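
A minimal sketch of the difference, reusing the 5×5 matrix from section 3.6 (MaxPool2d needs a float input):

import torch
from torch import nn

x = torch.tensor([[1, 2, 0, 3, 1],
                  [0, 1, 2, 3, 1],
                  [1, 2, 1, 0, 0],
                  [5, 2, 3, 1, 1],
                  [2, 1, 0, 1, 1]], dtype=torch.float32)
x = torch.reshape(x, (1, 1, 5, 5))  # (batch, channels, height, width)

# kernel_size=3 also sets stride=3, so only the top-left 3x3 window fits completely
print(nn.MaxPool2d(kernel_size=3, ceil_mode=True)(x))   # tensor([[[[2., 3.], [5., 1.]]]])
print(nn.MaxPool2d(kernel_size=3, ceil_mode=False)(x))  # tensor([[[[2.]]]])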

3.8 Non-linear activations

import torchvision
from torch import nn
from torch.nn import ReLU, Sigmoid
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=64)

class Test_activation(nn.Module):
    def __init__(self):
        super().__init__()
        # two common non-linear activation functions
        self.relu = ReLU()
        self.sigmoid = Sigmoid()

    def forward(self,x):
        output=self.relu(x)
        return output

t_act = Test_activation()
writer=SummaryWriter("logs_line")

step=0
for data in dataloader:
    imgs,targets=data
    writer.add_images("input",imgs,step)
    output = t_act(imgs)
    writer.add_images("output",output,step)
    step+=1
writer.close()

3.9 Linear layers

self.linear1 = Linear(196608, 10)  # maps an input vector of length 196608 to 10 outputs

output = torch.flatten(imgs)  # torch.Size([64, 3, 32, 32]) -> torch.Size([196608]); linear1 then maps it to torch.Size([10])
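
A runnable sketch of the whole idea (a minimal example, assuming the CIFAR10 dataloader from the previous sections with batch_size=64 and drop_last=True, so every batch flattens to exactly 196608 values):

import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("./dataset", train=False,
                                       transform=torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)

class Test_linear(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = Linear(196608, 10)  # 196608 = 64 * 3 * 32 * 32

    def forward(self, x):
        return self.linear1(x)

t_linear = Test_linear()
for data in dataloader:
    imgs, targets = data
    output = torch.flatten(imgs)  # torch.Size([196608])
    output = t_linear(output)     # torch.Size([10])
    print(output.shape)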

3.10 A complete network with Sequential

import torch
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear

class Test_seq(nn.Module):
    def __init__(self):
        super(Test_seq, self).__init__()
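        # the individual layers below are listed for reference only; forward() uses the equivalent Sequential model1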
        self.conv1 = Conv2d(3, 32, 5, padding=2)
        self.maxpool1 = MaxPool2d(2)
        self.conv2 = Conv2d(32, 32, 5, padding=2)
        self.maxpool2 = MaxPool2d(2)
        self.conv3 = Conv2d(32, 64, 5, padding=2)
        self.maxpool3 = MaxPool2d(2)
        self.flatten = Flatten()
        self.linear1 = Linear(1024, 64)
        self.linear2 = Linear(64, 10)

        self.model1 = nn.Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x

t_seq = Test_seq()
input = torch.ones(64, 3, 32, 32)  # a batch of 64 all-ones "images": 3 channels, 32x32
output = t_seq(input)
print(output.shape)  # torch.Size([64, 10])
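
The model graph itself can also be written to TensorBoard; a small sketch reusing t_seq and input from above (the logs_seq folder name is arbitrary):

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("./logs_seq")
writer.add_graph(t_seq, input)  # the network structure appears under the GRAPHS tab
writer.close()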

3.11 Optimizers

import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)
dataloader = DataLoader(dataset, batch_size=1)

class HMK(nn.Module):
    def __init__(self):
        super(HMK, self).__init__()
        self.conv1 = Conv2d(3, 32, 5, padding=2)
        self.maxpool1 = MaxPool2d(2)
        self.conv2 = Conv2d(32, 32, 5, padding=2)
        self.maxpool2 = MaxPool2d(2)
        self.conv3 = Conv2d(32, 64, 5, padding=2)
        self.maxpool3 = MaxPool2d(2)
        self.flatten = Flatten()
        self.linear1 = Linear(1024, 64)
        self.linear2 = Linear(64, 10)

        self.model1 = nn.Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x

loss = nn.CrossEntropyLoss()
hmk = HMK()
optim = torch.optim.SGD(hmk.parameters(), lr=0.01)  # create the optimizer with (model parameters, learning rate)
for epoch in range(20):
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        outputs = hmk(imgs)
        result_loss = loss(outputs, targets)
        optim.zero_grad()  # reset the gradients of every parameter to zero
        result_loss.backward()  # backpropagation: compute the gradient of every parameter
        optim.step()  # update each parameter using its gradient
        running_loss = running_loss + result_loss.item()
    print(f"Epoch {epoch} total training loss:", running_loss)

3.12 Modifying an existing network model

1.vgg16

vgg16_false = torchvision.models.vgg16(pretrained=False)  # weights are randomly initialized
vgg16_true = torchvision.models.vgg16(pretrained=True)    # weights are pretrained on ImageNet
# Reuse an existing network and modify it, treating it as a pretrained front end / feature extractor

dataset = torchvision.datasets.CIFAR10("./dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)
# CIFAR10 has 10 classes but VGG16 outputs 1000, so map 1000 -> 10 with an extra linear layer
vgg16_true.add_module('add_linear', nn.Linear(1000, 10))  # option 1: append a new layer
# print(vgg16_true)
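# Note: add_module only registers the layer on the model; VGG16's forward() will not call a
# top-level layer added this way. Registering it inside the classifier instead, e.g.
# vgg16_true.classifier.add_module('add_linear', nn.Linear(1000, 10)), makes it run as part
# of the forward pass, because classifier is an nn.Sequential.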



vgg16_false.classifier[6] = nn.Linear(4096, 10)  # option 2: replace the classifier's last linear layer
print(vgg16_false)

3.13 Saving and loading models

1. Saving

vgg16=torchvision.models.vgg16(pretrained=False)
# Method 1: saves the network structure together with its parameters
torch.save(vgg16,"vgg16_method1.pth")

# Method 2: saves only the parameters as a state dict (officially recommended, smaller file)
torch.save(vgg16.state_dict(),"vgg16_method2.pth")

2. Loading

# Method 1: load the full model, matching save method 1
model1=torch.load("vgg16_method1.pth")
print(model1)

# Method 2: build the model first, then load the saved state dict into it
vgg16=torchvision.models.vgg16(pretrained=False)
vgg16.load_state_dict(torch.load("vgg16_method2.pth"))
# model2=torch.load("vgg16_method2.pth")
print(vgg16)
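
One caveat with method 1 (a small sketch with a hypothetical custom class Tudui, not part of the code above): torch.load un-pickles the whole model object, so the class definition must be importable in the script that loads it.

import torch
from torch import nn

class Tudui(nn.Module):  # hypothetical custom model
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3)

    def forward(self, x):
        return self.conv1(x)

torch.save(Tudui(), "tudui_method1.pth")

# In another script, torch.load("tudui_method1.pth") only works if the Tudui class is
# visible there as well, e.g. via `from model_save import Tudui` (importing is enough,
# the object does not have to be re-created by hand).
model = torch.load("tudui_method1.pth")
print(model)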

3.14 A complete training pipeline

1. The argmax function

outputs = torch.tensor([[0.4, 0.7],
                        [0.5, 0.6]])
print(outputs.argmax(1))  # index of the max value in each row: tensor([1, 1])
print(outputs.argmax(0))  # index of the max value in each column: tensor([1, 0])
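
This is how the classification accuracy is computed in the training script below; a small illustration reusing outputs from above, with hypothetical targets:

targets = torch.tensor([0, 1])
preds = outputs.argmax(1)        # tensor([1, 1])
print((preds == targets).sum())  # tensor(1): one of the two predictions is correct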

2. Train a classifier on the CIFAR10 dataset (10 classes) for 10 epochs; each epoch, log the training loss, test loss, and classification accuracy to TensorBoard, and use CUDA to speed up training.

  • train.py
    import torch
    import torchvision
    from torch.utils.data import DataLoader
    from torch.utils.tensorboard import SummaryWriter
    from learn_pytorch.test_model_hmk.model import *
    
    # prepare and load the data
    train_data = torchvision.datasets.CIFAR10(root="./dataset", train=True, transform=torchvision.transforms.ToTensor(),
                                              download=True)
    test_data = torchvision.datasets.CIFAR10(root="./dataset", train=False, transform=torchvision.transforms.ToTensor(),
                                             download=True)
    # choose the device to run on
    device = torch.device("cuda:0")
    # dataset sizes
    train_data_size = len(train_data)
    test_data_size = len(test_data)
    print(f"Training set size: {train_data_size}")
    print(f"Test set size: {test_data_size}")
    
    # load the data with DataLoader
    train_dataloader = DataLoader(train_data, batch_size=64)
    test_dataloader = DataLoader(test_data, batch_size=64)
    
    # create the network model
    hmk = HMK()
    # move the model to the chosen device
    hmk = hmk.to(device)

    # loss function
    loss_fn = nn.CrossEntropyLoss()
    # move the loss function to the chosen device
    loss_fn = loss_fn.to(device)
    # optimizer
    learning_rate = 1e-2
    optimizer = torch.optim.SGD(hmk.parameters(), lr=learning_rate)
    # number of training steps so far
    total_train_step = 0
    # number of test evaluations so far
    total_test_step = 0
    # number of training epochs
    epoch = 10
    # add TensorBoard
    writer = SummaryWriter("./logs_train")
    
    for i in range(epoch):
        print(f"--------第{i}轮训练开始---------")
        # 如果网络中有Dropout,BatchNorm等,才会产生作用
        hmk.train()
        for data in train_dataloader:
            imgs, targets = data
            # move the batch to the device
            imgs = imgs.to(device)
            targets = targets.to(device)
    
            output = hmk(imgs)
            loss = loss_fn(output, targets)
            # optimize the model: zero the gradients, backpropagate, update the parameters
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
    
            total_train_step += 1
    
            if total_train_step % 100 == 0:
                print(f"Training step: {total_train_step}, loss: {loss.item()}")
                writer.add_scalar("train_loss", loss.item(), total_train_step)
        # eval() only changes behavior for layers such as Dropout and BatchNorm
        hmk.eval()
        # evaluation on the test set starts here
        total_test_loss = 0
        # total number of correct predictions
        total_accuracy = 0
        # gradients are not needed during evaluation, so disable them
        with torch.no_grad():
            for data in test_dataloader:
                imgs, targets = data
                # move the batch to the device
                imgs = imgs.to(device)
                targets = targets.to(device)
                output = hmk(imgs)
                loss = loss_fn(output, targets)
                total_test_loss = total_test_loss + loss.item()
                accuracy = (output.argmax(1) == targets).sum()
                total_accuracy = accuracy + total_accuracy
            print(f"Test set accuracy: {total_accuracy / test_data_size}")
        print(f"Total loss on the test set: {total_test_loss}")
        writer.add_scalar("test_loss", total_test_loss, total_test_step)
        writer.add_scalar("test_accuracy", total_accuracy / test_data_size, total_test_step)
        total_test_step += 1
        # save the model after every epoch
        torch.save(hmk, f"hmk_{i}.pth")
        print("Model saved")
    writer.close()
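
  • model.py is imported by train.py but not shown above; a minimal sketch, assuming it reuses the Sequential network from sections 3.10/3.11:

    import torch
    from torch import nn

    class HMK(nn.Module):
        def __init__(self):
            super(HMK, self).__init__()
            self.model1 = nn.Sequential(
                nn.Conv2d(3, 32, 5, padding=2),
                nn.MaxPool2d(2),
                nn.Conv2d(32, 32, 5, padding=2),
                nn.MaxPool2d(2),
                nn.Conv2d(32, 64, 5, padding=2),
                nn.MaxPool2d(2),
                nn.Flatten(),
                nn.Linear(1024, 64),
                nn.Linear(64, 10)
            )

        def forward(self, x):
            return self.model1(x)

    if __name__ == '__main__':
        # quick shape check: 64 CIFAR10-sized images -> 64 x 10 class scores
        hmk = HMK()
        input = torch.ones((64, 3, 32, 32))
        output = hmk(input)
        print(output.shape)  # torch.Size([64, 10])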

