PyTorch Learning Notes (8) ---- The Classic Convolutional Neural Network AlexNet and a First Look at the CIFAR10 Dataset

Preface

Starting with this section, we formally enter the world of deep neural networks. If you are not yet sure what AlexNet or CIFAR10 is, look them up first. Before studying convolutional networks you also need to understand what convolution and pooling are; detailed explanations can be found in the PDF provided under Background Theory below. This section trains AlexNet on the CIFAR10 dataset and tracks how the classification accuracy on the test set improves.
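
To get a quick feel for convolution and pooling before reading the full script, here is a minimal sketch (using the same layer settings as the network below: a 5x5 convolution and a 3x3 max pool) that shows how each layer changes the shape of a CIFAR10-sized tensor:

import torch
from torch import nn

# one dummy CIFAR10-sized image: batch 1, 3 channels, 32x32 pixels
x = torch.randn(1, 3, 32, 32)

# 5x5 convolution, stride 1, no padding: 32 -> (32 - 5)/1 + 1 = 28
conv = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=5, stride=1)
print(conv(x).shape)        # torch.Size([1, 64, 28, 28])

# 3x3 max pooling, stride 2, no padding: 28 -> floor((28 - 3)/2) + 1 = 13
pool = nn.MaxPool2d(kernel_size=3, stride=2)
print(pool(conv(x)).shape)  # torch.Size([1, 64, 13, 13])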

Background Theory

https://pan.baidu.com/s/1yv5VIYLjoOPFEwuw1Abw2g

Full Code

import torch
import numpy as np
from torchvision.datasets import CIFAR10
from torch import nn
from torch.utils.data import DataLoader,TensorDataset

def data_tf(x):
    x = np.array(x, dtype='float32') / 255
    x = (x - 0.5) / 0.5  # normalize to the range (-1, 1)
    x = x.transpose(0, 3, 1, 2)  # reorder axes: (batch, 32, 32, 3) -> (batch, 3, 32, 32) to match PyTorch's NCHW layout
    x = torch.from_numpy(x)  # convert to a tensor
    return x

o_train = CIFAR10('./data', train=True, download=True)
o_test = CIFAR10('./data', train=False, download=True)
train = data_tf(o_train.data).cuda() #(50000, 3, 32, 32)
train_label = torch.from_numpy(np.array(o_train.targets)).long().cuda() #[50000]
test = data_tf(o_test.data).cuda() #(10000,3,32,32)
test_label = torch.from_numpy(np.array(o_test.targets)).long().cuda() #[10000]
train_set = TensorDataset(train,train_label)
test_set = TensorDataset(test,test_label)
train_data = DataLoader(dataset=train_set,batch_size=64,shuffle=True)
test_data  = DataLoader(dataset=test_set,batch_size=64,shuffle=False)
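
# Optional sanity check: one batch from the loader should be images of shape
# [64, 3, 32, 32] and labels of shape [64], matching the comments above
sample_imgs, sample_labels = next(iter(train_data))
print(sample_imgs.shape, sample_labels.shape)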

#AlexNet network structure; output size of a conv/pool layer: wout = (w - k + 2p)/s + 1
#Input (batch,3,32,32)->conv1(batch,64,28,28)->max_pool1(batch,64,13,13)->conv2(batch,64,9,9)
#->max_pool2(batch,64,4,4)->fc1(batch,384)->fc2(batch,192)->fc3(batch,10)
class AlexNet(torch.nn.Module):  # inherit from torch.nn.Module
    def __init__(self):
        super(AlexNet, self).__init__()  # call the parent class's __init__
        # Layer 1: 5x5 convolution, in_channels = 3, out_channels = 64, stride 1, no padding
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=5, stride=1),
            nn.ReLU(inplace=True)  # in-place ReLU saves (GPU) memory and avoids repeated allocation/free; it overwrites its input tensor, which is safe here
        )
        # Layer 2: 3x3 max pooling, stride 2, no padding
        self.max_pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
        # Layer 3: 5x5 convolution, 64 -> 64 channels, stride 1, no padding
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=5, stride=1),
            nn.ReLU(True)
        )
        # Layer 4: 3x3 max pooling, stride 2, no padding
        self.max_pool2 = nn.MaxPool2d(kernel_size=3, stride=2)
        # Layer 5: fully connected, input 1024 (= 64*4*4), output 384
        self.fc1 = nn.Sequential(
            nn.Linear(1024,384),
            nn.ReLU(True)
        )
        # Layer 6: fully connected, input 384, output 192
        self.fc2 = nn.Sequential(
            nn.Linear(384,192),
            nn.ReLU(True)
        )
        # Layer 7: fully connected, 192 -> 10
        self.fc3 = nn.Linear(192,10)

    def forward(self, x):  # this defines the Module's forward pass
        x = self.conv1(x)
        x = self.max_pool1(x)
        x = self.conv2(x)
        x = self.max_pool2(x)
        # flatten the feature maps
        x = x.view(x.shape[0], -1)  # reshape each sample into a flat vector: (batch, 64*4*4) = (batch, 1024)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x
alexnet = AlexNet().cuda()
# print(alexnet)
#optimizer (SGD, Adam, ...)
optimizer = torch.optim.Adam(alexnet.parameters(), lr=1e-4)
#loss function: cross entropy
criterion = torch.nn.CrossEntropyLoss()
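# Note: CrossEntropyLoss applies log-softmax internally, so the network's
# final layer (fc3) outputs raw logits with no softmax attached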

for i in range(20):
    train_loss = 0
    train_acc = 0
    alexnet.train()  # put the network in training mode
    for tdata, tlabel in train_data:
        # tdata [64,3,32,32], tlabel [64]
        # forward pass
        y_ = alexnet(tdata)
        # loss for this batch
        loss = criterion(y_, tlabel)
        # backward pass and parameter update
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # accumulate the batch loss
        train_loss = train_loss + loss.item()
        # compute the classification accuracy
        _, pred = y_.max(1)  # max over each row returns the values and the indices (pred)
        num_correct = (pred == tlabel).sum().item()
        acc = num_correct/tlabel.shape[0]
        train_acc = train_acc + acc

    # losses.append(train_loss/len(train_data))
    # acces.append(train_acc/len(train_data))
    print('epoch: {}, trainloss: {},trainacc: {}'.format(i, train_loss/len(train_data), train_acc/len(train_data)))

    # evaluate on the test set
    eval_loss = 0
    eval_acc = 0
    alexnet.eval()  # switch to eval mode (optional here since there is no dropout/batchnorm, but good practice)
    for edata, elabel in test_data:
        # forward pass
        y_ = alexnet(edata)
        # loss for this batch; no backward pass or parameter update on the test set
        loss = criterion(y_, elabel)
        # accumulate the batch loss
        eval_loss = eval_loss + loss.item()
        # compute the classification accuracy
        _, pred = y_.max(1)  # max over each row returns the values and the indices (pred)
        num_correct = (pred == elabel).sum().item()
        acc = num_correct/elabel.shape[0]
        eval_acc = eval_acc + acc

    print('epoch: {}, evalloss: {},evalacc: {}'.format(i, eval_loss/len(test_data), eval_acc/len(test_data)))

Much of this code carries over from the previous notes; in later sections the reusable parts will be factored out so they can be called directly. What matters most is understanding this block:
#AlexNet network structure; output size of a conv/pool layer: wout = (w - k + 2p)/s + 1
#Input (batch,3,32,32)->conv1(batch,64,28,28)->max_pool1(batch,64,13,13)->conv2(batch,64,9,9)
#->max_pool2(batch,64,4,4)->fc1(batch,384)->fc2(batch,192)->fc3(batch,10)
and being able to follow how the data shapes change at each step.
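
One quick way to verify that shape flow is to push a dummy CIFAR10-sized batch through the layers one at a time and print the shapes. A minimal sketch, reusing the AlexNet class defined above:

import torch

net = AlexNet()                 # a CPU instance is enough for a shape check
x = torch.randn(4, 3, 32, 32)   # dummy batch of 4 CIFAR10-sized images

x = net.conv1(x);     print(x.shape)        # torch.Size([4, 64, 28, 28])
x = net.max_pool1(x); print(x.shape)        # torch.Size([4, 64, 13, 13])
x = net.conv2(x);     print(x.shape)        # torch.Size([4, 64, 9, 9])
x = net.max_pool2(x); print(x.shape)        # torch.Size([4, 64, 4, 4])
x = x.view(x.shape[0], -1); print(x.shape)  # torch.Size([4, 1024])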

Results and Summary

epoch: 0, trainloss: 1.7304911044857385,trainacc: 0.36455003196930946
epoch: 0, evalloss: 1.5218798446047836,evalacc: 0.44416799363057324
epoch: 1, trainloss: 1.4093753750366933,trainacc: 0.4882712595907928
epoch: 1, evalloss: 1.3309362208008007,evalacc: 0.5208996815286624
...
epoch: 18, trainloss: 0.6127741164182459,trainacc: 0.7886229219948849
epoch: 18, evalloss: 0.8141072483579065,evalacc: 0.7256170382165605
epoch: 19, trainloss: 0.5917411257162728,trainacc: 0.795696131713555
epoch: 19, evalloss: 0.808465709731837,evalacc: 0.7269108280254777
From this section onward, virtually every network we build will need a GPU to train in a reasonable amount of time.
As the log shows, this AlexNet model reaches roughly 72% accuracy on the CIFAR10 test set.
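
The commented-out losses.append / acces.append lines in the training loop hint at a natural next step: record the per-epoch averages and plot the training curve once training finishes. A minimal sketch of that idea, assuming matplotlib is installed (the list names follow the commented-out code):

import matplotlib.pyplot as plt

losses, acces = [], []
# inside the epoch loop, after the batch loop finishes:
#     losses.append(train_loss / len(train_data))
#     acces.append(train_acc / len(train_data))

# after training, visualize how the loss falls and the accuracy rises per epoch
plt.plot(losses, label='train loss')
plt.plot(acces, label='train acc')
plt.xlabel('epoch')
plt.legend()
plt.show()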
