Classifying the MNIST dataset in Python

First, training and testing a single-layer neural network:

import torch
from torchvision.datasets import MNIST
from torchvision import transforms
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt


mnist_train = MNIST(root="./data", train=True, download=True, transform=transforms.ToTensor())#training set
mnist_test = MNIST(root="./data", train=False, download=True, transform=transforms.ToTensor())#test set
train_loader = DataLoader(mnist_train, batch_size=400, shuffle=True)#batch size 400; how large a batch fits depends on GPU memory; shuffle=True randomizes the order each epoch
test_loader = DataLoader(mnist_test, batch_size=400, shuffle=True)


class Softmax(nn.Module):
    def __init__(self):
        super(Softmax, self).__init__()
        self.l1 = nn.Linear(28*28, 10)#input: a flattened 28*28 image; output: 10 class scores
        self.softmax = nn.Softmax(dim=1)#Softmax activation: outputs are non-negative and sum to 1 along dim=1
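        # e.g. (illustrative values) softmax of [[1.0, 2.0]] is [[0.2689, 0.7311]] -- each row sums to 1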

    def forward(self, x):
        output = x.view(x.size()[0], -1)#flatten (batch, 1, 28, 28) -> (batch, 784)
        #output = x.view(-1, 784)
        output = self.l1(output)
        output = self.softmax(output)
        return output


model = Softmax()
criterion = nn.CrossEntropyLoss()#cross-entropy loss for classification (applies log-softmax internally)
optimizer = optim.SGD(model.parameters(), lr=0.05)

device = torch.device("cuda:0")
# model.to(device)

loss_list = []
acc_list = []


def train():
    for i, data in enumerate(train_loader):#enumerate yields (batch index, batch) pairs
        inputs, labels = data
        inputs = Variable(inputs, requires_grad=False)#Variable is a no-op since PyTorch 0.4; plain tensors work here
        labels = Variable(labels, requires_grad=False)
        # inputs = inputs.to(device)
        # labels = labels.to(device)
        output = model(inputs)
        #one-hot encode the labels
        labels = labels.reshape(-1, 1)#-1 lets reshape infer the first dimension -> a (batch_size, 1) column of class indices
        one_hot = torch.zeros(inputs.shape[0], 10)#all-zero matrix of shape (batch_size, 10)
        # one_hot = one_hot.to(device)
        one_hot = one_hot.scatter(1, labels, 1)#write a 1 at each row's label position
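        # e.g. (illustrative) labels [[2], [0]] give one_hot rows
        #   [0, 0, 1, 0, ..., 0] and [1, 0, 0, 0, ..., 0]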
        # labels = torch.zeros(inputs.shape[0], 10).scatter(1, labels, 1)
        loss = criterion(output, one_hot)#PyTorch >= 1.10 accepts probability targets like this; older versions need class-index targets
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    loss_list.append(loss.item())#record the last batch's loss for this epoch
    # print('loss :', loss.item())

    print(loss.item())


def test():
    correct = 0
    for i, data in enumerate(test_loader):
        inputs, labels = data
        inputs = Variable(inputs, requires_grad=False)
        labels = Variable(labels, requires_grad=False)
        # inputs = inputs.to(device)
        # labels = labels.to(device)
        output = model(inputs)
        value, predict_tensor = torch.max(output, dim=1)#torch.max returns (max values, argmax indices); the argmax is the predicted class
        correct += (predict_tensor == labels).sum()

    acc = correct.item()/len(mnist_test)
    print("Test acc:{}".format(acc))
    acc_list.append(acc)


for epoch in range(30):
    print('Epoch {}:'.format(epoch))
    train()
    test()

print(loss_list[-1])
print(acc_list[-1])

plt.plot([i for i in range(len(loss_list))], loss_list)
plt.show()
plt.plot([i for i in range(len(acc_list))], acc_list)
plt.show()
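
One caveat about this model: nn.CrossEntropyLoss already applies log-softmax internally, so feeding it the output of an explicit nn.Softmax layer applies softmax twice, which flattens the gradients and slows training. A common variant (a minimal sketch, not the code above; the name SoftmaxLogits is made up for illustration) outputs raw logits and passes the integer labels straight to the loss, skipping the one-hot step:

class SoftmaxLogits(nn.Module):
    def __init__(self):
        super(SoftmaxLogits, self).__init__()
        self.l1 = nn.Linear(28*28, 10)

    def forward(self, x):
        return self.l1(x.view(x.size()[0], -1))#raw logits; no Softmax layer

model = SoftmaxLogits()
criterion = nn.CrossEntropyLoss()#applies log-softmax itself
# in the training loop, class indices work directly as targets:
# loss = criterion(model(inputs), labels)

nn.Softmax remains useful at inference time when actual probabilities are wanted.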

Next, a multi-layer neural network (the code below has one hidden layer):

import torch
from torchvision.datasets import MNIST
from torchvision import transforms
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt


mnist_train = MNIST(root="./data", train=True, download=True, transform=transforms.ToTensor())#training set
mnist_test = MNIST(root="./data", train=False, download=True, transform=transforms.ToTensor())#test set
train_loader = DataLoader(mnist_train, batch_size=400, shuffle=True)
test_loader = DataLoader(mnist_test, batch_size=400, shuffle=True)


class Softmax(nn.Module):
    def __init__(self):
        super(Softmax, self).__init__()
        self.l1 = nn.Linear(28*28, 256)#hidden layer: 784 inputs -> 256 hidden units
        self.sig = nn.Sigmoid()
        self.h = nn.Linear(256, 10)#output layer: 256-dim hidden representation -> 10 classes
        self.softmax = nn.Softmax(dim=1)#Softmax activation

    def forward(self, x):
        output = x.view(x.size()[0], -1)#flatten (batch, 1, 28, 28) -> (batch, 784)
        output_h = self.l1(output)
        output_h = self.sig(output_h)#hidden-layer activation
        output = self.h(output_h)
        output = self.softmax(output)#final class probabilities
        return output

model = Softmax()
criterion = nn.CrossEntropyLoss()#cross-entropy loss for classification (applies log-softmax internally)
optimizer = optim.SGD(model.parameters(), lr=0.05)

device = torch.device("cuda:0")
# model.to(device)

loss_list = []
acc_list = []


def train():
    for i, data in enumerate(train_loader):
        inputs, labels = data
        inputs = Variable(inputs, requires_grad=False)
        labels = Variable(labels, requires_grad=False)
        output = model(inputs)
        #one-hot encode the labels
        labels = labels.reshape(-1, 1)#(batch_size, 1) column of class indices
        one_hot = torch.zeros(inputs.shape[0], 10)#all-zero matrix of shape (batch_size, 10)
        # one_hot = one_hot.to(device)
        one_hot = one_hot.scatter(1, labels, 1)#write a 1 at each row's label position
        # labels = torch.zeros(inputs.shape[0], 10).scatter(1, labels, 1)
        loss = criterion(output, one_hot)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    loss_list.append(loss.item())
    # print('loss :', loss.item())

    print(loss.item())


def test():
    correct = 0
    for i, data in enumerate(test_loader):
        inputs, labels = data
        inputs = Variable(inputs, requires_grad=False)
        labels = Variable(labels, requires_grad=False)
        # inputs = inputs.to(device)
        # labels = labels.to(device)
        output = model(inputs)
        value, predict_tensor = torch.max(output, dim=1)
        correct += (predict_tensor == labels).sum()

    acc = correct.item()/len(mnist_test)
    print("Test acc:{}".format(acc))
    acc_list.append(acc)


for epoch in range(30):
    print('Epoch {}:'.format(epoch))
    train()
    test()

print(loss_list[-1])
print(acc_list[-1])

plt.plot([i for i in range(len(loss_list))], loss_list)
plt.show()
plt.plot([i for i in range(len(acc_list))], acc_list)
plt.show()
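
A small refinement for the test phase (not in the original code): wrapping evaluation in torch.no_grad() skips gradient bookkeeping and saves memory. A sketch of the same test() under that change:

def test():
    correct = 0
    with torch.no_grad():#no gradients are needed for evaluation
        for inputs, labels in test_loader:
            output = model(inputs)
            _, predict_tensor = torch.max(output, dim=1)
            correct += (predict_tensor == labels).sum()
    acc = correct.item()/len(mnist_test)
    print("Test acc:{}".format(acc))
    acc_list.append(acc)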

Whether it is a single-layer or a multi-layer neural network, the basic workflow is the same:

load the dataset → build the model (subclass nn.Module and override its methods) → construct the model object → choose the loss function criterion (MSE for regression; cross-entropy for classification) → choose the gradient-descent optimizer → train (compute the loss, clear the gradients with optimizer.zero_grad(), backpropagate with loss.backward(), update the weights with optimizer.step()) → test
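
That workflow maps onto code roughly as follows (a minimal, self-contained sketch using random stand-in data rather than MNIST):

import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset

# random stand-in data, for illustration only
dataset = TensorDataset(torch.randn(100, 784), torch.randint(0, 10, (100,)))
loader = DataLoader(dataset, batch_size=20, shuffle=True)

model = nn.Linear(784, 10)#construct the model object
criterion = nn.CrossEntropyLoss()#classification -> cross-entropy
optimizer = optim.SGD(model.parameters(), lr=0.05)#gradient descent

for epoch in range(3):
    for inputs, labels in loader:
        loss = criterion(model(inputs), labels)#compute the loss
        optimizer.zero_grad()#clear old gradients
        loss.backward()#backpropagate
        optimizer.step()#update the weights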
