Implementing an MLP (Multi-Layer Perceptron) in PyTorch

Implementing a multi-layer perceptron in PyTorch mainly relies on torch.nn.Linear(in_features, out_features): torch.nn.Linear is a fully connected layer, which is exactly what the layers of an MLP are.
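
For example, a minimal sketch of a single fully connected layer (the shapes here are illustrative, matching the 784-to-100 mapping used later):

import torch

fc = torch.nn.Linear(784, 100)  # in_features=784, out_features=100
x = torch.randn(64, 784)        # a batch of 64 flattened 28x28 images
y = fc(x)
print(y.shape)                  # torch.Size([64, 100])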

This article uses the MNIST dataset: the input layer has 28×28 = 784 nodes, there are 2 hidden layers with 100 nodes each, and the output layer has 10 nodes.
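
The same 784-100-100-10 architecture can be written compactly with torch.nn.Sequential; this is just an equivalent sketch of the model that the code below builds layer by layer:

import torch

mlp = torch.nn.Sequential(
    torch.nn.Linear(784, 100),  # input layer -> hidden layer 1
    torch.nn.ReLU(),
    torch.nn.Linear(100, 100),  # hidden layer 1 -> hidden layer 2
    torch.nn.ReLU(),
    torch.nn.Linear(100, 10),   # hidden layer 2 -> output layer
)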

Development environment: Windows 7, Python 3.8.5, Anaconda3, torch 1.8.1+cpu.

# MNIST classification with an MLP, implemented in PyTorch
import torch
from torchvision import datasets, transforms
import time

transform = transforms.Compose([transforms.ToTensor(),
                               transforms.Normalize((0.5,),(0.5,))])

# Download the datasets
data_train = datasets.MNIST(root="../data/",
                            transform=transform,
                            train=True,
                            download=True)

data_test = datasets.MNIST(root="../data/",
                           transform=transform,
                           train=False,
                           download=True)
# Wrap the datasets in DataLoaders
data_loader_train = torch.utils.data.DataLoader(dataset=data_train,
                                                batch_size=64,
                                                shuffle=True)

data_loader_test = torch.utils.data.DataLoader(dataset=data_test,
                                               batch_size=64,
                                               shuffle=False)  # no need to shuffle for evaluation

num_i = 28*28  # input layer size
num_h = 100    # hidden layer size
num_o = 10     # output layer size
batch_size = 64  # matches the DataLoader batch size above

class Model(torch.nn.Module):

    def __init__(self, num_i, num_h, num_o):
        super(Model, self).__init__()

        self.linear1 = torch.nn.Linear(num_i, num_h)
        self.relu = torch.nn.ReLU()
        self.linear2 = torch.nn.Linear(num_h, num_h)  # second hidden layer
        self.relu2 = torch.nn.ReLU()
        self.linear3 = torch.nn.Linear(num_h, num_o)
  
    def forward(self, x):
        x = self.linear1(x)
        x = self.relu(x)
        x = self.linear2(x)
        x = self.relu2(x)
        x = self.linear3(x)
        return x

model = Model(num_i, num_h, num_o)
cost = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
epochs = 5
for epoch in range(epochs):
    sum_loss = 0
    train_correct = 0
    for data in data_loader_train:
        inputs, labels = data  # inputs shape: [64, 1, 28, 28]
        inputs = torch.flatten(inputs, start_dim=1)  # flatten to [64, 784]
        outputs = model(inputs)
        optimizer.zero_grad()
        loss = cost(outputs, labels)
        loss.backward()
        optimizer.step()

        _, pred = torch.max(outputs.data, 1)  # index of the highest logit
        sum_loss += loss.item()
        train_correct += torch.sum(pred == labels.data)
    print('[%d,%d] loss:%.03f' % (epoch + 1, epochs, sum_loss / len(data_loader_train)))
    print('        correct:%.03f%%' % (100 * train_correct / len(data_train)))
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime()))
model.eval()
test_correct = 0
with torch.no_grad():  # no gradients needed for evaluation
    for data in data_loader_test:
        inputs, labels = data
        inputs = torch.flatten(inputs, start_dim=1)  # flatten to [64, 784]
        outputs = model(inputs)
        _, pred = torch.max(outputs.data, 1)
        test_correct += torch.sum(pred == labels.data)
print("correct:%.3f%%" % (100 * test_correct / len(data_test)))

The output is as follows:

[1,5] loss:0.213
        correct:93.482%
2021-12-08 18:24:49
[2,5] loss:0.134
        correct:95.898%
2021-12-08 18:25:01
[3,5] loss:0.102
        correct:96.818%
2021-12-08 18:25:13
[4,5] loss:0.084
        correct:97.323%
2021-12-08 18:25:24
[5,5] loss:0.071
        correct:97.750%
2021-12-08 18:25:40
correct:96.820%

The network uses 2 hidden layers. torch.flatten(inputs, start_dim=1) flattens each [64, 1, 28, 28] batch into a [64, 784] tensor so it can be fed to the fully connected layers. This MNIST classification task could of course also be done with scikit-learn's MLPClassifier, as in the sketch below.
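
A hedged sketch of that scikit-learn alternative, assuming X_train/y_train and X_test/y_test are the flattened MNIST images and their digit labels as NumPy arrays (they are not defined above):

from sklearn.neural_network import MLPClassifier

# hypothetical X_train: [n_samples, 784] floats, y_train: [n_samples] digit labels
clf = MLPClassifier(hidden_layer_sizes=(100, 100), max_iter=20)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))  # mean accuracy on the (assumed) test split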

