多层感知机(MLP)的 Python/PyTorch 实现

from torch import nn
from softmax回归 import train_ch3
import torch
import torchvision
from torch.utils import data
from torchvision import transforms

# ①准备数据集
def load_data_fashion_mnist(batch_size, resize=None, num_workers=4):
    """Download Fashion-MNIST and return (train, test) DataLoaders.

    Args:
        batch_size: number of samples per minibatch.
        resize: optional size passed to transforms.Resize; if given,
            images are resized before being converted to tensors.
        num_workers: worker subprocesses per DataLoader (default 4,
            matching the previously hard-coded value).

    Returns:
        A (train_loader, test_loader) tuple.
    """
    # ToTensor converts a PIL Image / ndarray to a float tensor in [0, 1].
    trans = [transforms.ToTensor()]
    if resize:
        # Resize must run before ToTensor, hence inserted at the front.
        trans.insert(0, transforms.Resize(resize))
    # Chain the transforms into a single callable.
    trans = transforms.Compose(trans)
    # Training split (downloaded to ../data on first use).
    mnist_train = torchvision.datasets.FashionMNIST(
        root="../data", train=True, transform=trans, download=True)
    # Test split.
    mnist_test = torchvision.datasets.FashionMNIST(
        root="../data", train=False, transform=trans, download=True)
    # Shuffle only the training split; evaluation order does not matter.
    return (data.DataLoader(mnist_train, batch_size, shuffle=True,
                            num_workers=num_workers),
            data.DataLoader(mnist_test, batch_size, shuffle=False,
                            num_workers=num_workers))
# Minibatch size shared by both DataLoaders.
batch_size = 256
# Build the train/test iterators. NOTE(review): this runs at import time
# with num_workers=4 outside the __main__ guard, which can misbehave on
# platforms that spawn worker processes (e.g. Windows) — confirm target.
train_iter, test_iter = load_data_fashion_mnist(batch_size)

# ② Single-hidden-layer MLP: 784 inputs (flattened 28x28 image),
# 256 hidden units, 10 output classes.
num_inputs, num_outputs, num_hiddens = 784, 10, 256

# Weights are drawn from N(0, 0.01^2); biases start at zero.
# NOTE: nn.Parameter already sets requires_grad=True, so passing
# requires_grad=True to torch.randn/torch.zeros (as the original did)
# was redundant — and the `* 0.01` product is a non-leaf tensor anyway.
W1 = nn.Parameter(torch.randn(num_inputs, num_hiddens) * 0.01)
b1 = nn.Parameter(torch.zeros(num_hiddens))
W2 = nn.Parameter(torch.randn(num_hiddens, num_outputs) * 0.01)
b2 = nn.Parameter(torch.zeros(num_outputs))

# All trainable parameters, in the order the optimizer receives them.
params = [W1, b1, W2, b2]

# ③ ReLU activation function.
def relu(X):
    """Elementwise ReLU: max(x, 0), preserving shape and dtype."""
    return torch.clamp(X, min=0)

# ④ Forward pass of the single-hidden-layer MLP.
def net(X):
    """Return class logits for a batch of images.

    X is flattened to (batch, num_inputs); the hidden layer applies
    ReLU, the output layer is affine (CrossEntropyLoss applies softmax).
    """
    flat = X.reshape((-1, num_inputs))   # (batch, 784)
    hidden = relu(flat @ W1 + b1)        # (batch, 256)
    return hidden @ W2 + b2              # (batch, 10) logits

# ⑤ Loss: cross-entropy over the 10 class logits
# (combines log-softmax and negative log-likelihood in one module).
loss = nn.CrossEntropyLoss()

# ⑥ Optimization setup.
# Learning rate for SGD.
lr = 0.1
# Minibatch SGD over all model parameters.
updater = torch.optim.SGD(params, lr=lr)

# Entry point: train with the d2l-style loop for 10 epochs.
if __name__ == '__main__':
    epochs = 10
    train_ch3(net, train_iter, test_iter, loss, epochs, updater)


训练结果

训练损失:0.0015049066459139188
训练精度:0.86405
测试精度:0.8453

貌似比 softmax 回归同样训练十个 epoch 的结果好一些

你可能感兴趣的:(python,开发语言,机器学习)