李沐 Deep Learning, ch4: Dropout (暂退法)

#dropout
import torch
from torch import nn
from d2l import torch as d2l

def dropout_layer(X, dropout):
    # X is the input tensor and `dropout` is the drop probability p in [0, 1].
    # Draw a uniform [0, 1) tensor shaped like X: entries whose draw is <= p
    # are zeroed out, and the survivors are rescaled to x / (1 - p) so the
    # expected value of every activation is unchanged.
    assert 0 <= dropout <= 1  # raise an error if dropout falls outside [0, 1]
    if dropout == 1:
        return torch.zeros_like(X)
    if dropout == 0:
        return X
    mask = (torch.rand(X.shape) > dropout).float()
    return mask * X / (1.0 - dropout)
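
# A quick sanity check of dropout_layer (a minimal usage sketch): with p = 0
# the input passes through, with p = 1 everything is zeroed, and with p = 0.5
# roughly half the entries are dropped while the survivors are doubled, so the
# expectation of each entry stays the same.
X = torch.arange(16, dtype=torch.float32).reshape(2, 8)
print(dropout_layer(X, 0.0))  # unchanged
print(dropout_layer(X, 0.5))  # ~half zeroed, rest scaled by 2
print(dropout_layer(X, 1.0))  # all zeros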

dropout1, dropout2 = 0.2, 0.5
num_inputs, num_outputs, num_hiddens1, num_hiddens2 = 784, 10, 256, 256
class Net(nn.Module):
    def __init__(self, num_inputs, num_outputs, num_hiddens1, num_hiddens2, is_training):
        super(Net, self).__init__()
        self.num_inputs = num_inputs
        #self.num_outputs = num_outputs
        #self.num_hiddens1 = num_hiddens1
        #self.num_hiddens2 = num_hiddens2
        self.is_training = is_training
        self.lin1 = nn.Linear(num_inputs, num_hiddens1)
        self.lin2 = nn.Linear(num_hiddens1, num_hiddens2)
        self.lin3 = nn.Linear(num_hiddens2, num_outputs)
        self.relu = nn.ReLU()
    def forward(self, X):
        H1 = self.relu(self.lin1(X.reshape(-1, self.num_inputs)))
        if self.is_training:  # apply dropout only during training
            H1 = dropout_layer(H1, dropout1)
        H2 = self.relu(self.lin2(H1))
        if self.is_training:
            H2 = dropout_layer(H2, dropout2)
        return self.lin3(H2)

# is_training=True: dropout is applied in forward() during training
net_train = Net(num_inputs, num_outputs, num_hiddens1, num_hiddens2, True)
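
# For comparison, a concise version of the same architecture (a sketch using
# PyTorch's built-in nn.Dropout): nn.Dropout applies the same inverted-dropout
# scaling during training and becomes the identity once net.eval() is called,
# so no hand-rolled is_training flag is needed.
net_concise = nn.Sequential(
    nn.Flatten(),
    nn.Linear(num_inputs, num_hiddens1), nn.ReLU(),
    nn.Dropout(dropout1),  # active under train(), identity under eval()
    nn.Linear(num_hiddens1, num_hiddens2), nn.ReLU(),
    nn.Dropout(dropout2),
    nn.Linear(num_hiddens2, num_outputs))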

num_epochs, lr, batch_size = 10, 0.5, 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
loss = nn.CrossEntropyLoss(reduction="none")  # per-sample losses; d2l.train_ch3 averages them before backward()
trainer = torch.optim.SGD(net_train.parameters(), lr)

d2l.train_ch3(net_train, train_iter, test_iter, loss, num_epochs, trainer)
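
# One caveat of the hand-rolled flag: is_training stays True throughout
# train_ch3, so dropout is also active during its accuracy evaluations. For a
# final test-set pass dropout should be off. A sketch, assuming the d2l helper
# evaluate_accuracy defined in ch3 of the book:
net_train.is_training = False  # turn dropout off for inference
test_acc = d2l.evaluate_accuracy(net_train, test_iter)
print(f'test acc with dropout disabled: {test_acc:.3f}')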
