零基础LSTM入门示例pytorch

最近用pytorch搭了个LSTM模型。由于博主对pytorch和LSTM两者都没有基础,查来查去兜了不少圈子,干脆总结一个极简的LSTM代码示例,供参考。

仅使用了torch.nn.Module自定义模型

随便挑了accuracy_score作为评估指标

以及,特征和标签是随便打的,不要指望它收敛ㄟ( ▔, ▔ )ㄏ

 

定义模型结构:

import torch
from sklearn.metrics import accuracy_score

# Model definition, subclassing torch.nn.Module.
# A subclass must implement both __init__ and forward.
class mylstm(torch.nn.Module):
    """Minimal LSTM binary classifier.

    Pipeline: sigmoid(input) -> LSTM -> last time step -> Linear(hidden, 2).

    Args:
        lstm_input_size: feature dimension of each time step.
        lstm_hidden_size: LSTM hidden-state dimension.
        lstm_batch: batch size (only used by ``init_hidden``).
        lstm_layers: number of stacked LSTM layers.
    """

    def __init__(self, lstm_input_size, lstm_hidden_size, lstm_batch, lstm_layers):
        # Initialize the parent Module.
        super(mylstm, self).__init__()

        self.lstm_input_size, self.lstm_hidden_size = lstm_input_size, lstm_hidden_size
        self.lstm_layers, self.lstm_batch = lstm_layers, lstm_batch

        # LSTM layer; batch_first=True means input is (batch, seq, feature).
        self.lstm_layer = torch.nn.LSTM(self.lstm_input_size, self.lstm_hidden_size,
                                        num_layers=self.lstm_layers, batch_first=True)
        # Fully connected layer mapping the last hidden state to 2 classes.
        self.out = torch.nn.Linear(self.lstm_hidden_size, 2)

    def forward(self, x):
        # Squash the raw features into (0, 1) before the LSTM
        # (the original author's design choice, kept as-is).
        x = torch.sigmoid(x)
        # Run the LSTM over the whole sequence; with no explicit state
        # passed, h_0/c_0 default to zeros.
        x, _ = self.lstm_layer(x)
        # Keep only the output of the last time step: (batch, hidden).
        x = x[:, -1, :]
        # Project to class logits (CrossEntropyLoss applies log-softmax itself).
        x = self.out(x)
        return x

    def init_hidden(self):
        """Return an all-zero initial state ``(h_0, c_0)`` for the LSTM.

        Bug fix: ``torch.nn.LSTM`` expects a *tuple* of two tensors, each
        of shape ``(num_layers, batch, hidden_size)``. The original code
        returned a single ``(batch, hidden_size)`` tensor, which the LSTM
        would reject if it were ever passed in.
        """
        shape = (self.lstm_layers, self.lstm_batch, self.lstm_hidden_size)
        return torch.zeros(shape), torch.zeros(shape)

数据集特征和标签:

# Toy training-set features: 8 samples, each a sequence of 3 identical
# 4-dimensional time steps (values are arbitrary -- not meant to converge).
train_feature = [
    [step[:] for _ in range(3)]
    for step in (
        [0.1, 0.2, 0.3, 0.4],
        [0.4, 0.3, 0.2, 0.1],
        [0.2, 0.3, 0.5, 0.8],
        [0.7, 0.6, 0.5, 0.4],
        [0.1, 0.3, 0.5, 0.7],
        [0.5, 0.4, 0.2, 0.1],
        [0.2, 0.4, 0.6, 0.8],
        [0.7, 0.6, 0.3, 0.2],
    )
]
# Toy test-set features: 2 samples in the same (sample, step, feature) layout.
test_feature = [
    [step[:] for _ in range(3)]
    for step in ([0.3, 0.4, 0.6, 0.8], [0.9, 0.6, 0.3, 0.2])
]
# Binary labels for the training and test sets.
train_label = [1, 0, 1, 0, 1, 0, 1, 0]
test_label = [1, 0]

模型的定义、训练、测试:

# Hyperparameters.
dataset_batch_size = 2   # samples per mini-batch
learning_rate = 0.001
lstm_input_size = 4      # feature dimension of each time step
lstm_hidden_size = 4
lstm_batch = 2
lstm_layers = 1

# Build the model. Note: the LSTM's hidden state defaults to zeros on
# every forward pass, so no explicit init_hidden() call is needed here
# (the original called it but never used the result).
model = mylstm(lstm_input_size, lstm_hidden_size, lstm_batch, lstm_layers)
# Cross-entropy loss for the 2-class logits.
criterion = torch.nn.CrossEntropyLoss()
# Adam optimizer.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Convert the test set to a tensor ONCE, outside the epoch loop.
# (The original re-assigned test_feature = torch.tensor(test_feature)
# inside the loop, re-wrapping an existing tensor from epoch 2 onward,
# which triggers a UserWarning and an unnecessary copy.)
test_x = torch.tensor(test_feature)

# Bug fix: the original used range(len(...) // batch - 1), which
# silently dropped the last mini-batch every epoch.
num_batches = len(train_feature) // dataset_batch_size

for epoch in range(5):
    model.train()
    for i in range(num_batches):
        # Slice out one mini-batch and convert it to tensors.
        start = i * dataset_batch_size
        end = start + dataset_batch_size
        x = torch.tensor(train_feature[start:end])
        y = torch.tensor(train_label[start:end])
        # Forward pass.
        y_pred = model(x)
        loss = criterion(y_pred, y)
        # Backward pass: clear stale gradients, backprop, update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    # Evaluation: switch to eval mode and skip gradient tracking.
    model.eval()
    with torch.no_grad():
        out = model(test_x)
    predict_result = torch.argmax(out, dim=1)
    # accuracy_score expects array-likes; pass a plain Python list.
    acc = accuracy_score(test_label, predict_result.tolist())
    print('epoch:', epoch + 1, ' acc:', acc)

 

你可能感兴趣的:(python,深度学习,深度学习,lstm)