PyTorch：加载自定义类型数据并用 RNN 训练

import torch
from torch.utils.data import Dataset
from torch.utils.data import DataLoader

from torchvision import transforms
from torchvision import datasets
import  torch.nn as nn
import torch.optim as optim
import scipy.io as scio
# Make float64 the default tensor dtype so tensors match the
# double-precision arrays loaded from the .mat files below.
# NOTE(review): torch.set_default_tensor_type is deprecated in recent
# PyTorch releases in favor of torch.set_default_dtype — confirm version.
torch.set_default_tensor_type(torch.DoubleTensor)

num_time_steps = 50  # NOTE(review): appears unused anywhere in this file
input_size = 32      # feature dimension per sample (reshaped to 1x1x32 in training)
hidden_size = 16     # RNN hidden state size
output_size = 1      # single regression target per time step
lr = 0.01            # Adam learning rate

class Net(nn.Module):
    """Single-layer vanilla RNN followed by a linear read-out.

    The layer sizes were previously read from module-level globals both in
    ``__init__`` and in ``forward``; they are now constructor parameters
    whose defaults preserve the original values (32 / 16 / 1), so the class
    no longer depends on names defined elsewhere in the file.
    """

    def __init__(self, input_size=32, hidden_size=16, output_size=1):
        super(Net, self).__init__()
        # Remember hidden_size so forward() does not need a global.
        self.hidden_size = hidden_size
        self.rnn = nn.RNN(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=1,
            batch_first=True,  # expects x shaped (batch, seq_len, feature)
        )
        self.linear = nn.Linear(hidden_size, output_size)

    def forward(self, x, hidden_prev):
        """Run the RNN over x and project every time step to output_size.

        x:           (batch, seq_len, input_size)
        hidden_prev: (num_layers=1, batch, hidden_size)
        Returns (out, hidden) where out is (1, batch*seq_len, output_size)
        — the flatten + unsqueeze below reproduces the original behavior.
        """
        out, hidden_prev = self.rnn(x, hidden_prev)
        # Flatten (batch, seq, hidden) -> (batch*seq, hidden) for the
        # linear layer, then restore a leading dim of 1 so the training
        # loop can compare against an unsqueezed target.
        out = self.linear(out.view(-1, self.hidden_size))
        out = out.unsqueeze(dim=0)
        return out, hidden_prev



class DiabetesDataset(Dataset):
    """Dataset backed by two MATLAB .mat files on disk.

    Features are read from ``tezhengxx.mat`` (key ``'tezhengxx'``) and
    labels from ``biaoqian.mat`` (key ``'shuzi'``); both matrices are
    transposed so the first axis indexes samples.
    """

    def __init__(self):
        feature_mat = scio.loadmat("tezhengxx.mat")
        label_mat = scio.loadmat("biaoqian.mat")
        self.x_data = feature_mat['tezhengxx'].T
        self.y_data = label_mat['shuzi'].T
        self.len = len(self.x_data)

    def __getitem__(self, index):
        # One (feature row, label row) pair per sample index.
        sample = self.x_data[index]
        target = self.y_data[index]
        return sample, target

    def __len__(self):
        # Sample count, cached at construction time.
        return self.len


# NOTE(review): this runs at import time (outside the __main__ guard), so
# merely importing this module triggers the .mat file loads — consider
# moving it under the guard.  batch_size=1 feeds one sample at a time;
# num_workers=0 keeps loading in the main process.
dataset = DiabetesDataset()
train_loader = DataLoader(dataset=dataset,batch_size=1,shuffle=True,num_workers=0)


if __name__ == '__main__':
    # Build the model and training machinery.  BUG FIX: the training loop
    # below used to sit OUTSIDE this guard, so importing the module raised
    # NameError on `model`; the loop is now correctly nested under it.
    model = Net()
    model = model.double()  # match the float64 default set at module top
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr)
    # Initial hidden state: (num_layers=1, batch=1, hidden_size).
    hidden_prev = torch.zeros(1, 1, hidden_size)

    for epoch in range(400):
        for i, data in enumerate(train_loader, 0):
            inputs, target = data
            # Reshape one sample to (batch=1, seq_len=1, input_size=32)
            # and ensure double precision for both inputs and labels.
            inputs = inputs.view(1, 1, 32).to(torch.double)
            target = target.to(torch.double)

            output, hidden_prev = model(inputs, hidden_prev)
            # Detach so gradients do not flow across loop iterations.
            hidden_prev = hidden_prev.detach()

            # Add a leading dim of 1 so the target matches output's shape.
            loss = criterion(output, target.unsqueeze(dim=0))
            optimizer.zero_grad()  # clears the same parameters model.zero_grad() did
            loss.backward()
            optimizer.step()

            if epoch % 10 == 0:
                print("Iteration:{} loss{}".format(epoch, loss.item()))









你可能感兴趣的:(pytorch,pytorch,rnn,深度学习)