Building a Fully Connected Neural Network with PyTorch

import torch
import torch.nn as nn
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tqdm import tqdm
torch.manual_seed(10)  # fix the seed so the model weights initialize identically on every run
training_step = 500    # number of training epochs
batch_size = 512       # samples per mini-batch
n_features = 32        # number of input features
M = 10000              # number of samples to generate
# Generate synthetic data
data = np.random.randn(M, n_features)  # features drawn from a standard Gaussian
target = np.random.rand(M)             # random regression targets in [0, 1)

# Scale each feature to [0, 1]
min_max_scaler = MinMaxScaler()
data = min_max_scaler.fit_transform(data)
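
By default MinMaxScaler rescales each feature (column) to [0, 1] via (x - min) / (max - min). Note that fitting it on all rows before the split, as above, leaks validation statistics into the scaler; in a real pipeline it would be fit on the training split only. A quick sanity check of the formula:

raw = np.random.randn(100, 3)
scaled = MinMaxScaler().fit_transform(raw)
manual = (raw - raw.min(axis=0)) / (raw.max(axis=0) - raw.min(axis=0))
print(np.allclose(scaled, manual))  # True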

# Hold out 20% of the data as a validation set
x_train, x_val, y_train, y_val = train_test_split(data, target, test_size=0.2, shuffle=False)
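
With M = 10000 and test_size=0.2 this leaves 8000 rows for training and 2000 for validation; shuffle=False keeps the original row order:

print(x_train.shape, x_val.shape)  # (8000, 32) (2000, 32)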

# Define the network architecture
class Net(torch.nn.Module):  # subclass torch's nn.Module

    def __init__(self, n_features):
        super(Net, self).__init__()  # run the parent Module's __init__
        self.l1 = nn.Linear(n_features, 500)  # input layer
        self.l2 = nn.ReLU()                   # activation
        self.l3 = nn.BatchNorm1d(500)         # batch normalization
        self.l4 = nn.Linear(500, 250)
        self.l5 = nn.ReLU()
        self.l6 = nn.BatchNorm1d(250)
        self.l7 = nn.Linear(250, 1)
        #self.l8 = nn.Sigmoid()               # uncomment to squash outputs into (0, 1)
    def forward(self, inputs):  # defines the Module's forward pass
        # Propagate the input through the layers to produce a prediction
        out = torch.from_numpy(inputs).to(torch.float32)  # convert the NumPy input to a float32 tensor
        out = self.l1(out)
        out = self.l2(out)
        out = self.l3(out)
        out = self.l4(out)
        out = self.l5(out)
        out = self.l6(out)
        out = self.l7(out)
        #out = self.l8(out)
        return out
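
For reference, the same stack of layers can also be expressed with nn.Sequential; a minimal sketch equivalent to the class above (seq_model is just an illustrative name, and since Sequential has no custom forward, the NumPy-to-tensor conversion must happen before calling it):

seq_model = nn.Sequential(
    nn.Linear(n_features, 500),
    nn.ReLU(),
    nn.BatchNorm1d(500),
    nn.Linear(500, 250),
    nn.ReLU(),
    nn.BatchNorm1d(250),
    nn.Linear(250, 1),
)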


# Instantiate the model
model = Net(n_features=n_features)
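
Printing an nn.Module lists its registered submodules, a quick way to verify the layer sizes before training:

print(model)  # Net( (l1): Linear(in_features=32, out_features=500, ...) ... )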

# Define the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)  # pass in all of the net's parameters and the learning rate
# Define the loss function
loss_func = torch.nn.MSELoss()  # mean squared error, a standard choice for regression
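
MSELoss with its default 'mean' reduction averages the squared prediction errors; a two-element sanity check:

pred = torch.tensor([1.0, 2.0])
true = torch.tensor([0.0, 0.0])
print(loss_func(pred, true))  # tensor(2.5000) = (1^2 + 2^2) / 2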

# Training loop
for step in range(training_step):
    M_train = len(x_train)
    with tqdm(np.arange(0, M_train, batch_size), desc='Training...') as tbar:
        for index in tbar:
            L = index
            R = min(M_train, index + batch_size)
            #----------------- training step ------------------
            train_pre = model(x_train[L:R, :])  # forward pass on the current mini-batch
            train_loss = loss_func(train_pre, torch.from_numpy(y_train[L:R].reshape(R - L, 1)).to(torch.float32))
            # Evaluate on the validation set with gradients disabled, in eval
            # mode so BatchNorm uses its running statistics rather than batch ones
            model.eval()
            with torch.no_grad():
                val_pre = model(x_val)
                val_loss = loss_func(val_pre, torch.from_numpy(y_val.reshape(len(y_val), 1)).to(torch.float32))
            model.train()
            #---------------------------------------------------
            tbar.set_postfix(train_loss=train_loss.item(), val_loss=val_loss.item())  # show both losses on the progress bar
            # (no manual tbar.update() needed: iterating over tbar already advances it)

            #----------------- backward pass and update --------
            optimizer.zero_grad()  # clear gradients left over from the previous step
            train_loss.backward()  # backpropagate the training-set loss to compute gradients
            optimizer.step()       # apply the computed updates to the model's parameters
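
After training, new samples should pass through the same fitted scaler before prediction; a minimal inference sketch (x_new is a hypothetical batch of new data):

model.eval()  # switch BatchNorm to its running statistics
with torch.no_grad():
    x_new = np.random.randn(5, n_features)   # hypothetical new samples
    x_new = min_max_scaler.transform(x_new)  # reuse the scaler fitted earlier
    preds = model(x_new)                     # Net.forward converts the NumPy array to a float tensor
print(preds)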

