Implementing a Simple Neural Network in PyTorch

  • 1. Gradient descent in NumPy and PyTorch
    • NumPy
    • PyTorch
  • 2. Linear regression in NumPy and PyTorch
    • Linear regression in NumPy
    • Linear regression in PyTorch
  • 3. A simple neural network in PyTorch

1. Gradient descent in NumPy and PyTorch

Gradient descent repeats three steps:

  1. Set an initial value.
  2. Compute the gradient at that value.
  3. Update the parameter in the direction opposite to the gradient.

The example below minimizes y = x**2 + 2*x + 1 = (x + 1)**2, whose gradient 2*x + 2 vanishes at x = -1, so the iterates should converge there.

NumPy

x = 0
# learning rate
learning_rate = 0.1
# number of iterations
epochs = 20
# a simple lambda standing in for the loss function :)
y = lambda x: x**2 + 2*x + 1
for epoch in range(epochs):
    dx = 2*x + 2  # gradient dy/dx
    x = x - learning_rate*dx  # step against the gradient
    print('x:', x, 'y:', y(x))

x: -0.2 y: 0.64
x: -0.36000000000000004 y: 0.40959999999999996
x: -0.488 y: 0.26214400000000004
x: -0.5904 y: 0.16777215999999995
x: -0.67232 y: 0.10737418239999996
x: -0.7378560000000001 y: 0.06871947673599998
x: -0.7902848 y: 0.043980465111040035
x: -0.83222784 y: 0.028147497671065613
x: -0.865782272 y: 0.018014398509481944
x: -0.8926258176 y: 0.011529215046068408
x: -0.9141006540800001 y: 0.0073786976294838436
x: -0.931280523264 y: 0.004722366482869611
x: -0.9450244186112 y: 0.0030223145490365644
x: -0.95601953488896 y: 0.0019342813113834012
x: -0.9648156279111679 y: 0.0012379400392853457
x: -0.9718525023289344 y: 0.0007922816251426656
x: -0.9774820018631475 y: 0.0005070602400912838
x: -0.981985601490518 y: 0.0003245185536584483
x: -0.9855884811924144 y: 0.00020769187434144243
x: -0.9884707849539315 y: 0.00013292279957843878
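
The update x = x - 0.1*(2*x + 2) shrinks the distance to the minimum by a factor of 0.8 each step, so the printed values follow a closed form: x_t = -1 + 0.8**t for x_0 = 0. A quick check against the output above:

for t in (1, 2, 20):
    print(t, -1 + 0.8**t)
# 1  ≈ -0.2      (x after the first epoch)
# 2  ≈ -0.36
# 20 ≈ -0.98847  (the final x above)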

PyTorch

import torch
from torch.autograd import Variable

# define an initial value as a PyTorch type that supports automatic differentiation
x = torch.Tensor([0])  # create a tensor, analogous to np.array
x = Variable(x, requires_grad=True)  # wrap x in a Variable, the starting node of the computation graph; requires_grad=True enables automatic gradient computation
print('grad', x.grad, 'data', x.data)  # .grad holds the currently accumulated gradient of x; .data holds the tensor value

lr = 0.1
epochs = 20

for epoch in range(epochs):
    # build the computation graph: y as a function of x
    y = x ** 2 + 2 * x + 1
    # the Variable is differentiated automatically thanks to requires_grad
    y.backward()  # backpropagate from y; y has a single element, so no gradient argument is needed
    print('grad of epoch' + str(epoch) + ':', x.grad.data)

    x.data -= lr * x.grad.data
    # gradients accumulate in PyTorch, so they must be zeroed on every iteration
    x.grad.data.zero_()  # a trailing underscore marks an in-place operation; this clears the current gradient
print(x.data)

grad None data tensor([0.])
grad of epoch0: tensor([2.])
grad of epoch1: tensor([1.6000])
grad of epoch2: tensor([1.2800])
grad of epoch3: tensor([1.0240])
grad of epoch4: tensor([0.8192])
grad of epoch5: tensor([0.6554])
grad of epoch6: tensor([0.5243])
grad of epoch7: tensor([0.4194])
grad of epoch8: tensor([0.3355])
grad of epoch9: tensor([0.2684])
grad of epoch10: tensor([0.2147])
grad of epoch11: tensor([0.1718])
grad of epoch12: tensor([0.1374])
grad of epoch13: tensor([0.1100])
grad of epoch14: tensor([0.0880])
grad of epoch15: tensor([0.0704])
grad of epoch16: tensor([0.0563])
grad of epoch17: tensor([0.0450])
grad of epoch18: tensor([0.0360])
grad of epoch19: tensor([0.0288])
tensor([-0.9885])
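
Since PyTorch 0.4, Variable has been merged into Tensor, so the wrapper above is no longer needed. A minimal sketch of the same loop in the current API:

import torch

x = torch.tensor([0.0], requires_grad=True)
lr = 0.1

for epoch in range(20):
    y = x ** 2 + 2 * x + 1
    y.backward()
    with torch.no_grad():  # update the parameter outside the autograd graph
        x -= lr * x.grad
    x.grad.zero_()
print(x)  # ≈ tensor([-0.9885], requires_grad=True)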

2. Linear regression in NumPy and PyTorch

Linear regression in NumPy

import numpy as np
x_data = np.array([1, 2, 3])
y_data = np.array([2, 4, 6])

epochs = 10
lr = 0.01
w = 0
cost = []

for epoch in range(epochs):
    # forward pass: predict y from x with the current w
    yhat = x_data * w
    # mean squared error loss
    loss = np.average((yhat - y_data)**2)
    cost.append(loss)
    # gradient of the loss with respect to w
    dw = -2*(y_data - yhat) @ x_data.T / x_data.shape[0]
    # parameter update
    w = w - lr*dw
print(w)

1.2492307286934012
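
With only 10 epochs at lr = 0.01, w is still far from the optimum. As a sanity check, the closed-form least-squares solution (no intercept) can be computed from the same data, reusing x_data and y_data from above:

w_star = (x_data @ y_data) / (x_data @ x_data)  # w* = (x·y)/(x·x) = 28/14
print(w_star)  # 2.0, the value gradient descent converges toward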

Linear regression in PyTorch

import torch
from torch.autograd import Variable

# set up the data
x_data = Variable(torch.Tensor([[1], [2], [3]]))
y_data = Variable(torch.Tensor([[2], [4], [6]]))

epochs = 20
lr = 0.1
w = Variable(torch.FloatTensor([0]), requires_grad=True)  # don't forget to set requires_grad
cost = []

for epoch in range(epochs):
    # build the computation graph
    yhat = x_data * w
    loss = torch.mean((yhat - y_data) ** 2)
    cost.append(loss.data.numpy())  # convert the tensor to an ndarray
    loss.backward()  # compute the gradient of the loss with respect to w

    # update the parameter
    w.data -= lr * w.grad.data
    w.grad.data.zero_()
print(w.data)

tensor([2.])
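
The manual update and gradient zeroing above are exactly what torch.optim automates. As a bridge to the next section, here is a minimal sketch of the same regression driven by torch.optim.SGD (not part of the original post):

import torch

x_data = torch.tensor([[1.], [2.], [3.]])
y_data = torch.tensor([[2.], [4.], [6.]])

w = torch.tensor([0.], requires_grad=True)
optimizer = torch.optim.SGD([w], lr=0.1)  # the optimizer owns the update and the zeroing

for epoch in range(20):
    loss = torch.mean((x_data * w - y_data) ** 2)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
print(w.data)  # ≈ tensor([2.])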

3. A simple neural network in PyTorch

import torch
from torch.autograd import Variable


# define the network as a class derived from torch.nn.Module
class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        # super() calls the parent-class constructor. Every network defined in
        # PyTorch subclasses torch.nn.Module, which provides the shared tooling;
        # we only ever write the subclass.
        self.linear = torch.nn.Linear(1, 1, bias=False)
        # a Linear layer computes Ax + b; bias controls whether b is included

    # forward is the hook torch.nn.Module expects: it defines the forward pass
    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred


model = Model()  # instantiate the network
criterion = torch.nn.MSELoss(reduction='mean')  # mean squared error loss
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # gradient descent over model.parameters()

torch.manual_seed(2)  # note: seeding here does not affect the Linear layer's initialization above
x_data = Variable(torch.Tensor([[1], [2], [3]]))
y_data = Variable(torch.Tensor([[2], [4], [6]]))

epochs = 50
cost = []
for epoch in range(epochs):
    # build the computation graph
    y_hat = model(x_data)  # predictions
    loss = criterion(y_hat, y_data)
    cost.append(loss.data)
    optimizer.zero_grad()  # zero the accumulated gradients before backward
    loss.backward()  # compute gradients

    # update the parameters
    optimizer.step()
print(list(model.parameters()))

[Parameter containing:
tensor([[1.9781]], requires_grad=True)]
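
With the learned weight ≈ 1.9781, the trained model can be used for prediction. A small usage sketch with a hypothetical test input x_test:

x_test = torch.Tensor([[4.0]])
with torch.no_grad():  # no gradient tracking needed at inference time
    y_test = model(x_test)
print(y_test)  # ≈ tensor([[7.9124]]), i.e. 1.9781 * 4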
