Essential for beginners | The most complete collection of PyTorch learning resources
Quick-start notes: a practical tutorial on PyTorch model training (with code)
PyTorch learning notes
Companion code for the book "Deep Learning Framework PyTorch: Getting Started and Practice"
PyTorch Chinese documentation
x = 1
learning_rate = 0.0001  # learning rate
epochs = 10             # number of iterations
y = lambda x: x**2 + 2*x + 1  # a simple parabola, y = (x + 1)**2

for epoch in range(epochs):
    dx = 2*x + 2                 # derivative of y with respect to x
    x = x - learning_rate * dx   # gradient-descent update

print(x)
0.9960035980806718
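Ten updates barely move x from its starting value because the learning rate is tiny. Since y = (x + 1)**2 has its minimum at x = -1, the same update rule approaches that point given a larger learning rate and more iterations; the sketch below (step count and learning rate chosen purely for illustration, not from the original notes) verifies this.

x = 1.0
learning_rate = 0.1
for epoch in range(100):
    dx = 2 * x + 2              # same derivative as above
    x = x - learning_rate * dx  # same update rule
print(x)  # very close to -1.0, the true minimum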
import torch  # import PyTorch
# Since PyTorch 0.4, Variable has been merged into Tensor, so
# `from torch.autograd import Variable` is no longer needed.

# torch.tensor builds a tensor, the PyTorch counterpart of a numpy array;
# the operations numpy.array supports, torch tensors support as well.
# requires_grad=True marks x as the starting point of the computation graph.
x = torch.tensor([1.0], requires_grad=True)
print("grad", x.grad, "data", x.data)  # grad and data are two attributes of a tensor:
                                       # x.data is the raw tensor, x.grad holds the gradient
# The essence of PyTorch is building a computation graph and differentiating automatically.

learning_rate = 0.00001
epochs = 10

for epoch in range(epochs):
    # build a computation graph: a function with x as the variable
    y = x**2 + 2*x + 1
    # tensors that require grad are differentiated automatically
    y.backward()  # backpropagate through y; afterwards x.grad holds dy/dx
    print("grad", x.grad)
    # update the parameter using the gradient
    x.data = x.data - learning_rate * x.grad
    # in PyTorch, gradients accumulate unless they are cleared
    x.grad.data.zero_()

print(x.data)
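The call to x.grad.data.zero_() matters: PyTorch adds each backward() result into .grad instead of overwriting it. A minimal sketch (separate from the notes above) showing the accumulation:

import torch

x = torch.tensor([1.0], requires_grad=True)
y = x**2 + 2*x + 1
y.backward(retain_graph=True)  # keep the graph so we can backprop a second time
print(x.grad)  # tensor([4.]) -> dy/dx = 2*1 + 2
y.backward()
print(x.grad)  # tensor([8.]) -> the second gradient was added, not overwritten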
import numpy as np  # import NumPy

# build a toy dataset
x_data = np.array([1, 2, 3])
y_data = np.array([2, 4, 6])

epochs = 1000
lr = 0.001
w = 0
cost = []

for epoch in range(epochs):
    # forward pass: the model predicts y from x
    y_hat = x_data * w
    loss = np.average((y_hat - y_data) ** 2)  # mean-squared-error loss
    cost.append(loss)
    # gradient of the loss with respect to w
    dw = -2 * (y_data - y_hat) @ x_data.T / x_data.shape[0]
    # parameter update
    w = w - lr * dw

print(w)
1.9998307298556592
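The dw line is the analytic gradient of the loss: with L(w) = (1/N) * sum((w*x_i - y_i)**2), differentiating gives dL/dw = (2/N) * sum((w*x_i - y_i) * x_i), which equals the -2 * (y - y_hat) @ x / N computed in the loop. A quick finite-difference check (illustrative values, not part of the original notes) confirms the formula:

import numpy as np

x_data = np.array([1, 2, 3])
y_data = np.array([2, 4, 6])
w, eps = 0.5, 1e-6

def loss(w):
    return np.average((x_data * w - y_data) ** 2)

analytic = -2 * (y_data - x_data * w) @ x_data.T / x_data.shape[0]
numeric = (loss(w + eps) - loss(w - eps)) / (2 * eps)
print(analytic, numeric)  # both print -14.0 (up to floating-point error)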
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

# hyperparameters
input_size = 1
output_size = 1
num_epochs = 60
learning_rate = 0.001

# toy dataset
x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                    [9.779], [6.182], [7.59], [2.167], [7.042],
                    [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)
y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
                    [3.366], [2.596], [2.53], [1.221], [2.827],
                    [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)

# linear model
model = nn.Linear(input_size, output_size)

# loss function and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# train the model
for epoch in range(num_epochs):
    # convert the numpy arrays to tensors
    inputs = torch.from_numpy(x_train)
    targets = torch.from_numpy(y_train)

    # forward pass
    outputs = model(inputs)
    loss = criterion(outputs, targets)

    # backward pass and parameter update
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if (epoch + 1) % 5 == 0:
        print("Epoch[{}/{}],loss:{:.4f}".format(epoch + 1, num_epochs, loss.item()))

# plot the fitted line
predicted = model(torch.from_numpy(x_train)).detach().numpy()
plt.plot(x_train, y_train, 'ro', label='Original data')
plt.plot(x_train, predicted, label='Fitted line')
plt.legend()
plt.show()

# save the model checkpoint
torch.save(model.state_dict(), 'linear.ckpt')
Epoch[5/60],loss:0.2663
Epoch[10/60],loss:0.2087
Epoch[15/60],loss:0.1854
Epoch[20/60],loss:0.1759
Epoch[25/60],loss:0.1721
Epoch[30/60],loss:0.1705
Epoch[35/60],loss:0.1699
Epoch[40/60],loss:0.1696
Epoch[45/60],loss:0.1695
Epoch[50/60],loss:0.1695
Epoch[55/60],loss:0.1695
Epoch[60/60],loss:0.1695
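torch.save above stores only the model's state_dict (its learned parameters), not the whole model object. To reuse the weights later, rebuild the same architecture and load the state dict back into it; a minimal sketch:

import torch
import torch.nn as nn

# rebuild the same architecture, then load the saved parameters into it
model = nn.Linear(1, 1)
model.load_state_dict(torch.load('linear.ckpt'))
model.eval()  # switch to evaluation mode before running inference
print(model(torch.tensor([[5.0]])))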
# -*- coding: utf-8 -*-
import torch

dtype = torch.float
device = torch.device("cpu")

# N: batch size; D_in: input dimension;
# H: hidden dimension; D_out: output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

# generate random data
x = torch.randn(N, D_in, device=device, dtype=dtype)
y = torch.randn(N, D_out, device=device, dtype=dtype)

# generate random weights
w1 = torch.randn(D_in, H, device=device, dtype=dtype, requires_grad=True)
w2 = torch.randn(H, D_out, device=device, dtype=dtype, requires_grad=True)

learning_rate = 1e-6
for t in range(500):
    # forward pass: linear -> ReLU (clamp at 0) -> linear
    y_pred = x.mm(w1).clamp(min=0).mm(w2)
    loss = (y_pred - y).pow(2).sum()
    print(t, loss.item())

    # loss.backward() computes the gradient of loss with respect to every
    # tensor involved that has requires_grad=True; here that is w1 and w2,
    # whose gradients become available as w1.grad and w2.grad.
    loss.backward()

    # w1 and w2 have requires_grad=True, but the update step itself should
    # not be tracked by autograd, so it is wrapped in torch.no_grad().
    # This step can also be done automatically with torch.optim.SGD.
    with torch.no_grad():
        w1 -= learning_rate * w1.grad
        w2 -= learning_rate * w2.grad

        # reset the gradients after updating the weights
        w1.grad.zero_()
        w2.grad.zero_()
0 30793478.0
1 24614226.0
2 19491514.0
3 14447308.0
4 10014916.0
...
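As the comment in the loop notes, the manual torch.no_grad() update and gradient zeroing can be delegated to torch.optim.SGD, which accepts the raw weight tensors directly. A sketch of the same training step rewritten with an optimizer (same shapes as above):

import torch

N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
w1 = torch.randn(D_in, H, requires_grad=True)
w2 = torch.randn(H, D_out, requires_grad=True)

# the optimizer performs the no_grad() update and gradient zeroing for us
optimizer = torch.optim.SGD([w1, w2], lr=1e-6)
for t in range(500):
    y_pred = x.mm(w1).clamp(min=0).mm(w2)
    loss = (y_pred - y).pow(2).sum()
    optimizer.zero_grad()  # replaces w1.grad.zero_() / w2.grad.zero_()
    loss.backward()
    optimizer.step()       # replaces the manual `with torch.no_grad():` update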