Datawhale: PyTorch Basics (3)

Implementing Logistic Regression with PyTorch

Tasks

  1. Basic implementation in PyTorch
  2. Implement logistic regression with PyTorch classes, writing the network structure with torch.nn.Module

References

Beginner essentials | The most complete collection of PyTorch learning resources

Quick-start notes: a practical tutorial for PyTorch model training (with code)

PyTorch study notes

Companion code for the book 《深度学习框架PyTorch:入门与实践》 (Deep Learning Framework PyTorch: Getting Started and in Practice)

PyTorch Chinese documentation

Reference answer

Homework

  1. Implement logistic regression with PyTorch classes
  • Import modules
import torch as t
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch.autograd import Variable
device = t.device('cpu')  # compute on the CPU
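If a GPU were available, a common PyTorch pattern is to pick the device automatically; a minimal optional sketch (this post sticks with the CPU):

device = t.device('cuda' if t.cuda.is_available() else 'cpu')  # falls back to the CPU when no GPU is present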
 
  • Function to generate the training data
def get_fake_data(batch_size=8):
    """
    Generate random training data for logistic regression:
    x is uniform on [0, 5), and the binary label y is 1 when x > 2.5, else 0.
    """
    x = t.rand(batch_size, 1, device=device) * 5
    y = (x > 2.5).float()  # the binary cross-entropy below requires 0/1 labels
#     Fixed toy dataset (the same one used in the nn.Module version below):
#     x = Variable(t.Tensor([[1.0],[2.0],[3.0],[4.0]]))
#     y = Variable(t.Tensor([[0.0],[0.0],[1.0],[1.0]]))
    return x, y
x,y = get_fake_data()
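To eyeball the generated data, one could scatter-plot a batch of it with the matplotlib import above (an optional sketch; xs and ys are throwaway names):

xs, ys = get_fake_data(batch_size=32)
plt.scatter(xs.numpy(), ys.numpy())  # labels jump from 0 to 1 around x = 2.5
plt.xlabel('x')
plt.ylabel('label')
plt.show()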
w = Variable(t.Tensor([-1]), requires_grad=True)  # initial weight
b = Variable(t.Tensor([0]), requires_grad=True)   # initial bias
w.data, b
(tensor([-1.]), tensor([0.], requires_grad=True))
epochs = 100
costs = []
lr = 0.001
print("开始训练。。。 ")
print("x = 1.5,y_pred  = ",float( w.data * 1.5 + b.data>0 ))
开始训练。。。 
x = 1.5,y_pred  =  0.0
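The loop below fits w and b by gradient descent on the binary cross-entropy of the sigmoid hypothesis, i.e. it minimizes

$$J(w, b) = -\frac{1}{N}\sum_{i=1}^{N}\left[y_i \log A_i + (1 - y_i)\log(1 - A_i)\right], \qquad A_i = \frac{1}{1 + e^{-(w x_i + b)}}$$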
# Train the model
for epoch in range(epochs):
    A = 1 / (1 + t.exp(-(w * x + b)))                   # sigmoid hypothesis
    J = -t.mean(y * t.log(A) + (1 - y) * t.log(1 - A))  # binary cross-entropy (uses the labels y, not x)
    costs.append(J.data.numpy())
    J.backward()

    w.data = w.data - lr * w.grad.data                  # gradient-descent update
    b.data = b.data - lr * b.grad.data
    w.grad.data.zero_()                                 # clear the gradients for the next epoch
    b.grad.data.zero_()

w.data
tensor([-0.6612])
print('Training finished.')
print('y_pred = ', float(w.data * 1.5 + b.data > 0))
print(w.data, b.data)
Training finished.
y_pred =  0.0
tensor([-0.6612]) tensor([0.1344])
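costs records the loss at every epoch but is never used above; as a quick sanity check you could plot the training curve (an optional sketch reusing the matplotlib import from earlier):

plt.plot(range(epochs), costs)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('Training loss')
plt.show()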

Implementing logistic regression with a PyTorch class: writing the network structure with torch.nn.Module

import torch as t
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from torch.autograd import Variable
device = t.device('cpu')  # compute on the CPU
  • Function to generate the training data
t.manual_seed(2)
def get_fake_data(batch_size=8):
    """
    Return a fixed toy dataset for logistic regression: four points with binary labels.
    """
#     Random alternative: x uniform on [0, 5), y = x * 2 + 3 plus noise
#     x = t.rand(batch_size, 1, device=device) * 5
#     y = x * 2 + 3 + t.rand(batch_size, 1) * 2
    x = Variable(t.Tensor([[1.0],[2.0],[3.0],[4.0]]))
    y = Variable(t.Tensor([[0.0],[0.0],[1.0],[1.0]]))
    return x, y
x,y = get_fake_data()
  • Define the network model
# Define the model by subclassing torch.nn.Module; this is the standard way to write a network in PyTorch
class Model(t.nn.Module):
    def __init__(self):
        super(Model, self).__init__()    # initialize the parent class
        self.linear = t.nn.Linear(1, 1)  # input and output dimensions are both 1

    def forward(self, x):
        y_pred = self.linear(x)          # raw logit; the sigmoid is applied inside BCEWithLogitsLoss below
        return y_pred

model = Model()  # instantiate the model

  • Define the loss and the optimizer

criterion = t.nn.BCEWithLogitsLoss()  # built-in loss combining a sigmoid layer with binary cross-entropy
optimizer = t.optim.SGD(model.parameters(), lr=0.01)  # stochastic gradient descent optimizer

hour_var = Variable(t.Tensor([[2.5]]))
y_pred = model(hour_var)  # prediction before training (raw logit)
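Because BCEWithLogitsLoss applies the sigmoid internally (for numerical stability), the Model above returns raw logits. An equivalent alternative is to put the sigmoid inside the model and train with plain BCELoss; a minimal sketch (ModelWithSigmoid is a hypothetical name, not used elsewhere in this post):

class ModelWithSigmoid(t.nn.Module):
    def __init__(self):
        super(ModelWithSigmoid, self).__init__()
        self.linear = t.nn.Linear(1, 1)

    def forward(self, x):
        return t.sigmoid(self.linear(x))  # probability in (0, 1)

# criterion = t.nn.BCELoss()  # would replace BCEWithLogitsLoss in that setup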

  • Model training
epochs = 40
for epoch in range(epochs):
    # forward pass: compute the prediction and the loss
    y_pred = model(x)        # feed x through the model
    loss = criterion(y_pred, y)
    print("epoch = ", epoch + 1, loss.data)
    optimizer.zero_grad()    # clear the accumulated gradients
    loss.backward()          # backpropagation
    optimizer.step()         # update the parameters

epoch =  1 tensor(0.6004)
epoch =  2 tensor(0.5998)
epoch =  3 tensor(0.5993)
epoch =  4 tensor(0.5987)
epoch =  5 tensor(0.5982)
...
  • Model prediction
hour_var = Variable(t.Tensor([[4.0]]))
y_pred = model(hour_var)  # prediction for x = 4.0 (this is the raw logit, not a probability)
y_pred.item()
0.9182039499282837
loss
tensor(0.5845, grad_fn=)
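Since the model outputs logits, turning a prediction into a probability and a class label could look like this (a small sketch reusing the trained model above):

prob = t.sigmoid(model(Variable(t.Tensor([[4.0]]))))  # probability of class 1
predicted_class = int(prob.item() > 0.5)              # threshold at 0.5
print(prob.item(), predicted_class)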
