Deep Learning Code Practice

Table of Contents

  • I. Backpropagation for y = w1*x^2 + w2*x + b
  • II. Linear regression
    • 1. Linear regression in PyTorch
    • 2. Comparing optimizers for linear regression, with visualization
  • III. Logistic regression with multi-dimensional input (sigmoid)
  • IV. Multi-class classification
  • V. CNN


I. Backpropagation for y = w1*x^2 + w2*x + b

# Train y = w1*x^2 + w2*x + b

import torch
import matplotlib.pyplot as plt

x_data=[1.0,2.0,3.0]
y_data=[2.0,4.0,6.0]

# Learnable parameters, all initialized to 1.0
w_1=torch.tensor(1.0,requires_grad=True)
w_2=torch.tensor(1.0,requires_grad=True)
b=torch.tensor(1.0,requires_grad=True)

def forward(x):
    # quadratic model y = w1*x^2 + w2*x + b
    return x**2*w_1+x*w_2+b

def loss(x,y):
    # squared error for a single sample
    y_pred=forward(x)
    return (y-y_pred)**2

print('Predict (before training)',4,round(forward(4).item(),2))
epochs=[]
costs=[]
for epoch in range(100):
    epochs.append(epoch)
    for x,y in zip(x_data,y_data):
        l=loss(x,y)
        l.backward()  # accumulate gradients into w_1.grad, w_2.grad, b.grad
        print('\tgrad: ',x,y,round(w_1.grad.item(),2),round(w_2.grad.item(),2),round(b.grad.item(),2))
        # manual SGD update with learning rate 0.01
        w_1.data-=0.01*w_1.grad.item()
        w_2.data-=0.01*w_2.grad.item()
        b.data-=0.01*b.grad.item()

        # clear the gradients so the next backward() does not add to them
        w_1.grad.data.zero_()
        w_2.grad.data.zero_()
        b.grad.data.zero_()
    costs.append(l.item())  # records the loss of the last sample in this epoch
    print('progress:',epoch,l.item())

print('Predict (after training)',4,round(forward(4).item(),2))

plt.plot(epochs,costs)
plt.ylabel('Cost')
plt.xlabel('Epoch')
plt.show()
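Because the model is a simple polynomial, the gradients autograd computes can be verified by hand: for L = (y - y_pred)^2 with y_pred = w1*x^2 + w2*x + b, we get dL/dw1 = -2(y - y_pred)*x^2, dL/dw2 = -2(y - y_pred)*x, and dL/db = -2(y - y_pred). A minimal standalone sanity check (separate from the training script above):

import torch

# fresh parameters so the check does not depend on training state
w_1 = torch.tensor(1.0, requires_grad=True)
w_2 = torch.tensor(1.0, requires_grad=True)
b = torch.tensor(1.0, requires_grad=True)

x, y = 2.0, 4.0
y_pred = x**2 * w_1 + x * w_2 + b
l = (y - y_pred)**2
l.backward()

# each pair should print two equal numbers
print(w_1.grad.item(), -2 * (y - y_pred.item()) * x**2)
print(w_2.grad.item(), -2 * (y - y_pred.item()) * x)
print(b.grad.item(),  -2 * (y - y_pred.item()))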

II. Linear regression

1. Linear regression in PyTorch

The general steps for building and training a model in PyTorch:
1. Prepare the dataset.
2. Build a model from the relevant classes; it computes the predictions.
3. Construct the loss function and the optimizer from PyTorch's APIs.
4. Write the iterative training loop: forward pass, backward pass, and weight update.
The code below walks through these four steps.

import torch
import matplotlib.pyplot as plt

x_data=torch.tensor([[1.0],[2.0],[3.0]])
y_data=torch.tensor([[2.0],[4.0],[6.0]])

class LinearModel(torch.nn.Module):
    def __init__(self):
        super(LinearModel,self).__init__()
        self.linear=torch.nn.Linear(1,1)  # both input and output are 1-dimensional

    def forward(self,x):
        y_pred=self.linear(x)
        return y_pred

model=LinearModel()
criterion=torch.nn.MSELoss(reduction='sum')  # sum of squared errors; size_average=False is deprecated
optimizer=torch.optim.SGD(model.parameters(),lr=0.01)

epochs=[]
costs=[]

for epoch in range(100):
    epochs.append(epoch)
    y_pred=model(x_data)
    loss=criterion(y_pred,y_data)
    costs.append(loss.item())
    print(epoch,loss.item())

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

print('w=',model.linear.weight.item())
print('b=',model.linear.bias.item())

x_test=torch.Tensor([[4.0]])
y_test=model(x_test)
print('y_pred=',y_test.data)

plt.plot(epochs,costs)
plt.ylabel('Cost')
plt.xlabel('Epoch')
plt.show()

Summary
1. torch.nn.Module implements __call__(), which internally calls forward(), so a model class must implement forward(); nn.Linear inherits this behavior.
2. nn.Linear wraps a linear computation unit; it holds two tensor members, the weight and the bias. Since the data satisfies y = 2x exactly, training should drive them toward w ≈ 2 and b ≈ 0.
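Both members can be inspected directly; a quick illustrative check (assuming the trained model and x_test from above are in scope):

print(model.linear.weight.shape)  # torch.Size([1, 1])
print(model.linear.bias.shape)    # torch.Size([1])
# model(x_test) goes through __call__, which dispatches to forward();
# calling the model is preferred over calling forward() directly.
print(model(x_test))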

2. Comparing optimizers for linear regression, with visualization

import torch
import matplotlib.pyplot as plt

x_data=torch.tensor([[1.0],[2.0],[3.0]])
y_data=torch.tensor([[2.0],[4.0],[6.0]])

class LinearModel(torch.nn.Module):
    def __init__(self):
        super(LinearModel,self).__init__()
        self.linear=torch.nn.Linear(1,1)  # both input and output are 1-dimensional

    def forward(self,x):
        y_pred=self.linear(x)
        return y_pred

model1 = LinearModel()
model2 = LinearModel()
model3 = LinearModel()
model4 = LinearModel()
model5 = LinearModel()
#model6 = LinearModel()  # reserved for LBFGS; see the note below
model7 = LinearModel()
model8 = LinearModel()
models = [model1,model2,model3,model4,model5,model7,model8]


criterion=torch.nn.MSELoss(reduction='sum')  # size_average=False is deprecated

op1 = torch.optim.SGD(model1.parameters(),lr = 0.01)  # a different optimizer for each model
op2 = torch.optim.Adagrad(model2.parameters(),lr = 0.01)
op3 = torch.optim.Adam(model3.parameters(),lr = 0.01)
op4 = torch.optim.Adamax(model4.parameters(),lr = 0.01)
op5 = torch.optim.ASGD(model5.parameters(),lr = 0.01)
#op6 = torch.optim.LBFGS(model6.parameters(),lr = 0.01)  # skipped: LBFGS.step() needs a closure, see the sketch after this section
op7 = torch.optim.RMSprop(model7.parameters(),lr = 0.01)
op8 = torch.optim.Rprop(model8.parameters(),lr = 0.01)
ops = [op1,op2,op3,op4,op5,op7,op8]

titles = ['SGD','Adagrad','Adam','Adamax','ASGD','RMSprop','Rprop']

index=0
for op,model in zip(ops,models):
    epochs=[]
    costs=[]

    for epoch in range(100):
        epochs.append(epoch)
        y_pred=model(x_data)
        loss=criterion(y_pred,y_data)
        costs.append(loss.item())
        print(epoch,loss.item())

        op.zero_grad()
        loss.backward()
        op.step()

    print('w=',model.linear.weight.item())
    print('b=',model.linear.bias.item())

    x_test=torch.Tensor([[4.0]])
    y_test=model(x_test)
    print('y_pred=',y_test.data)

    plt.plot(epochs,costs)
    plt.ylabel('Cost')
    plt.xlabel('Epoch')
    plt.title(titles[index])  # label each figure with its optimizer
    index+=1
    plt.show()
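LBFGS is left out of the comparison because, unlike the other optimizers, its step() expects a closure that re-evaluates the model and returns the loss. A minimal sketch of how it could be wired in, reusing the commented-out model6/op6 names:

model6 = LinearModel()
op6 = torch.optim.LBFGS(model6.parameters(), lr=0.01)

for epoch in range(100):
    def closure():
        op6.zero_grad()
        loss = criterion(model6(x_data), y_data)
        loss.backward()
        return loss
    op6.step(closure)  # LBFGS may evaluate the closure several times per step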

III. Logistic regression with multi-dimensional input (sigmoid)

Logistic regression mainly addresses binary classification: sigmoid squashes the model's output into (0, 1), which can be read as the probability of the positive class.
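A quick toy illustration of the two building blocks, sigmoid and the binary cross-entropy loss (standalone example with made-up numbers, not the diabetes data):

import torch

z = torch.tensor([-2.0, 0.0, 2.0])      # raw scores
p = torch.sigmoid(z)                    # tensor([0.1192, 0.5000, 0.8808])
target = torch.tensor([0.0, 1.0, 1.0])  # ground-truth labels
bce = torch.nn.BCELoss()                # -mean(y*log(p) + (1-y)*log(1-p))
print(bce(p, target).item())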

import numpy as np
import torch
import matplotlib.pyplot as plt

# Prepare the data (BCELoss expects 0/1 targets)
x_data = torch.from_numpy(np.loadtxt('diabetes_data.csv.gz',delimiter=' ',dtype=np.float32))
y_data = torch.from_numpy(np.loadtxt('diabetes_target.csv.gz',dtype=np.float32)).view(-1,1)  # reshape to (N, 1) to match the model output

# Custom multi-layer model
class Model(torch.nn.Module):
    def __init__(self):
        super(Model,self).__init__()
        self.linear1 = torch.nn.Linear(10,8)  # 10 input features, narrowed layer by layer
        self.linear2 = torch.nn.Linear(8,6)
        self.linear3 = torch.nn.Linear(6,4)
        self.linear4 = torch.nn.Linear(4,1)
        self.sigmoid = torch.nn.Sigmoid()  # adds a nonlinear mapping

    def forward(self,x):
        # With several hidden layers it is customary to thread a single
        # variable x through the whole chain of inputs and outputs
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        x = self.sigmoid(self.linear4(x))
        return x

model = Model()

# Build the loss function and the optimizer
criterion = torch.nn.BCELoss(reduction='mean')  # binary cross-entropy suits a logistic model; size_average=True is deprecated
op = torch.optim.SGD(model.parameters(),lr = 0.1)

epochs = []
costs = []
# Training loop
for epoch in range(1000):
    epochs.append(epoch)
    # forward pass
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    costs.append(loss.item())
    print(epoch, loss.item())

    # backward pass
    op.zero_grad()
    loss.backward()

    # weight update
    op.step()


# Visualize the training curve
plt.ylabel('Cost')
plt.xlabel('Epoch')
plt.plot(epochs, costs)
plt.show()
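To turn the predicted probabilities into class labels, threshold them at 0.5; a short sketch of computing training accuracy, assuming model, x_data, and y_data from above:

with torch.no_grad():
    pred_labels = (model(x_data) > 0.5).float()
    accuracy = (pred_labels == y_data).float().mean().item()
print('train accuracy:', accuracy)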


IV. Multi-class classification
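For more than two classes, the sigmoid + BCELoss pair gives way to softmax + cross-entropy. torch.nn.CrossEntropyLoss applies log-softmax internally, so the model outputs raw scores (logits) and the targets are class indices. A minimal sketch on made-up toy data:

import torch

x = torch.randn(4, 3)            # 4 samples, 3 features (toy data)
y = torch.tensor([0, 2, 1, 2])   # class indices, not one-hot vectors

model = torch.nn.Linear(3, 3)            # emits raw logits, no softmax layer
criterion = torch.nn.CrossEntropyLoss()  # log-softmax + negative log-likelihood
op = torch.optim.SGD(model.parameters(), lr=0.1)

for epoch in range(100):
    loss = criterion(model(x), y)
    op.zero_grad()
    loss.backward()
    op.step()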

V. CNN
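A CNN puts convolution and pooling layers, which exploit the 2-D structure of images, in front of a fully connected classifier. A minimal sketch of the shape bookkeeping for one conv + pool stage (toy sizes assumed):

import torch

x = torch.randn(1, 1, 28, 28)                 # (batch, channels, height, width)
conv = torch.nn.Conv2d(1, 10, kernel_size=5)  # 1 input channel -> 10 output channels
pool = torch.nn.MaxPool2d(2)                  # halves the spatial size

h = pool(torch.relu(conv(x)))
print(h.shape)  # torch.Size([1, 10, 12, 12]): 28 -> 24 after conv, 24 -> 12 after pooling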
