Implementing logistic regression with PyTorch
import torch

torch.manual_seed(2)

x_data = torch.tensor([[1.0], [2.0], [3.0], [4.0]])
y_data = torch.tensor([[0.0], [0.0], [1.0], [1.0]])

# Initialize the parameters
w = torch.tensor([-1.0], requires_grad=True)
b = torch.tensor([0.0], requires_grad=True)

epochs = 100
costs = []
lr = 0.1

print("before training, predict of x = 1.5 is:")
print("y_pred =", float(w.data * 1.5 + b.data > 0))  # decision boundary: w*x + b > 0

# Train the model
for epoch in range(epochs):
    # Compute the loss, then the gradients via backprop
    A = 1 / (1 + torch.exp(-(w * x_data + b)))  # logistic (sigmoid) function
    J = -torch.mean(y_data * torch.log(A) + (1 - y_data) * torch.log(1 - A))  # binary cross-entropy loss
    # J = -torch.mean(y_data * torch.log(A) + (1 - y_data) * torch.log(1 - A)) + alpha * w ** 2
    # manual L2 regularization: add the squared norm of w (a completed sketch follows the output below)
    costs.append(J.item())
    J.backward()  # autograd fills in w.grad and b.grad
    # Update the parameters, then clear the accumulated gradients
    with torch.no_grad():
        w -= lr * w.grad
        b -= lr * b.grad
    w.grad.zero_()
    b.grad.zero_()

print("after training, predict of x = 1.5 is:")
print("y_pred =", float(w.data * 1.5 + b.data > 0))
print(w.data, b.data)
Output:

before training, predict of x = 1.5 is:
y_pred = 0.0
after training, predict of x = 1.5 is:
y_pred = 0.0
tensor([0.6075]) tensor([-0.9949])
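The commented-out line in the training loop hints at manual L2 regularization. Below is a minimal completion of that variant, assuming a hand-picked penalty strength alpha (a hypothetical hyperparameter, not part of the original); only the loss changes, while the backward pass and the update rule stay the same.

A = 1 / (1 + torch.exp(-(w * x_data + b)))
bce = -torch.mean(y_data * torch.log(A) + (1 - y_data) * torch.log(1 - A))
alpha = 0.01  # assumed penalty strength, not from the original; tune as needed
J = bce + alpha * torch.sum(w ** 2)  # L2 penalty on the weight; the bias is usually left unpenalized

The nn.Module version below gets the same effect without touching the loss: SGD's weight_decay argument folds the penalty into every parameter's update.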
Implementing logistic regression with torch.nn.Module
import torch

torch.manual_seed(2)

x_data = torch.tensor([[1.0], [2.0], [3.0], [4.0]])
y_data = torch.tensor([[0.0], [0.0], [1.0], [1.0]])

# Define the network model.
# Every model subclasses torch.nn.Module; this is the standard pattern for writing networks in PyTorch.
class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()  # initialize the parent class
        self.linear = torch.nn.Linear(1, 1)  # input and output dimensions are both 1

    def forward(self, x):
        y_pred = self.linear(x)  # returns a raw logit; the sigmoid lives inside the loss below
        return y_pred

model = Model()  # instantiate the model

# Define the loss and the optimizer
criterion = torch.nn.BCEWithLogitsLoss()  # combines a sigmoid with binary cross-entropy (an explicit-sigmoid variant is sketched after the output)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # plain stochastic gradient descent
# optimizer = torch.optim.SGD(model.parameters(), lr=0.01, weight_decay=0.001)
# PyTorch's built-in L2 regularization: pass a weight_decay argument to the optimizer

# Before training
hour_var = torch.tensor([[2.5]])
print("predict (before training) given", 2.5, "is", float(model(hour_var).item() > 0))  # a positive logit means probability > 0.5

epochs = 40
for epoch in range(epochs):
    # Compute the loss on the whole dataset
    y_pred = model(x_data)  # feed x_data through the model
    loss = criterion(y_pred, y_data)
    print("epoch =", epoch + 1, f"{loss.item():.4f}")
    optimizer.zero_grad()  # clear the accumulated gradients
    loss.backward()        # backpropagate
    optimizer.step()       # take one optimization step

# After training
hour_var = torch.tensor([[4.0]])
print("predict (after training) given", 4.0, "is", float(model(hour_var).item() > 0))
Output:

predict (before training) given 2.5 is 0.0
epoch = 1 0.6004
epoch = 2 0.5998
epoch = 3 0.5993
epoch = 4 0.5987
epoch = 5 0.5982
epoch = 6 0.5977
epoch = 7 0.5972
epoch = 8 0.5967
epoch = 9 0.5962
epoch = 10 0.5957
epoch = 11 0.5952
epoch = 12 0.5947
epoch = 13 0.5943
epoch = 14 0.5938
epoch = 15 0.5934
epoch = 16 0.5930
epoch = 17 0.5926
epoch = 18 0.5921
epoch = 19 0.5917
epoch = 20 0.5913
epoch = 21 0.5909
epoch = 22 0.5906
epoch = 23 0.5902
epoch = 24 0.5898
epoch = 25 0.5894
epoch = 26 0.5891
epoch = 27 0.5887
epoch = 28 0.5884
epoch = 29 0.5880
epoch = 30 0.5877
epoch = 31 0.5873
epoch = 32 0.5870
epoch = 33 0.5867
epoch = 34 0.5863
epoch = 35 0.5860
epoch = 36 0.5857
epoch = 37 0.5854
epoch = 38 0.5851
epoch = 39 0.5848
epoch = 40 0.5845
predict (after training) given 4.0 is 1.0
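BCEWithLogitsLoss folds the sigmoid into the loss, which is why forward returns a raw logit and predictions threshold the logit at 0. For reference, here is a minimal sketch of the equivalent explicit-probability formulation, pairing a sigmoid in forward with torch.nn.BCELoss; BCEWithLogitsLoss is usually preferred because fusing the sigmoid with the log is more numerically stable.

import torch

class SigmoidModel(torch.nn.Module):
    def __init__(self):
        super(SigmoidModel, self).__init__()
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        return torch.sigmoid(self.linear(x))  # a probability in (0, 1), not a logit

model = SigmoidModel()
criterion = torch.nn.BCELoss()  # expects probabilities, so it pairs with the sigmoid above

# Prediction now thresholds the probability at 0.5 instead of the logit at 0
prob = model(torch.tensor([[4.0]]))
print("predicted class:", float(prob.item() > 0.5))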