生成数据集:
import torch
import matplotlib.pyplot as plt

# Synthetic dataset close to y = 2x + 1.
# torch.rand draws uniform noise in [0, 1); torch.randn would be standard normal.
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
noise = torch.rand(x.size())
y = 2 * x + 1 + noise

# Visualize the raw samples.
plt.scatter(x.data.numpy(), y.data.numpy())
plt.show()
# !/usr/bin/python
# coding: utf8
import torch
from torch.autograd import Variable  # NOTE(review): deprecated API, kept for the code below
import matplotlib.pyplot as plt
from torch import nn

# Build a dataset near y = 2x + 1; torch.rand adds uniform [0, 1) noise
# (torch.randn would add standard-normal noise instead).
x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
y = 2 * x + 1 + torch.rand(x.size())
class LinearRegression(nn.Module):
    """Single-feature linear model y = w*x + b."""

    def __init__(self):
        super(LinearRegression, self).__init__()
        # One affine layer: 1 input feature -> 1 output feature.
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        """Apply the affine map to x of shape (N, 1); returns shape (N, 1)."""
        out = self.linear(x)
        return out
# Instantiate the model, on GPU when one is available.
if torch.cuda.is_available():
    model = LinearRegression().cuda()
else:
    model = LinearRegression()

criterion = nn.MSELoss()  # mean squared error — standard loss for regression
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)

# x and y never change, so move them to the model's device once, outside the
# loop. torch.autograd.Variable is deprecated: plain tensors track gradients.
if torch.cuda.is_available():
    inputs = x.cuda()
    target = y.cuda()
else:
    inputs = x
    target = y

num_epochs = 1000
for epoch in range(num_epochs):
    # Forward pass.
    out = model(inputs)
    loss = criterion(out, target)
    # Backward pass; gradients accumulate, so they must be cleared every step.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 20 == 0:
        # loss.item() extracts the Python scalar; indexing a 0-dim tensor with
        # loss.data[0] raises the UserWarning shown in the captured output.
        print('Epoch[{}/{}], loss:{:.6f}'.format(epoch + 1, num_epochs, loss.item()))
# Evaluation: switch layers to eval mode and disable gradient tracking —
# no graph is needed for pure inference.
model.eval()
with torch.no_grad():
    if torch.cuda.is_available():
        predict = model(x.cuda()).cpu().numpy()
    else:
        predict = model(x).numpy()

plt.plot(x.numpy(), y.numpy(), 'ro', label='Original Data')
plt.plot(x.numpy(), predict, label='Fitting Line')
plt.legend()  # the label= kwargs above are only displayed once legend() is called
plt.show()
输出结果:
/home/pt/miniconda3/envs/qyh/bin/python /home/pt/yaotl/PycharmProjects/linear.py
/home/pt/yaotl/PycharmProjects/linear.py:55: UserWarning: invalid index of a 0-dim tensor. This will be an error in PyTorch 0.5. Use tensor.item() to convert a 0-dim tensor to a Python number
print('Epoch[{}/{}], loss:{:.6f}'.format(epoch + 1, num_epochs, loss.data[0]))
Epoch[20/1000], loss:2.819824
Epoch[40/1000], loss:1.439924
Epoch[60/1000], loss:0.791905
Epoch[80/1000], loss:0.477968
Epoch[100/1000], loss:0.318931
Epoch[120/1000], loss:0.233500
Epoch[140/1000], loss:0.184350
Epoch[160/1000], loss:0.154017
Epoch[180/1000], loss:0.134083
Epoch[200/1000], loss:0.120316
Epoch[220/1000], loss:0.110465
Epoch[240/1000], loss:0.103246
Epoch[260/1000], loss:0.097876
Epoch[280/1000], loss:0.093844
Epoch[300/1000], loss:0.090801
Epoch[320/1000], loss:0.088495
Epoch[340/1000], loss:0.086745
Epoch[360/1000], loss:0.085415
Epoch[380/1000], loss:0.084404
Epoch[400/1000], loss:0.083635
Epoch[420/1000], loss:0.083050
Epoch[440/1000], loss:0.082605
Epoch[460/1000], loss:0.082266
Epoch[480/1000], loss:0.082008
Epoch[500/1000], loss:0.081812
Epoch[520/1000], loss:0.081663
Epoch[540/1000], loss:0.081549
Epoch[560/1000], loss:0.081462
Epoch[580/1000], loss:0.081396
Epoch[600/1000], loss:0.081346
Epoch[620/1000], loss:0.081308
Epoch[640/1000], loss:0.081279
Epoch[660/1000], loss:0.081257
Epoch[680/1000], loss:0.081240
Epoch[700/1000], loss:0.081227
Epoch[720/1000], loss:0.081218
Epoch[740/1000], loss:0.081210
Epoch[760/1000], loss:0.081205
Epoch[780/1000], loss:0.081200
Epoch[800/1000], loss:0.081197
Epoch[820/1000], loss:0.081195
Epoch[840/1000], loss:0.081193
Epoch[860/1000], loss:0.081191
Epoch[880/1000], loss:0.081190
Epoch[900/1000], loss:0.081189
Epoch[920/1000], loss:0.081189
Epoch[940/1000], loss:0.081188
Epoch[960/1000], loss:0.081188
Epoch[980/1000], loss:0.081188
Epoch[1000/1000], loss:0.081187
Process finished with exit code 0