Implementing One-Dimensional Linear Regression in PyTorch
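The model to fit is the straight line y = w * x + b, so a single nn.Linear(1, 1) layer, holding one weight and one bias, is the entire network.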

import torch
import numpy as np
from torch import nn, optim

# Define the linear regression model (a regressor, not a classifier)
class LinearRegression(nn.Module):
    def __init__(self):
        super(LinearRegression, self).__init__()
        # One input feature, one output: this layer learns y = w * x + b
        self.linear = nn.Linear(1, 1)

    def forward(self, x):
        out = self.linear(x)
        return out
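To confirm that the module really is just y = w * x + b, you can inspect its two parameters; a minimal sketch:

model = LinearRegression()
print(model.linear.weight)  # w: a randomly initialized 1x1 tensor
print(model.linear.bias)    # b: a 1-element tensor
x = torch.randn(5, 1)       # a batch of 5 one-dimensional inputs
print(model(x).shape)       # torch.Size([5, 1])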


if __name__ == '__main__':

    # Run on the GPU when available, otherwise fall back to the CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = LinearRegression().to(device)

    x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                        [9.779], [6.182], [7.59], [2.167], [7.042],
                        [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)

    y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
                        [3.366], [2.596], [2.53], [1.221], [2.827],
                        [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)

    x_train = torch.from_numpy(x_train).to(device)
    y_train = torch.from_numpy(y_train).to(device)

    criterion = nn.MSELoss()
    optimizer = optim.SGD(model.parameters(), lr=1e-3)

    num_epochs = 100
    # Tensors carry autograd information themselves, so they can be fed
    # straight into the model and become part of the computation graph
    for epoch in range(num_epochs):

        # forward
        out = model(x_train)            # forward pass through the network
        loss = criterion(out, y_train)  # mean squared error against the targets

        # backward
        # Zero the gradients before every backward pass; otherwise they
        # accumulate across iterations and training will not converge
        # (see the short demonstration after the script)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if (epoch + 1) % 20 == 0:
            # loss is a 0-dim tensor; .item() extracts its value as a
            # plain Python float
            print('Epoch[{}/{}], loss:{:.6f}'.format(
                epoch + 1, num_epochs, loss.item()))

    # Switch to evaluation mode: layers such as Dropout and BatchNorm
    # behave differently during training and inference
    model.eval()
    with torch.no_grad():
        predict = model(x_train)
    predict = predict.cpu().numpy()
    print(predict)
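The optimizer.zero_grad() call in the loop matters because backward() adds to any gradient already stored in .grad rather than overwriting it. A standalone sketch illustrating the accumulation (the variable w here is hypothetical, not part of the script above):

w = torch.ones(1, requires_grad=True)
(2 * w).sum().backward()
print(w.grad)   # tensor([2.])
(2 * w).sum().backward()
print(w.grad)   # tensor([4.]) -- the second gradient was added, not assigned
w.grad.zero_()  # this is what optimizer.zero_grad() does for every parameter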

The experimental results are shown in the figure:

[Figure 1: one-dimensional linear regression result]
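To reproduce a figure like this one, the training points and the fitted line can be plotted with matplotlib; a minimal sketch, assuming it runs at the end of the script above (after model.eval() and the predict computation):

import matplotlib.pyplot as plt

plt.scatter(x_train.cpu().numpy(), y_train.cpu().numpy(), label='original data')
plt.plot(x_train.cpu().numpy(), predict, 'r-', label='fitted line')
plt.legend()
plt.show()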
