[Machine Learning] Linear Regression in PyTorch

import torch
import torch.utils.data as Data
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import numpy as np



# Generate the dataset

num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
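
# A quick sanity check (my addition, not in the original post): the generated
# features should be a 1000x2 float tensor and labels a length-1000 vector.
# print(features.shape, labels.shape)  # torch.Size([1000, 2]) torch.Size([1000])
# print(features[0], labels[0])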



# Read the data

batch_size = 10
dataset = Data.TensorDataset(features, labels)
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)

# inspect one batch:
# for X, y in data_iter:
#     print(X, y)
#     break



# Define the model

class LinearNet(nn.Module):
    def __init__(self, n_feature):
        super().__init__()
        # a single linear layer mapping n_feature inputs to 1 output
        self.linear = nn.Linear(n_feature, 1)

    def forward(self, x):
        y = self.linear(x)
        return y

net = LinearNet(num_inputs)
print(net)
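
# A minimal forward-pass check (sketch, not in the original post): a dummy
# batch of 3 samples should produce a (3, 1) output.
# with torch.no_grad():
#     print(net(torch.randn(3, num_inputs)).shape)  # torch.Size([3, 1])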


## Build the network more conveniently with nn.Sequential

# Approach 1
net = nn.Sequential(
    nn.Linear(num_inputs, 1)
    # more layers can be added here
)

# Approach 2
net = nn.Sequential()
net.add_module('linear', nn.Linear(num_inputs, 1))
# net.add_module ......

# Approach 3
from collections import OrderedDict
net = nn.Sequential(OrderedDict([
          ('linear', nn.Linear(num_inputs, 1))
          # ......
        ]))


print(net)
print(net[0])


for param in net.parameters():
    print(param)
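
# If you also want the name of each parameter, named_parameters() works too
# (a sketch I added; with Approach 3 the names are 'linear.weight' / 'linear.bias'):
# for name, param in net.named_parameters():
#     print(name, param.shape)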



# Initialize model parameters

# note: net[0] indexing works because net is now an nn.Sequential; for the LinearNet class above, use net.linear instead
init.normal_(net[0].weight, mean=0, std=0.01)
init.constant_(net[0].bias, val=0)  # alternatively, modify bias.data directly: net[0].bias.data.fill_(0)
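
# (My addition) a quick check that initialization took effect:
# print(net[0].weight.data, net[0].bias.data)  # bias should be exactly 0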



# Define the loss function

loss = nn.MSELoss()
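
# nn.MSELoss with the default reduction='mean' averages the squared error over
# all elements; a hand-rolled equivalent (sketch, for intuition only):
# def mse(y_hat, y):
#     return ((y_hat - y) ** 2).mean()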



# Define the optimization algorithm

optimizer = optim.SGD(net.parameters(), lr=0.03)
print(optimizer)

# Set different learning rates for different subnetworks (illustrative; this net has no subnet1/subnet2)
# optimizer = optim.SGD([
#                 # a parameter group without its own lr uses the outer default
#                 {'params': net.subnet1.parameters()}, # lr=0.03
#                 {'params': net.subnet2.parameters(), 'lr': 0.01}
#             ], lr=0.03)
# Adjust the learning rate
# for param_group in optimizer.param_groups:
#     param_group['lr'] *= 0.1
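
# Instead of scaling lr by hand, torch.optim.lr_scheduler offers built-in
# schedules; e.g. StepLR multiplies lr by gamma every step_size epochs
# (a sketch, not used in the training below):
# scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)
# scheduler.step()  # call once per epoch, after optimizer.step()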



# Train the model

num_epochs = 3
for epoch in range(1,num_epochs + 1):
    for X,y in data_iter:
        output = net(X)
        l = loss(output,y.view(-1,1))
        optimizer.zero_grad()
        l.backward()
        optimizer.step()
    print(f'epoch {epoch}, loss {l.item()}')
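
# The loss printed above is for the last minibatch only; to evaluate on the
# full dataset (my addition), disable gradient tracking:
# with torch.no_grad():
#     print('full-data loss:', loss(net(features), labels.view(-1, 1)).item())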


# compare the learned parameters with the ground truth
dense = net[0]
print(true_w, dense.weight)
print(true_b, dense.bias)
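
# Finally, a usage sketch (my addition): predict for a new input (1, 1), whose
# noise-free label is 2*1 - 3.4*1 + 4.2 = 2.8.
# with torch.no_grad():
#     print(net(torch.tensor([[1.0, 1.0]])))  # should be close to 2.8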

Reference: Dive into Deep Learning (PyTorch edition)
