# Linear regression from scratch
import torch
from IPython import display
from matplotlib import pyplot as plt
import numpy as np
import random

num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
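The display and matplotlib imports above go otherwise unused; a minimal sketch of how they can visualize the generated data in a notebook (the negative slope from true_w[1] = -3.4 should be visible):

display.set_matplotlib_formats('svg')  # sharper notebook figures; needs IPython
plt.scatter(features[:, 1].numpy(), labels.numpy(), s=1)  # 2nd feature vs. labels
plt.show()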
def data_iter(batch_size, features, labels):
    num_examples = len(features)
    indices = list(range(num_examples))
    random.shuffle(indices)  # visit examples in random order
    for i in range(0, num_examples, batch_size):
        j = torch.LongTensor(indices[i: min(i + batch_size, num_examples)])  # last batch may be short
        yield features.index_select(0, j), labels.index_select(0, j)
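A quick sanity check on the iterator, assuming the definitions above; it draws one mini-batch and stops:

for X, y in data_iter(10, features, labels):
    print(X.shape, y.shape)  # expect torch.Size([10, 2]) torch.Size([10])
    break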
w = torch.tensor(np.random.normal(0, 0.01, (num_inputs, 1)), dtype=torch.float)
b = torch.zeros(1, dtype=torch.float)
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
def linreg(X, w, b):
    return torch.mm(X, w) + b

def squared_loss(y_hat, y):
    # reshape y to match y_hat; the 1/2 factor simplifies the gradient
    return (y_hat - y.view(y_hat.size())) ** 2 / 2

def sgd(params, lr, batch_size):
    for param in params:
        # update .data in place so autograd does not track the step
        param.data -= lr * param.grad / batch_size
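Because the loss below is summed over the mini-batch before calling backward, sgd divides by batch_size to apply an average gradient. A minimal sketch verifying autograd against the closed-form gradient X^T(Xw + b - y) of the summed squared loss, using the definitions above:

X0, y0 = features[:10], labels[:10]
squared_loss(linreg(X0, w, b), y0).sum().backward()
with torch.no_grad():
    manual = X0.t().mm(X0.mm(w) + b - y0.view(-1, 1))  # X^T (Xw + b - y)
print(torch.allclose(w.grad, manual, atol=1e-4))  # expect True
w.grad.data.zero_()
b.grad.data.zero_()  # reset so training below starts from clean gradients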
batch_size = 10
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss

for epoch in range(num_epochs):
    for X, y in data_iter(batch_size, features, labels):
        l = loss(net(X, w, b), y).sum()  # scalar loss on the mini-batch
        l.backward()                     # compute gradients of l w.r.t. w and b
        sgd([w, b], lr, batch_size)      # update parameters
        w.grad.data.zero_()              # clear gradients for the next batch
        b.grad.data.zero_()
    train_l = loss(net(features, w, b), labels)
    print('epoch %d, loss %f' % (epoch + 1, train_l.mean().item()))
epoch 1, loss 0.033245
epoch 2, loss 0.000145
epoch 3, loss 0.000053
print(true_w, '\n', w)
print(true_b, '\n', b)
[2, -3.4]
 tensor([[ 2.0009],
        [-3.4001]], requires_grad=True)
4.2
 tensor([4.1995], requires_grad=True)
# Concise implementation with PyTorch's high-level API
import torch
import numpy as np

num_inputs = 2
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
# Read the data
import torch.utils.data as Data

batch_size = 10
# combine the features and labels of the training data
dataset = Data.TensorDataset(features, labels)
# read random mini-batches
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
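The loader behaves like the hand-written data_iter from the first version; a quick check that draws one random batch, assuming the definitions above:

for X, y in data_iter:
    print(X.shape, y.shape)  # expect torch.Size([10, 2]) torch.Size([10])
    break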
import torch.nn as nn

# class LinearNet(nn.Module):
#     def __init__(self, n_feature):
#         super(LinearNet, self).__init__()
#         self.linear = nn.Linear(n_feature, 1)
#     # forward defines the forward pass
#     def forward(self, x):
#         y = self.linear(x)
#         return y
# net = LinearNet(num_inputs)

from collections import OrderedDict
net = nn.Sequential(OrderedDict([
    ('linear', nn.Linear(num_inputs, 1))
    # ......
]))
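Printing the container is a cheap way to confirm the architecture and parameter shapes; a small sketch:

print(net)  # Sequential with a single (linear) layer
for param in net.parameters():
    print(param.size())  # weight torch.Size([1, 2]), bias torch.Size([1])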
from torch.nn import init
init.normal_(net[0].weight, mean=0, std=0.01)
init.constant_(net[0].bias, val=0)  # or modify bias.data directly:
net[0].bias.data.fill_(0)
loss = nn.MSELoss()
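Note that nn.MSELoss averages the squared error over the batch by default and omits the 1/2 factor of squared_loss above, so the printed losses sit on a slightly different scale. A quick check of the reduction with hypothetical toy tensors:

y_hat0 = torch.tensor([[1.0], [2.0]])
y0 = torch.tensor([[0.0], [0.0]])
print(loss(y_hat0, y0))  # (1 + 4) / 2 = 2.5, the batch mean of squared errors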
import torch.optim as optim
optimizer = optim.SGD(net.parameters(), lr=0.03)
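The optimizer exposes its hyperparameters through param_groups, so the learning rate could be adjusted mid-training; a minimal sketch (left commented out so the run below keeps lr=0.03):

print(optimizer)  # shows SGD with lr=0.03
# for param_group in optimizer.param_groups:
#     param_group['lr'] *= 0.1  # e.g., decay the learning rate tenfold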
num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        output = net(X)
        l = loss(output, y.view(-1, 1))  # reshape y so MSELoss sees matching shapes
        optimizer.zero_grad()            # zero the gradients; equivalent to net.zero_grad()
        l.backward()
        optimizer.step()                 # update all parameters
    print('epoch %d, loss: %f' % (epoch, l.item()))
epoch 1, loss: 0.000086
epoch 2, loss: 0.000026
epoch 3, loss: 0.000095
dense = net[0]
print(true_w, dense.weight)
print(true_b, dense.bias)
[2, -3.4] Parameter containing:
tensor([[ 1.9999, -3.3998]], requires_grad=True)
4.2 Parameter containing:
tensor([4.1996], requires_grad=True)