import torch
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# Load the Boston housing dataset.
# NOTE(review): sklearn.datasets.load_boston was deprecated in scikit-learn 1.0
# and removed in 1.2 — on modern versions this raises ImportError and the data
# must be fetched from the original source (see the scikit-learn changelog).
boston = datasets.load_boston()
X = boston.data
y = boston.target

# Drop samples whose target hit the 50.0 cap (censored values) — compute the
# mask once so X and y are filtered consistently.
mask = y < 50.0
X = X[mask]
y = y[mask]

# Split first, then standardize using statistics fit on the training set only,
# so no test-set information leaks into the scaler.
X_train, X_test, y_train, y_test = train_test_split(X, y)
standardScaler = StandardScaler()
standardScaler.fit(X_train)
X_train = standardScaler.transform(X_train)
X_test = standardScaler.transform(X_test)

# Sanity check; expected roughly ((367, 13), (123, 13), (367,), (123,)).
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
# net
class Net(torch.nn.Module):
    """Single-hidden-layer MLP regressor: linear(n_feature->100) -> ReLU -> linear(100->n_output)."""

    def __init__(self, n_feature, n_output):
        """
        n_feature: number of input features per sample.
        n_output: number of regression outputs per sample.
        """
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, 100)
        self.predict = torch.nn.Linear(100, n_output)

    def forward(self, x):
        """Map a (batch, n_feature) float tensor to (batch, n_output) predictions."""
        out = self.hidden(x)
        out = torch.relu(out)
        out = self.predict(out)
        return out
# Model: 13 standardized input features -> 1 predicted price.
net = Net(13, 1)
# Loss: mean squared error, the standard choice for regression.
loss_func = torch.nn.MSELoss()
# Optimizer: Adam over all network parameters.
optimizer = torch.optim.Adam(net.parameters(), lr=0.01)
# training
# The training tensors never change between iterations, so build them once
# outside the loop instead of re-allocating them 10000 times as before.
x_data = torch.tensor(X_train, dtype=torch.float32)
y_data = torch.tensor(y_train, dtype=torch.float32)

for i in range(10000):
    # Call the module (net(x)) rather than net.forward(x) so Module hooks run.
    pred = net(x_data)
    # squeeze drops the trailing size-1 dim: (N, 1) -> (N,) to match y_data.
    pred = torch.squeeze(pred)
    # The 0.001 factor only rescales the reported loss (and gradients
    # uniformly); Adam's updates are largely insensitive to such scaling.
    loss = loss_func(pred, y_data) * 0.001
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    print("ite:{}, loss_train:{}".format(i, loss))
    print(pred[0:10])
    print(y_data[0:10])
# test
# Evaluate on the held-out set. no_grad() avoids building an autograd graph
# for a pure inference pass; calling net(...) (not net.forward) runs hooks.
x_data = torch.tensor(X_test, dtype=torch.float32)
y_data = torch.tensor(y_test, dtype=torch.float32)
with torch.no_grad():
    pred = torch.squeeze(net(x_data))
    # Same 0.001 scaling as training so train/test losses are comparable.
    loss_test = loss_func(pred, y_data) * 0.001
# i is the last training-iteration index from the loop above.
print("ite:{}, loss_test:{}".format(i, loss_test))
# Saves the entire pickled module (architecture + weights), not just a
# state_dict; loading it later requires the Net class to be importable.
torch.save(net, "boston_model.pkl")
# Reload the full pickled module.
# NOTE(review): since PyTorch 2.6, torch.load defaults to weights_only=True,
# which cannot unpickle a whole nn.Module — weights_only=False is required
# here (only safe for checkpoint files you trust, since it runs pickle).
net = torch.load("boston_model.pkl", weights_only=False)
loss_func = torch.nn.MSELoss()

# test
x_data = torch.tensor(X_test, dtype=torch.float32)
y_data = torch.tensor(y_test, dtype=torch.float32)
with torch.no_grad():
    pred = torch.squeeze(net(x_data))
    loss_test = loss_func(pred, y_data) * 0.001
print("loss_test:{}".format(loss_test))
# Example output from one run: loss_test:0.015603514388203621