Linear Models and Their Visualization

Finding the optimal parameter of a linear model by exhaustive search
Linear model: y = w * x

import numpy as np
import matplotlib.pyplot as plt

train_x = [1.0, 2.0, 3.0]
train_y = [2.0, 4.0, 6.0]

def forward(x):
    # prediction of the linear model y = w * x (w is the current candidate value)
    return x * w

def loss(y_pred, y):
    # squared error for a single sample
    return (y_pred - y)**2

w_list = []
train_loss_list = []
# exhaustively try w in [0.0, 4.0] with step 0.1
for w in np.arange(0.0, 4.1, 0.1):
    l_sum = 0
    for x, y in zip(train_x, train_y):
        y_pred = forward(x)
        loss_val = loss(y_pred, y)
        l_sum += loss_val
    w_list.append(w)
    train_loss_list.append(l_sum / len(train_x))  # mean squared error for this w
    print('w =', w, 'MSE =', l_sum / len(train_x))

plt.plot(w_list, train_loss_list)
plt.xlabel('w')
plt.ylabel('MSE')
plt.show()

(Figure 1: MSE as a function of w)
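As a small follow-up sketch (not part of the original code), the best w from this sweep can be read off with np.argmin; the variable names reuse the lists built above:

best_idx = int(np.argmin(train_loss_list))  # index of the smallest mean loss
print('best w:', w_list[best_idx], 'min MSE:', train_loss_list[best_idx])
# for this data the minimum lies at roughly w = 2.0 (up to floating-point rounding), where the MSE is essentially 0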
Linear model: y = w * x + b
Note that in this visualization two parameters, w and b, jointly determine the value of the loss function, so np.meshgrid() is needed. This function pairs every value in w_list with every value in b_list and returns two 2-D arrays, each of shape (len(b_list), len(w_list)); a small example is shown below.
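For intuition, here is a minimal sketch of what np.meshgrid() returns; the array names are just for illustration:

import numpy as np

w_demo = [0.0, 1.0, 2.0]   # 3 candidate w values
b_demo = [-1.0, 0.0]       # 2 candidate b values
W, B = np.meshgrid(w_demo, b_demo)
print(W.shape, B.shape)    # (2, 3) (2, 3): rows vary with b, columns with w
print(W)                   # [[0. 1. 2.] [0. 1. 2.]]
print(B)                   # [[-1. -1. -1.] [ 0.  0.  0.]]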

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d

train_x = [1.0, 2.0, 3.0]
train_y = [2.0, 4.0, 6.0]

def forward(x):
    # prediction of the linear model y = w * x + b
    return x * w + b

def loss(y_pred, y):
    # squared error for a single sample
    return (y_pred - y)**2

# candidate values for the two parameters
w_list = np.arange(0.0, 4.1, 0.1)   # 41 values
b_list = np.arange(-2.0, 2.0, 0.1)  # 40 values
X, Y = np.meshgrid(w_list, b_list)  # both of shape (len(b_list), len(w_list)) = (40, 41)
print(X.shape, Y.shape)
train_loss_list = []

# iterate over the grid row by row: w and b are 1-D arrays of length 41,
# so forward() and loss() broadcast over a whole row at once
for w, b in zip(X, Y):
    l_sum = 0
    for x, y in zip(train_x, train_y):
        y_pred = forward(x)
        loss_val = loss(y_pred, y)
        l_sum += loss_val
    train_loss_list.append(l_sum / len(train_x))  # mean loss for this row

# plot the loss surface
fig = plt.figure()
ax = plt.axes(projection='3d')
Z = np.array(train_loss_list).reshape(X.shape)  # (40, 41) grid of MSE values
print(Z.shape)
ax.plot_surface(X, Y, Z)
ax.set_xlabel('w')
ax.set_ylabel('b')
ax.set_zlabel('MSE')
plt.show()

Output:

(40, 41) (40, 41)
(40, 41)

(Figure 2: 3-D surface of the MSE loss over w and b)
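Similarly, a hedged sketch (not in the original code) for reading the approximate minimizer off the grid, appended after the code above and reusing its X, Y, Z:

i, j = np.unravel_index(np.argmin(Z), Z.shape)  # indices of the smallest MSE on the grid
print('best w:', X[i, j], 'best b:', Y[i, j], 'min MSE:', Z[i, j])
# with this data the minimum lies near w = 2.0, b = 0.0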
