人工智能导论——基于梯度下降法的线性回归

本文演示如何用梯度下降法求解一元线性回归：先构造带噪声的数据集，再迭代更新参数 w、b 并观察损失值的下降过程。

import numpy as np
import matplotlib.pyplot as plt

# Build a synthetic dataset: y = 1.5*x + 0.3 plus Gaussian noise.
# NOTE(review): np.random.normal(-1, 1, 100) draws noise with MEAN -1
# (not zero-mean), so the effective intercept is ~ -0.7 — confirm intent.
x = np.random.uniform(-10, 10, 100)
noise = np.random.normal(-1, 1, 100)
y = x * 1.5 + 0.3 + noise

# Reshape both arrays into (100, 1) column vectors.
x = x.reshape(-1, 1)
y = y.reshape(-1, 1)

# Join x and y side by side into a (100, 2) array and shuffle the rows.
dataSet = np.concatenate((x,y), axis=1)
np.random.shuffle(dataSet)

# Split into a training set (first 80 rows) and a test set (last 20 rows).
trainSet = dataSet[:80]
testSet = dataSet[80:]

# Scatter plot of the full dataset, with the test points drawn on top
# in a second color so the train/test split is visible.
plt.scatter(dataSet[:,0], dataSet[:,1])
plt.scatter(testSet[:,0], testSet[:,1])

# Mean squared error of the linear model f(x) = w*x + b.
def mse(w, b, dataset):
    """Return the mean squared error of y = w*x + b over *dataset*.

    dataset is an (N, 2) array: column 0 holds the x values, column 1
    holds the target y values.
    """
    total = 0
    for xi, yi in dataset:
        residual = yi - (w * xi + b)
        total += residual ** 2
    return total / len(dataset)

# Perform one gradient-descent step on w and b.
def gradient_update(w, b, dataset, alpha):
    """Return (w, b) after one gradient-descent step on the MSE loss.

    dataset : (N, 2) array, column 0 = x, column 1 = y.
    alpha   : learning rate (step size).

    The MSE gradients are
        dL/dw = (2/N) * sum((w*x + b - y) * x)
        dL/db = (2/N) * sum(w*x + b - y)

    Bug fixed: the original code applied the 2/N averaging factor inside
    the loop AND divided the accumulated gradient by N again in the
    update, shrinking the effective step by an extra factor of 1/N.
    """
    N = len(dataset)
    w_gradient = 0.0
    b_gradient = 0.0
    for i in range(N):
        x = dataset[i, 0]
        y = dataset[i, 1]
        error = w * x + b - y
        w_gradient += (2 / N) * error * x
        b_gradient += (2 / N) * error
    # The gradients above are already averaged over N; do not divide again.
    w_update = w - alpha * w_gradient
    b_update = b - alpha * b_gradient
    return w_update, b_update
# Training hyper-parameters: number of iterations, learning rate, and a
# list that records the training loss after every epoch.
epochs = 1000
alpha = 0.01
loss_mse = []

# Initialize w and b with random values in [0, 1).
w = np.random.rand()
b = np.random.rand()

# Run gradient descent on the training set, logging the loss each epoch.
for i in range(epochs):
    w, b = gradient_update(w, b, trainSet, alpha)
    loss_mse.append(mse(w, b, trainSet))

# Scatter plot of the training data with the fitted line overlaid.
# NOTE(review): trainSet rows are shuffled, so plotting the line from
# unsorted x values draws a jagged path; sorting x first would give a
# clean straight line.
plt.scatter(trainSet[:, 0], trainSet[:, 1])
plt.plot(trainSet[:, 0], trainSet[:, 0] * w + b)
plt.show()

# Plot the loss curve over the training epochs.
plt.plot(range(epochs), loss_mse)

标签：人工智能导论、人工智能、线性回归、Python