Implementing univariate linear regression with gradient descent
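
The model is a straight line y = k * x + b. Training minimizes the mean squared error over the m samples,

    error = (1/m) * Σ (y_i - (k * x_i + b))^2,

by gradient descent. At every iteration the averaged gradients with respect to the intercept b and the slope k are

    b_grad = (1/m) * Σ (k * x_i + b - y_i)
    k_grad = (1/m) * Σ (k * x_i + b - y_i) * x_i

(the constant factor of 2 from differentiating the square is dropped; it only rescales the learning rate), and both parameters take a small step against their gradient: b = b - lr * b_grad, k = k - lr * k_grad. The script below implements exactly this with explicit Python loops and plots the current fit every few iterations.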

import numpy as np
import matplotlib.pyplot as plt

# Load the data: a CSV with two columns, x in the first and y in the second
data = np.genfromtxt("data.csv", delimiter=",")
x_data = data[:,0]
y_data = data[:,1]


# Learning rate
lr = 0.0001
# Intercept
b = 0
# Slope
k = 0
# Maximum number of iterations
epochs = 100


# Mean squared error (cost function)
def compute_error(b, k, x_data, y_data):
    totalError = 0
    for i in range(0, len(x_data)):
        totalError += (y_data[i] - (k * x_data[i] + b)) ** 2
    return totalError / float(len(y_data))
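
# The same cost can be computed without an explicit Python loop.
# A minimal vectorized sketch (assumes x_data and y_data are 1-D NumPy arrays,
# which they are after np.genfromtxt above); compute_error_vectorized is an
# equivalent alternative, not part of the original script.
def compute_error_vectorized(b, k, x_data, y_data):
    residuals = y_data - (k * x_data + b)   # per-sample error of the line y = k*x + b
    return np.mean(residuals ** 2)          # mean squared error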

# Gradient descent
def gradient_descent_runner(x_data, y_data, b, k, lr, epochs):
    m = float(len(x_data))
    for i in range(epochs):
        b_grad = 0
        k_grad = 0
        # Accumulate the gradient contribution of every sample
        # (index with the inner loop variable j, not the epoch counter i)
        for j in range(0, int(m)):
            b_grad += (k * x_data[j] + b - y_data[j])
            k_grad += ((k * x_data[j] + b - y_data[j]) * x_data[j])

        # Average over the m samples
        b_grad /= m
        k_grad /= m

        # Take one gradient descent step on the intercept and the slope
        b = b - lr * b_grad
        k = k - lr * k_grad

        # Every 5 iterations, plot the data and the current fitted line
        if i % 5 == 0:
            print("epochs:", i)
            plt.plot(x_data, y_data, 'b.')
            plt.plot(x_data, x_data * k + b, 'r')
            plt.show()
    return b, k
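
# The inner loop over samples can likewise be replaced by array operations.
# A minimal vectorized sketch of the same update rule (gradient_descent_vectorized
# is an added alternative, not part of the original script); it omits the
# intermediate plots.
def gradient_descent_vectorized(x_data, y_data, b, k, lr, epochs):
    m = float(len(x_data))
    for i in range(epochs):
        error = k * x_data + b - y_data        # residuals for all samples at once
        b_grad = np.sum(error) / m             # average gradient w.r.t. the intercept
        k_grad = np.sum(error * x_data) / m    # average gradient w.r.t. the slope
        b = b - lr * b_grad
        k = k - lr * k_grad
    return b, k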


print("Starting b = {0}, k = {1}, error = {2}".format(b, k, compute_error(b, k, x_data, y_data)))

print("Running......")

b, k = gradient_descent_runner(x_data, y_data, b, k, lr, epochs)

print("After {0} iterations b = {1}, k = {2}, error = {3}".format(epochs, b, k, compute_error(b, k, x_data, y_data)))

# Final plot of the data and the fitted line (uncomment to display it once after training)
# plt.plot(x_data, y_data, 'b.')
# plt.plot(x_data, x_data * k + b, 'r')
# plt.show()
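
# As a sanity check, the result above can be compared with the closed-form
# least-squares fit; np.polyfit(x_data, y_data, 1) returns (slope, intercept).
# This comparison is an addition, not part of the original post.
k_ls, b_ls = np.polyfit(x_data, y_data, 1)
print("Closed-form least squares: b = {0}, k = {1}, error = {2}".format(
    b_ls, k_ls, compute_error(b_ls, k_ls, x_data, y_data)))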

 
