# Linear regression model algorithm
import numpy as np
import matplotlib.pyplot as plt
def gradient_descent(X, Y, iteration=200, learning_rate=0.067):  # batch gradient descent for linear regression
    print("running gradient descent...")
    mExamples = X.shape[0]
    nFeatures = X.shape[1]
    # Prepend a column of ones to the training data as x0, the input that
    # pairs with theta0 (the bias term); the design matrix is m*(n+1)
    trainX = np.zeros((mExamples, nFeatures + 1))
    trainX[:, 0] = np.ones(mExamples)
    trainX[:, 1:] = X
    # Reshape the labels into an m*1 column vector
    trainY = np.zeros((mExamples, 1))
    trainY[:, 0] = Y
    # Initialize the model parameters, shape (n+1)*1
    parameters = np.zeros((nFeatures + 1, 1))
    # Iteratively update the parameters
    for iteration_i in range(iteration):
        # Gradient of J(theta) = (1/2m)*||X*theta - y||^2 is (1/m)*X^T*(X*theta - y);
        # note the residual must use the m*1 column vector trainY, not the flat Y
        Z = np.dot(trainX, parameters)
        PartDeri = np.dot(trainX.T, (Z - trainY)) / mExamples
        # Take one gradient step
        parameters = parameters - learning_rate * PartDeri
        # Evaluate the cost function after the update
        cost = np.sum(np.square(np.dot(trainX, parameters) - trainY)) / (2.0 * mExamples)
        print("[", iteration_i, "]", "[cost]", cost)
    predict = np.dot(trainX, parameters)
    return predict
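# A minimal sanity-check sketch: the same fit in closed form via the normal
# equation, theta = (X^T X)^{-1} X^T y. The helper below is illustrative and
# assumes X^T X is invertible for the given data.
def normal_equation(X, Y):
    mExamples = X.shape[0]
    # Prepend the bias column of ones, matching the design matrix built above
    trainX = np.hstack([np.ones((mExamples, 1)), X])
    trainY = Y.reshape(-1, 1)
    # Solve (X^T X) theta = X^T y; np.linalg.solve avoids an explicit inverse
    parameters = np.linalg.solve(trainX.T @ trainX, trainX.T @ trainY)
    return np.dot(trainX, parameters)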
if __name__ == '__main__':
    # Build a toy dataset
    X = np.arange(0, 10, 1).reshape(10, 1)
    print("X:", X)
    Y = np.ones(10)
    print("Y: ", Y)
    plt.figure()
    plt.xlim((0, 9))
    plt.ylim((-2, 2))
    # The red line is the ideal curve
    plt.plot(X, Y, color='r')
    # The blue line is the fitted curve
    predict = gradient_descent(X, Y)
    plt.plot(X, predict, color='b')
    plt.show()
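# Hyperparameter note: with this unscaled X (values 0 through 9), batch gradient
# descent is only stable for learning rates below about 2/lambda_max of the
# Hessian (1/m)*X^T*X, roughly 0.068 for this data, which is why the default
# learning_rate=0.067 converges while, for example, a call like this diverges:
#   gradient_descent(X, Y, iteration=50, learning_rate=0.5)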