import numpy as np
import matplotlib.pyplot as plt
def cost_gradient(W, X, Y, n):
    # G: 2x1 column vector of partial derivatives of the cost j with respect to
    # the intercept b (first row of W) and the slope w (second row of W)
    yhat = X.dot(W)                # model predictions
    L = ((yhat - Y) ** 2) / 2      # squared-error loss per sample
    G = X.T.dot(yhat - Y) / n      ###### Gradient
    j = np.sum(L) / n              # cost: mean loss with respect to the current W
    return (j, G)
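
# A minimal sanity check for cost_gradient (not part of the original script): compare the
# analytic gradient against centered finite differences on small synthetic data. The helper
# name check_gradient, the random data, and the step size eps are assumptions for illustration.
def check_gradient(eps=1e-6):
    rng = np.random.default_rng(0)
    Xc = np.c_[np.ones([5, 1]), rng.normal(size=[5, 1])]  # small synthetic design matrix
    Yc = rng.normal(size=[5, 1])
    Wc = rng.normal(size=[2, 1])
    _, G = cost_gradient(Wc, Xc, Yc, 5)
    G_num = np.zeros_like(Wc)
    for k in range(Wc.size):
        d = np.zeros_like(Wc)
        d[k] = eps
        j_plus, _ = cost_gradient(Wc + d, Xc, Yc, 5)
        j_minus, _ = cost_gradient(Wc - d, Xc, Yc, 5)
        G_num[k] = (j_plus - j_minus) / (2 * eps)          # centered difference
    return np.max(np.abs(G - G_num))                       # should be close to 0
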
def gradientDescent(W, X, Y, lr, iterations):
    n = np.size(Y)                 # number of samples
    J = np.zeros([iterations, 1])  # iterations x 1 array recording the cost at each step
    for i in range(iterations):
        (J[i], G) = cost_gradient(W, X, Y, n)
        W = W - (lr * G)           # update both b and w along the negative gradient
    return (W, J)
lr = 0.00025 ###### Learning rate
data = np.loadtxt('LR.txt', delimiter=',')  # load the training data
n = np.size(data[:, 1])                     # number of samples (elements in the second column)
iterations = n ###### Training loops
W = np.ones([2, 1])                         # 2x1 matrix of ones holding the parameters b and w
X = np.c_[np.ones([n, 1]), data[:, 0]]      # design matrix: a column of ones (for b) plus the inputs
Y = data[:, 1].reshape([n, 1])              # targets taken from the second column of the data
(W, J) = gradientDescent(W, X, Y, lr, iterations)
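
# For reference, the same least-squares fit can be obtained in closed form; a minimal sketch
# using np.linalg.lstsq, assuming the whole data set fits in memory. This is only a cross-check
# of the gradient-descent result, not part of the original pipeline.
W_exact, _, _, _ = np.linalg.lstsq(X, Y, rcond=None)  # solves min ||X W - Y||^2 directly
# print(W.ravel(), W_exact.ravel())  # the two estimates should be close if training converged
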
# Draw figures
plt.figure()
plt.plot(data[:, 0], data[:, 1], 'rx')  # scatter plot of all data points
plt.plot(data[:, 0], np.dot(X, W))      # fitted regression line
plt.show()
plt.figure()
plt.plot(range(iterations), J)          # cost J over the training iterations
plt.show()
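
# A small usage sketch: predicting the target for a new input with the trained parameters.
# The example input value x_new = 6.5 is purely hypothetical.
x_new = 6.5
y_new = W[0, 0] + W[1, 0] * x_new  # b + w * x, since W stacks the intercept first
print('prediction for x = %.2f: %.3f' % (x_new, y_new))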