Batch Gradient Descent (Python)
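Batch gradient descent fits the linear-regression parameters theta by nudging every component in the direction that reduces the squared error over the entire training set at once. With learning rate alpha (0.03 in the code below), m training examples, and hypothesis h_theta(x) = theta^T x, each pass applies the standard LMS update (the symbols here simply name what the code computes):

    \theta_j := \theta_j + \frac{\alpha}{m} \sum_{i=1}^{m} \left( y^{(i)} - h_\theta\!\left(x^{(i)}\right) \right) x_j^{(i)}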

import numpy as np


def GradientDescent(x, y, theta, alpha=0.03):
    """One batch update: move theta along the negative gradient of the squared error."""
    m, n = x.shape  # m: number of training examples, n: number of features
    error = y.reshape(-1, 1) - np.matmul(x, theta)  # residuals over the whole batch
    # Update every theta_j simultaneously from the same residuals
    # (updating them one at a time would mix old and new parameters).
    theta = theta + alpha / m * np.matmul(x.T, error)
    return theta

x = np.array([[1], [2], [3], [4], [5], [6]])
y = np.array([5, 7, 9, 11, 13, 15])  # y = 2x + 3 exactly, so the loss can approach 0
# stopping condition: quit once the summed squared error drops below this
epsilon = 0.01
# prepend a bias column x0 = 1 to every example
x1 = np.hstack((np.ones((6, 1)), x))
theta = np.zeros((2, 1))  # [intercept, slope], initialised to zero
print(x1.shape)  # (6, 2): six examples, two parameters
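Before looping, the first update can be checked by hand: with theta at zero the residual is just y, so the step is (0.03/6) * x1.T @ y = 0.005 * [60, 245]^T = [0.3, 1.225]^T. A quick optional check of that arithmetic (not in the original post):

# optional hand-checkable first step; does not affect the loop below
print(GradientDescent(x1, y, np.zeros((2, 1))).ravel())  # -> [0.3   1.225]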

while True:
    theta = GradientDescent(x1, y, theta)
    prediction = np.matmul(x1, theta)
    loss = np.sum((prediction.T - y) ** 2)  # summed squared error over the batch
    if loss < epsilon:
        break

print('prediction =', prediction.T)
print('y =', y)
print('loss =', loss)
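Because the data satisfy y = 2x + 3 exactly, the learned parameters should approach [3, 2]. A sanity-check sketch (not part of the original post) compares them against NumPy's closed-form least-squares solution:

# independent reference: closed-form least squares via np.linalg.lstsq
theta_exact, *_ = np.linalg.lstsq(x1, y.reshape(-1, 1), rcond=None)
print('closed-form theta      =', theta_exact.ravel())  # ~[3. 2.]
print('gradient-descent theta =', theta.ravel())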
