ML Linear Regression 2022-02-19

#Gradient descent for linear regression
import numpy as np
import matplotlib.pyplot as plt
'''
dataset (Training Set)
m = number of training examples
x = "input" variable / features
y = "output" variable / "target" variable
(x(i), y(i)) = the i-th training example
α (alpha) is the learning rate; it controls how fast the descent proceeds
'''
m=20

X0=np.ones((m,1))#m rows, one column (intercept column)

X1=np.arange(1,m+1).reshape(m,1)#m rows, one column

X=np.hstack((X0,X1))#stack into two columns: [1, x]


y=np.array([3,4,5,5,2,4,7,8,11,8,12,11,13,13,16,17,18,17,19,21]).reshape(m,1)


alpha=0.01
iteration=10000

'''
Linear hypothesis function:  h(x) = θ0 + θ1*x
Cost function (squared error function):
    J(θ0,θ1) = (1/(2m)) * Σ_{i=1}^{m} (h(x(i)) - y(i))^2
Goal: find the θ0 and θ1 that minimize J, using gradient descent.
'''
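#Added note: gradient descent repeats the update  theta := theta - alpha * grad J(theta).
#Differentiating J gives the vectorized gradient
#    grad J(theta) = (1/m) * X^T * (X*theta - y),
#which is exactly what gradient_function below computes.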
def error_function(theta,X,y):
    #diff = h(x) - y
    diff=np.dot(X,theta)-y
    #return the value of the cost function J(theta)
    #note the parentheses: in Python, 1./2*m evaluates to m/2, not 1/(2m)
    return (1./(2*m))*np.dot(np.transpose(diff),diff)

def gradient_function(theta,X,y):
    diff=np.dot(X,theta)-y
    #return the gradient of the cost function
    return (1./m)*np.dot(np.transpose(X),diff)

def gradient_descent(X,y,alpha,iteration):
    #initialize theta
    theta=np.array([100,-5]).reshape(2,1)

    gradient=gradient_function(theta,X,y)#initial gradient
    for i in range(iteration):#number of iterations
        theta=theta-alpha*gradient
        gradient=gradient_function(theta,X,y)
    return theta

theta1=gradient_descent(X,y,alpha,iteration)
print('theta:',theta1)

print('minimum cost function:',error_function(theta1,X,y))

plt.scatter(X1,y)

y1=np.dot(X,theta1)
plt.plot(X1,y1)
plt.show()
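As a quick sanity check (added here, not in the original post), NumPy's built-in least-squares solver solves the same problem in one call and should agree with the converged theta:

#optional check: compare against NumPy's least-squares solver
theta_check,_,_,_=np.linalg.lstsq(X,y,rcond=None)
print('lstsq theta:',theta_check)#should closely match theta1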
#Normal equation solution for linear regression
import numpy as np
import matplotlib.pyplot as plt

m=20

X0=np.ones((m,1))#m rows, one column (intercept column)

X1=np.arange(1,m+1).reshape(m,1)#m rows, one column

X=np.hstack((X0,X1))#stack into two columns: [1, x]


y=np.array([3,4,5,5,2,4,7,8,11,8,12,11,13,13,16,17,18,17,19,21]).reshape(m,1)




#closed-form solution: theta = (X^T X)^{-1} X^T y
theta=np.dot(np.dot(np.linalg.inv((np.dot(np.transpose(X),X))),np.transpose(X)),y)
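#Added note: the formula follows from setting the gradient of J to zero:
#    X^T (X*theta - y) = 0  =>  (X^T X) theta = X^T y  =>  theta = (X^T X)^{-1} X^T y
#(valid when X^T X is invertible)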



plt.scatter(X1,y)

y1=np.dot(X,theta)
plt.plot(X1,y1)
plt.show()
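One caveat worth adding: explicitly inverting X^T X with np.linalg.inv can be numerically fragile when the columns of X are close to collinear. A minimal alternative sketch, using the same X and y as above, solves the normal equations directly instead:

#more stable: solve (X^T X) theta = X^T y without forming an explicit inverse
theta_solve=np.linalg.solve(np.dot(np.transpose(X),X),np.dot(np.transpose(X),y))
print('theta (solve):',theta_solve)#should match theta above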
