梯度下降算法:
import random

import numpy as np


def batch_gradient_descent(x, t, k0, k1, k2, lr=0.01, eps=0.18, max_iter=10000):
    """Fit t ≈ k0 + k1*x[:,0] + k2*x[:,1] by batch gradient descent.

    Args:
        x: (m, 2) feature array.
        t: (m, 1) target array.
        k0, k1, k2: initial intercept and per-feature weights.
        lr: learning rate.
        eps: stop when every gradient component has |value| < eps.
        max_iter: hard cap so a divergent or slow run still terminates
            (the original loop had no cap and could spin forever).

    Returns:
        (k0, k1, k2) after the final update.
    """
    m = len(x)
    for _ in range(max_iter):
        # Gradients must be recomputed from zero on every pass; the
        # original kept adding onto the previous iteration's sums.
        d0 = d1 = d2 = 0.0
        for i in range(m):
            # Residual of the current hypothesis on sample i, as a plain
            # float — the original stored predictions in an int array,
            # silently truncating them.
            r = float(k0 + k1 * x[i, 0] + k2 * x[i, 1] - t[i, 0])
            d0 += r
            d1 += r * x[i, 0]
            d2 += r * x[i, 1]
        # abs() on each component: a large *negative* gradient also means
        # we have not converged (the original `d >= eps` test missed that).
        if abs(d0) < eps and abs(d1) < eps and abs(d2) < eps:
            break
        k0 -= lr * d0
        k1 -= lr * d1
        k2 -= lr * d2
    return k0, k1, k2


if __name__ == "__main__":
    x = np.array([[2104, 3],
                  [1600, 3],
                  [2400, 3],
                  [1416, 2],
                  [3000, 4]], dtype=float)
    t = np.array([[400],
                  [330],
                  [369],
                  [232],
                  [40]], dtype=float)
    # Standardize the features: with raw magnitudes (~2000 vs ~3) a single
    # learning rate cannot serve both columns and lr=0.01 diverges.
    x = (x - x.mean(axis=0)) / x.std(axis=0)
    k0, k1, k2 = batch_gradient_descent(
        x, t, random.random(), random.random(), random.random())
    print(k0, k1, k2)
示例输出(参数随机初始化,每次运行结果会不同): 0.21288868557537322 0.1028509984378464 0.6999382141996725
2021-11-12 更新:随机梯度下降法:
import random

import numpy as np


def stochastic_gradient_descent(x, y, theta0, theta1, theta2,
                                lr=0.01, eps=0.18, max_iter=1000):
    """Fit y ≈ theta0 + theta1*x[:,0] + theta2*x[:,1] by SGD.

    Each step samples ONE random training example, updates the parameters
    from its gradient, and uses that example's squared error as a (noisy)
    stopping signal — matching the original script's loop structure.

    Args:
        x: (m, 2) feature array.
        y: (m, 1) target array.
        theta0, theta1, theta2: initial intercept and per-feature weights.
        lr: learning rate.
        eps: stop once the sampled example's squared error drops below eps.
        max_iter: hard cap on the number of SGD steps.

    Returns:
        (theta0, theta1, theta2) after the final update.
    """
    m = len(x)
    n = 0
    loss = float("inf")  # guarantee at least one step for any eps
    while loss >= eps and n < max_iter:
        i = random.randint(0, m - 1)
        # Residual on the sampled example, as a plain float — the original
        # routed predictions through an int array, truncating them.
        r = float(theta0 + theta1 * x[i, 0] + theta2 * x[i, 1] - y[i, 0])
        # Descent SUBTRACTS the gradient; the original added it and so
        # climbed the loss surface instead of descending it.
        theta0 -= lr * r
        theta1 -= lr * r * x[i, 0]
        theta2 -= lr * r * x[i, 1]
        # Squared error on this one sample: cheap but noisy stop signal.
        loss = r * r
        n += 1
    return theta0, theta1, theta2


if __name__ == "__main__":
    x = np.array([[2104, 3],
                  [1600, 3],
                  [2400, 3],
                  [1416, 2],
                  [3000, 4]], dtype=float)
    y = np.array([[400],
                  [330],
                  [369],
                  [232],
                  [40]], dtype=float)
    # Standardize features so one learning rate works for both columns.
    x = (x - x.mean(axis=0)) / x.std(axis=0)
    theta0, theta1, theta2 = stochastic_gradient_descent(
        x, y, random.random(), random.random(), random.random())
    print(theta0, theta1, theta2)