测试集：x = 5 时 y = ?（穷举搜索 a、b 使损失最小）

                    

import numpy as np

from matplotlib import pyplot as plt

# Training samples for the linear model y = a*x + b (roughly linear, noisy).
x_data = [1,2,3,4]
y_data = [6.8,9.8,13.2,16.2]

# Mean loss recorded for every (a, b) pair tried by the grid search below.
loss_list = list()

def forward(a, x, b):
    """Evaluate the linear model y = a*x + b at input x."""
    return b + a * x

def lossFunction(a, x, y, b):
    """Squared error of the model prediction a*x + b against target y."""
    return (forward(a, x, b) - y) ** 2

# Candidate parameter values, kept parallel to loss_list.
a_list = []
b_list = []

if __name__ == '__main__':
    # Exhaustive grid search over (a, b) in [0, 6) x [0, 6) with step 0.1.
    for a in np.arange(0, 6, 0.1):
        for b in np.arange(0, 6, 0.1):
            # Mean squared error over the four training samples.
            sum_loss = 0
            for xv, yv in zip(x_data, y_data):
                sum_loss += lossFunction(a, xv, yv, b)
            loss_list.append(sum_loss / 4)
            a_list.append(a)
            b_list.append(b)

    # NOTE(review): with b varying inside the loop, loss vs. a is a zigzag
    # curve rather than a clean parabola.
    plt.plot(a_list, loss_list)
    plt.xlabel('a')
    plt.ylabel('loss')

    print(min(loss_list))
    loss_min_index = loss_list.index(min(loss_list))
    print(loss_min_index)

    # Parameters that achieved the smallest mean loss.
    a_wanted = a_list[loss_min_index]
    b_wanted = b_list[loss_min_index]
    print(f'a_wanted = {a_wanted}, b_wanted ={b_wanted}')
    # plt.show()

    # Predict y at x = 5 using the best (a, b) found.
    print(forward(a_wanted, 5, b_wanted))

采用梯度下降方法（SGD：逐样本更新参数 a）

import numpy as np

from matplotlib import pyplot as plt

# Training samples for the no-bias linear model y = a * x (data fits a = 2 exactly).
data_x = [1, 2, 3]
data_y = [2, 4, 6]

# Per-epoch mean loss, and the value of a recorded after each epoch.
loss_list = list()
a_list = list()

# Learning rate used by gradient().
alpha = 0.01

def forward(x):
    """Predict y for input x; reads the module-level weight `a`."""
    return x * a

def lossFunction(x, y):
    """Squared error of the current model's prediction at x against target y."""
    return (forward(x) - y) ** 2

def predict(x, a_):
    """Predict y for input x with an explicitly supplied weight a_."""
    return x * a_

def gradient(a, x, y, alpha=0.01):
    """Perform one SGD step on the weight of the model y = a * x.

    The loss is (a*x - y)**2, whose derivative w.r.t. a is 2*(a*x - y)*x,
    giving the update a <- a - alpha * 2 * (a*x - y) * x.

    alpha is now a keyword parameter (its default matches the module-level
    learning-rate constant) so the step size can be overridden per call;
    existing 3-argument callers behave exactly as before.

    Returns the updated weight.
    """
    return a - alpha * 2 * (a * x - y) * x

if __name__ == '__main__':
    # Stochastic gradient descent: a is updated after every single sample.
    # NOTE: forward()/lossFunction() read the global `a`, so this name must
    # stay `a`.
    a = 0
    for epoch in range(1000):
        running_loss = 0
        for xv, yv in zip(data_x, data_y):
            # Loss is measured with the current a, then a is stepped.
            running_loss += lossFunction(xv, yv)
            a = gradient(a, xv, yv)
        loss_list.append(running_loss / 3)
        a_list.append(a)

    # Top: trajectory of a per epoch. Bottom: mean loss per epoch.
    plt.subplot(211)
    plt.plot(a_list)
    plt.subplot(212)
    plt.plot(loss_list)
    plt.show()

    plt.figure()
    plt.plot(a_list, loss_list)
    plt.xlabel('a')
    plt.ylabel('loss')
    plt.show()

    # Epoch with the lowest recorded mean loss, and the weight at that epoch.
    lowest_loss = min(loss_list)
    best_epoch = loss_list.index(lowest_loss)
    print(best_epoch)
    proper_a = a_list[best_epoch]
    print(proper_a)

    # Interactive prediction with the selected weight.
    print("Please input the desired x:")
    user_x = input()
    print(f"The predict output for the linear model is {predict(float(user_x), proper_a)}")

穷举搜索使 loss 最小的参数 a

import numpy as np

from matplotlib import pyplot as plt

# Training samples for the no-bias linear model y = a * x (data fits a = 2 exactly).
data_x = [1, 2, 3]
data_y = [2, 4, 6]

# Mean loss for each candidate a, and the candidates themselves (parallel lists).
loss_list = list()
a_list = list()

def forward(x):
    """Predict y for input x; reads the module-level weight `a`."""
    return x * a

def lossFunction(x, y):
    """Squared error of the current model's prediction at x against target y."""
    return (forward(x) - y) ** 2

def predict(x, a_):
    """Predict y for input x with an explicitly supplied weight a_."""
    return x * a_

if __name__ == '__main__':
    # Exhaustive sweep of the weight over [0, 4) in steps of 0.1.
    # NOTE: forward() reads the global `a`, so the loop variable must be
    # named `a`.
    for a in np.arange(0, 4, 0.1):
        total = 0
        for xv, yv in zip(data_x, data_y):
            total += lossFunction(xv, yv)
        loss_list.append(total / 3)
        a_list.append(a)

    plt.figure()
    plt.plot(a_list, loss_list)
    # plt.title("")
    plt.xlabel('a')
    plt.ylabel('loss')
    # plt.show()

    # Candidate a with the smallest mean loss.
    best_loss = min(loss_list)
    best_idx = loss_list.index(best_loss)
    print(best_idx)
    proper_a = a_list[best_idx]
    print(proper_a)

    # Interactive prediction with the selected weight.
    print("Please input the desired x:")
    user_x = input()
    print(f"The predict output for the linear model is {predict(float(user_x),proper_a)}")

import time

import numpy as np

from matplotlib import pyplot as plt

import random

你可能感兴趣的:(python)