Implementing a linear model with PyTorch and NumPy (code)

Linear regression with NumPy
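
The NumPy version below implements plain batch gradient descent for linear regression. With m training examples, feature matrix X (a column of ones appended for the intercept), targets y and parameters \theta, the cost and update rule it follows are

J(\theta) = \frac{1}{2m}(X\theta - y)^{\top}(X\theta - y), \qquad \theta \leftarrow \theta - \frac{\alpha}{m} X^{\top}(X\theta - y)

where \alpha is the learning rate. The functions compute_cost and gradient_descent below are direct translations of these two formulas.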

import numpy as np
from matplotlib import pyplot

x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                    [9.779], [6.182], [7.59], [2.167], [7.042],
                    [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)
 
y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
                    [3.366], [2.596], [2.53], [1.221], [2.827],
                    [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)

import matplotlib.pyplot as plt


def compute_cost(theta, X, y):
    """
    Compute the least-squares cost J(theta).
    :param theta: weight parameters, shape (n, 1)
    :param X: feature matrix, shape (m, n)
    :param y: target values, shape (m, 1)
    :return: scalar cost
    """
    # J(theta) = (X*theta - y)^T (X*theta - y) / (2m)
    error = np.dot(X, theta) - y
    return np.dot(np.transpose(error), error) / (2 * len(X))


def scatter_plot(X, y):
    """
    Show a scatter plot of the data.
    :param X: feature values
    :param y: target values
    :return:
    """
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.scatter(X, y)
    plt.show()


def func_plot(X, y):
    """
    Plot the fitted line.
    :param X: feature values
    :param y: predicted values
    :return:
    """
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(X, y)
    plt.show()


def plotJ_iters(J, iters):
    """
    Plot the cost J against the iteration number.
    :param J: cost history, length iters
    :param iters: number of iterations
    """
    x = np.arange(1, iters + 1)
    plt.plot(x, J)
    plt.xlabel('iter')
    plt.ylabel('cost')
    plt.title('iter-cost')
    plt.show()

def gradient_descent(X, y, theta, alpha, iter_num):
    """
    Batch gradient descent for linear regression.
    :param X: feature matrix, shape (m, n)
    :param y: target values, shape (m, 1)
    :param theta: parameters to learn, shape (n, 1)
    :param alpha: learning rate
    :param iter_num: number of iterations
    :return: learned theta and the cost history
    """
    m = X.shape[0]  # number of training examples
    n = X.shape[1]  # number of features
    J_history = np.zeros(shape=(iter_num, 1))
    for i in range(iter_num):
        h = np.dot(X, theta)  # predictions; theta: (n, 1), X: (m, n), h: (m, 1)
        theta = theta - alpha / m * np.dot(np.transpose(X), h - y)
        if i % 2 == 0:
            # Plot the current fit every other iteration
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
            ax.scatter(X[:, 0], y)
            ax.plot(X[:, 0], h, 'red')
            plt.show()
        J_history[i] = compute_cost(theta, X, y)
    return theta, J_history


def linear_regression():
    X = x_train
    y = y_train
    # Append a bias (intercept) column of ones to the features
    bias = np.ones(shape=(len(X), 1), dtype=np.float64)
    X = np.column_stack((X, bias))
    theta = np.zeros(shape=(X.shape[1], 1))
    theta, J_history = gradient_descent(X, y, theta, 0.01, iter_num=10)
    # plotJ_iters(J_history, 10)
    print('theta :', theta.reshape(1, X.shape[1]))

linear_regression()
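
To watch the cost fall and make a prediction with the learned parameters, a small driver can follow the call above. This is only a sketch reusing x_train, y_train and the functions already defined; x_new and the prediction step are illustrative and not part of the original script:

X = np.column_stack((x_train, np.ones((len(x_train), 1), dtype=np.float64)))  # features plus bias column
theta = np.zeros((X.shape[1], 1))
theta, J_history = gradient_descent(X, y_train, theta, 0.01, iter_num=10)
plotJ_iters(J_history, 10)               # cost vs. iteration curve
x_new = np.array([[8.0, 1.0]])           # hypothetical input: feature 8.0 plus the bias term
print('prediction for x=8.0:', np.dot(x_new, theta))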

Linear model with PyTorch

# Linear model with PyTorch
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable  # legacy wrapper; plain tensors work since PyTorch 0.4
from matplotlib import pyplot

x_train = np.array([[3.3], [4.4], [5.5], [6.71], [6.93], [4.168],
                    [9.779], [6.182], [7.59], [2.167], [7.042],
                    [10.791], [5.313], [7.997], [3.1]], dtype=np.float32)
 
y_train = np.array([[1.7], [2.76], [2.09], [3.19], [1.694], [1.573],
                    [3.366], [2.596], [2.53], [1.221], [2.827],
                    [3.465], [1.65], [2.904], [1.3]], dtype=np.float32)

# Convert the ndarrays to tensors
x_train = torch.from_numpy(x_train)
y_train = torch.from_numpy(y_train)

# Define the linear model

class LinearRegression(nn.Module):
    def __init__(self):
        super(LinearRegression, self).__init__()
        self.linear = nn.Linear(1, 1)
    
    def forward(self, x):
        out = self.linear(x)
        return out
    
linear_model = LinearRegression()
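
# Sanity check (illustrative, not part of the original post): nn.Linear(1, 1)
# holds a single weight and a single bias, which SGD will update below.
for name, p in linear_model.named_parameters():
    print(name, p.shape)  # weight: torch.Size([1, 1]), bias: torch.Size([1])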


# Define the loss function (mean squared error) and the SGD optimizer
criterion = nn.MSELoss()
optimizer = optim.SGD(linear_model.parameters(), lr=0.001)  # learning rate 0.001

# Training loop
epochs = 1000
for epoch in range(epochs):
    inputs = Variable(x_train)
    target = Variable(y_train)

    # Forward pass
    out = linear_model(inputs)
    loss = criterion(out, target)

    # Backward pass
    optimizer.zero_grad()  # clear accumulated gradients
    loss.backward()        # back-propagate
    optimizer.step()       # update the parameters

    if (epoch + 1) % 20 == 0:
        print('Epoch[{}/{}], loss: {:.6f}'.format(epoch + 1, epochs, loss.item()))

linear_model.eval()
predict = linear_model(Variable(x_train))
predict = predict.data.numpy()
pyplot.plot(x_train.numpy(), y_train.numpy(), 'ro', label='Original data')
pyplot.plot(x_train.numpy(), predict, label='Fitting Line')
# Show the legend
pyplot.legend()
pyplot.show()

# Save the trained model's parameters
torch.save(linear_model.state_dict(), './linear.pth')
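
To reuse the trained weights later, the state dict can be loaded back into a fresh model. A minimal sketch, assuming the LinearRegression class defined above is available; the input value 8.0 is only an example:

# Rebuild the model and restore the saved parameters
model = LinearRegression()
model.load_state_dict(torch.load('./linear.pth'))
model.eval()
with torch.no_grad():
    print(model(torch.tensor([[8.0]])))  # predicted y for x = 8.0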

 
