Implementing the least-squares method (Adaline) in Python

Gradient of the least-squares cost with respect to the regression parameters

[Figure 1: derivation of the least-squares cost gradient]
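The figure carries the derivation; for reference, a standard form of the sum-of-squared-errors cost that the code below minimizes, and of its gradient and update rule (using the same convention as the code, where $w_0$ is the bias and the remaining $w_j$ multiply the features), is:

$$J(\mathbf{w}) = \frac{1}{2}\sum_i \big(y^{(i)} - \hat{y}^{(i)}\big)^2, \qquad \hat{y}^{(i)} = \mathbf{w}^\top\mathbf{x}^{(i)} + w_0$$

$$\frac{\partial J}{\partial w_j} = -\sum_i \big(y^{(i)} - \hat{y}^{(i)}\big)\, x_j^{(i)}, \qquad w_j := w_j + \eta \sum_i \big(y^{(i)} - \hat{y}^{(i)}\big)\, x_j^{(i)}$$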

Code

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Load the Iris data set (150 rows; the upper bound 400 simply selects all of them)
df = pd.read_csv('https://archive.ics.uci.edu/ml/'
                 'machine-learning-databases/iris/iris.data', header=None)

# Labels: Iris-setosa -> -1, everything else -> 1
y = df.iloc[0:400, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
# Features: sepal length (column 0) and petal length (column 2)
X = df.iloc[0:400, [0, 2]].values



class AdaLineGD(object):
    """Adaline classifier trained with full-batch gradient descent."""

    def __init__(self, eta=0.01, n_iter=100):
        self.eta = eta          # learning rate
        self.n_iter = n_iter    # number of passes over the training set

    def net_input(self, X):
        # Linear combination w^T x + bias (w[0] is the bias term)
        return np.dot(X, self.w[1:]) + self.w[0]

    def fit(self, X, y):
        self.w = np.zeros(1 + X.shape[1])
        self.cost = []
        for _ in range(self.n_iter):
            output = self.net_input(X)
            error = y - output
            # Batch gradient-descent update: w <- w + eta * X^T (y - output)
            self.w[1:] += self.eta * X.T.dot(error)
            self.w[0] += self.eta * error.sum()
            cost = (error ** 2).sum() / 2.0   # sum-of-squared-errors cost
            self.cost.append(cost)
        return self

    def activation(self, X):
        # Identity activation: Adaline trains on the raw linear output
        return self.net_input(X)

    def predict(self, xi):
        # Threshold the activation to obtain the class label
        return np.where(self.activation(xi) >= 0.0, 1, -1)


fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(8, 3))

ada1 = AdaLineGD(eta=0.0002).fit(X, y)
ax[0].plot(range(1, 1 + len(ada1.cost)), np.log10(ada1.cost), marker='o')
ax[0].set_xlabel("Epoch")
ax[0].set_ylabel("log(Sum-squared-error)")
ax[0].set_title('Adaline - Learning rate 0.0002')

ada2 = AdaLineGD(eta=0.0001).fit(X, y)
ax[1].plot(range(1, 1 + len(ada2.cost)), np.log10(ada2.cost), marker='o')
ax[1].set_xlabel("Epoch")
ax[1].set_ylabel("log(Sum-squared-error)")
ax[1].set_title('Adaline - Learning rate 0.0001')

plt.show()

Training iterations

Comparing the three plots makes the role of the learning rate clear: a tiny difference in its value leads to very different training behavior. A smaller learning rate is not automatically better: with a learning rate of 0.0002 the cost has essentially converged after about 80 epochs, whereas with 0.0001 convergence is noticeably slower and the cost still has not fully converged after 100 epochs (when the learning rate is too small, each update makes very little progress, so many more epochs are needed to reach the minimum).

[Figures 2-4: log sum-of-squared-errors per epoch for the different learning rates]
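The code above only generates two of the curves; a third plot with a clearly over-sized learning rate can be produced the same way. The exact rate used for the third figure is not given in the post, so the value 0.01 below is purely illustrative (assuming AdaLineGD, X, y and plt from the code above are still in scope):

# Illustration only: 0.01 is an assumed learning rate, not taken from the post.
# On the raw (un-standardized) features it is far too large, so the
# sum-of-squared errors grows with every epoch instead of shrinking.
ada3 = AdaLineGD(eta=0.01, n_iter=10).fit(X, y)
plt.plot(range(1, 1 + len(ada3.cost)), np.log10(ada3.cost), marker='o')
plt.xlabel("Epoch")
plt.ylabel("log(Sum-squared-error)")
plt.title('Adaline - Learning rate 0.01')
plt.show()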

Standardizing the training data

After standardizing the features, the number of epochs needed drops sharply: the model converges after about 30 epochs.

# Standardize each feature to zero mean and unit variance
X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()
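The post does not show the re-training step that produces the next figure; a minimal sketch, assuming the AdaLineGD class and the variables above are still in scope (the learning rate 0.01 and epoch count are assumptions, not values from the post):

# With zero-mean, unit-variance features a much larger learning rate works,
# and the cost flattens out within roughly 30 epochs.
ada_std = AdaLineGD(eta=0.01, n_iter=50).fit(X_std, y)
plt.plot(range(1, 1 + len(ada_std.cost)), np.log10(ada_std.cost), marker='o')
plt.xlabel("Epoch")
plt.ylabel("log(Sum-squared-error)")
plt.title('Adaline - standardized features')
plt.show()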

[Figure 5: cost per epoch on the standardized data]

Stochastic gradient descent

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from numpy.random import seed


class AdaLineSGD(object):
    """Adaline classifier trained with stochastic gradient descent."""

    def __init__(self, eta=0.01, n_iter=30, shuffle=True, random_state=None):
        self.eta = eta              # learning rate
        self.n_iter = n_iter        # number of epochs
        self.shuffle = shuffle      # reshuffle the data every epoch
        self.w_inited = False
        if random_state:
            seed(random_state)

    def net_input(self, X):
        # Linear combination w^T x + bias (w[0] is the bias term)
        return np.dot(X, self._w[1:]) + self._w[0]

    def _shuffle(self, X, y):
        # Random permutation of the samples to avoid cycles during SGD
        r = np.random.permutation(len(y))
        return X[r], y[r]

    def _init_weight(self, m):
        self._w = np.zeros(m + 1)
        self.w_inited = True

    def _update_weight(self, xi, target):
        # Gradient-descent step for a single sample
        output = self.net_input(xi)
        error = target - output
        self._w[1:] += self.eta * xi.dot(error)
        self._w[0] += self.eta * error
        cost = 0.5 * error ** 2
        return cost

    def fit(self, X, y):
        self._init_weight(X.shape[1])
        self.cost = []
        for _ in range(self.n_iter):
            if self.shuffle:
                X, y = self._shuffle(X, y)
            cost = []
            for xi, target in zip(X, y):
                cost.append(self._update_weight(xi, target))
            # Record the average per-sample cost of this epoch
            avg_cost = sum(cost) / len(y)
            self.cost.append(avg_cost)
        return self

    def partial_fit(self, X, y):
        # Online learning: update the weights without re-initializing them
        if not self.w_inited:
            self._init_weight(X.shape[1])
        if y.ravel().shape[0] > 1:
            for xi, target in zip(X, y):
                self._update_weight(xi, target)
        else:
            self._update_weight(X, y)
        return self

    def activation(self, X):
        return self.net_input(X)

    def predict(self, xi):
        # Threshold the activation to obtain the class label
        return np.where(self.activation(xi) >= 0.0, 1, -1)


df = pd.read_csv('https://archive.ics.uci.edu/ml/'
                 'machine-learning-databases/iris/iris.data', header=None)

# Labels and features as before
y = df.iloc[0:400, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[0:400, [0, 2]].values

# Standardize each feature to zero mean and unit variance
X_std = np.copy(X)
X_std[:, 0] = (X[:, 0] - X[:, 0].mean()) / X[:, 0].std()
X_std[:, 1] = (X[:, 1] - X[:, 1].mean()) / X[:, 1].std()


fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 3))

ada1 = AdaLineSGD(eta=0.001).fit(X_std, y)
ax.plot(range(1, 1 + len(ada1.cost)), np.log10(ada1.cost), marker='o')
ax.set_xlabel("Epoch")
ax.set_ylabel("log(Average cost)")
ax.set_title('Adaline - Learning rate 0.001')

plt.show()
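The partial_fit method defined above is what makes online (streaming) updates possible: it reuses the existing weights instead of re-initializing them. A short usage sketch; the split into an "initial" batch and one "new" sample is purely illustrative:

# Fit on all but the last sample, then fold the last sample in with one
# online update instead of re-training from scratch.
ada_online = AdaLineSGD(eta=0.01, n_iter=15, random_state=1)
ada_online.fit(X_std[:-1], y[:-1])
ada_online.partial_fit(X_std[-1, :], y[-1])
print(ada_online.predict(X_std[-1, :]))   # class label of the newly seen sample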

[Figure 6: average cost per epoch for stochastic gradient descent]

If the learning rate is too large, the cost starts to oscillate locally instead of decreasing smoothly.

[Figure 7: oscillating cost with a learning rate that is too large]
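For completeness, a hedged sketch of the kind of run that produces such oscillation; the learning rate 0.5 is an assumed value chosen only to be "too large", not a value from the post:

# Illustration only: with an over-sized learning rate the per-sample updates
# overshoot, so the average cost per epoch jumps around instead of decreasing.
ada_big = AdaLineSGD(eta=0.5, n_iter=30, random_state=1).fit(X_std, y)
plt.plot(range(1, 1 + len(ada_big.cost)), ada_big.cost, marker='o')
plt.xlabel("Epoch")
plt.ylabel("Average cost")
plt.title('Adaline SGD - learning rate too large')
plt.show()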
