Logistic Regression: Batch Gradient Descent and Stochastic Gradient Descent in Python

Note that both routines below actually perform gradient ascent on the log-likelihood, w <- w + alpha * X^T (y - sigmoid(Xw)), which is equivalent to gradient descent on the negative log-likelihood; that is why the functions are named grad_ascent even though the title says descent.

import numpy as np

def sigmoid(x):
    return 1.0 / (1 + np.exp(-x))

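As written, sigmoid overflows in np.exp(-x) when x is a large negative number, which NumPy reports as a RuntimeWarning. A minimal numerically stable variant, assuming NumPy only (the name stable_sigmoid is my own, not from the original post):

def stable_sigmoid(x):
    """Numerically stable sigmoid: never calls exp on a positive argument."""
    x = np.asarray(x, dtype=float)
    # np.where evaluates both branches, so clip each one into its safe range.
    pos = 1.0 / (1.0 + np.exp(-np.clip(x, 0, None)))   # used where x >= 0
    ex = np.exp(np.clip(x, None, 0))
    neg = ex / (1.0 + ex)                              # used where x < 0
    return np.where(x >= 0, pos, neg)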
def batch_grad_ascent(X_train, y_train, alpha, n_iter):
    """Batch gradient ascent: update the weights once per pass,
    using the gradient computed over the whole training set."""
    X_train = np.mat(X_train)                # m x n design matrix
    y_train = np.mat(y_train).transpose()    # m x 1 column vector of labels
    m, n = np.shape(X_train)
    weights = np.mat(np.ones((n, 1)))        # n x 1 weight vector
    for k in range(n_iter):
        hx = sigmoid(X_train * weights)      # m x 1 predicted probabilities
        error = y_train - hx                 # m x 1 residuals
        weights = weights + alpha * X_train.transpose() * error
    return weights
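
A minimal sketch of calling the batch version; the toy matrix, labels, alpha, n_iter, and the 0.5 decision threshold are all illustrative assumptions, not values from the original post:

X = [[1.0, 0.5, 1.2],   # first column acts as a bias/intercept term
     [1.0, 2.3, 0.8],
     [1.0, 1.1, 2.5],
     [1.0, 3.0, 0.2]]
y = [0, 1, 1, 1]        # made-up binary labels
w = batch_grad_ascent(X, y, alpha=0.01, n_iter=500)
probs = sigmoid(np.mat(X) * w)       # m x 1 predicted probabilities
preds = (probs > 0.5).astype(int)    # threshold at 0.5 to get class labels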

def stoc_grad_ascent(X_train, y_train, alpha):
    """Stochastic gradient ascent: update the weights after every
    single training sample (one pass over the data)."""
    X_train = np.asarray(X_train, dtype=float)
    y_train = np.asarray(y_train, dtype=float)
    m, n = np.shape(X_train)
    weights = np.ones(n)                            # 1-D weight vector
    for i in range(m):
        h = sigmoid(np.sum(X_train[i] * weights))   # scalar prediction
        error = y_train[i] - h                      # scalar residual
        weights = weights + alpha * error * X_train[i]
    return weights
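
A single pass over the data, as above, often stops short of convergence. A common refinement is to make several randomly shuffled passes (epochs); this variant is my own sketch in that spirit, not code from the post:

def stoc_grad_ascent_epochs(X_train, y_train, alpha, n_epochs):
    """Hypothetical variant: repeat the per-sample updates over several
    randomly shuffled epochs."""
    X_train = np.asarray(X_train, dtype=float)
    y_train = np.asarray(y_train, dtype=float)
    m, n = np.shape(X_train)
    weights = np.ones(n)
    for epoch in range(n_epochs):
        for i in np.random.permutation(m):   # visit samples in random order
            h = sigmoid(np.sum(X_train[i] * weights))
            error = y_train[i] - h
            weights = weights + alpha * error * X_train[i]
    return weights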
