Andrew Ng's Machine Learning course (Coursera), Programming Assignment 2 (Logistic Regression): a Python implementation

This post presents a Python implementation of the second programming assignment of Andrew Ng's Machine Learning course: the basic Logistic Regression exercise.

 

The assignment consists of six files:

ex2.py: the main entry point of the program

costFunction.py: computes the cost function

gradientDescent.py: computes the gradient (despite its name, the actual minimization is done by the SciPy optimizers in ex2.py)

sigmoid.py: the sigmoid function

predict.py: the prediction function

ex2data1.txt: the training set

 

The assignment files and the training data can be downloaded at: https://github.com/toanoyx/MachineLearning-AndrewNg-coursera-python/tree/master/ex2%20Logistic%20Regression

 

The source code of each file is given below:
 

ex2.py: the main entry point of the program

"""第1部分 可视化训练集"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
import seaborn as sns
from sklearn.metrics import classification_report
from scipy import optimize

from costFunction import *
from gradientDescent import *
from predict import *

print('Plotting Data...')
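# Part 1 loads ex2data1.txt into a DataFrame and scatter-plots the two exam
# scores, using different markers for admitted and not-admitted applicants.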
data = pd.read_csv('ex2data1.txt', header=None, names=['Exam 1', 'Exam 2', 'Admitted'])
positive = data[data['Admitted'].isin([1])]
negative = data[data['Admitted'].isin([0])]

fig, ax = plt.subplots(figsize=(12,8))
ax.scatter(positive['Exam 1'], positive['Exam 2'], s=50, c='black', marker='o', label='Admitted')
ax.scatter(negative['Exam 1'], negative['Exam 2'], s=50, c='yellow', marker='x', label='Not Admitted')
ax.legend()
ax.set_xlabel('Exam 1 Score')
ax.set_ylabel('Exam 2 Score')
plt.show()

"""第2部分 逻辑回归"""
data.insert(0, 'Ones', 1)
cols = data.shape[1]
X = data.iloc[:, 0:cols-1]
y = data.iloc[:, cols-1:cols]
X = np.array(X.values)
y = np.array(y.values)
theta = np.zeros(3)
print('initial cost : ' + str(costFunction(theta, X, y)) + ' (This value should be about 0.693)')

"""第3部分 梯度下降"""
print('initial gradient : ' + str(gradientDescent(theta, X, y)))
result = opt.fmin_tnc(func=costFunction, x0=theta, fprime=gradientDescent, args=(X, y))
print('result : ' + str(result))
print('cost : ' + str(costFunction(result[0], X, y)) + ' (This value should be about 0.203)')

"""第4部分 用训练集预测和验证"""
params = np.zeros((X.shape[1], 1)).ravel()
args = (X, y)


def f(params, *args):
    X_train, y_train = args
    m, n = X_train.shape
    J = 0
    theta = params.reshape((n, 1))
    h = sigmoid(np.dot(X_train, theta))
    J = -1 * np.sum(y_train * np.log(h) + (1 - y_train) * np.log((1 - h))) / m

    return J


def gradf(params, *args):
    X_train, y_train = args
    m, n = X_train.shape
    theta = params.reshape(-1, 1)
    h = sigmoid(np.dot(X_train, theta))
    grad = np.zeros((X_train.shape[1], 1))
    grad = X_train.T.dot((h - y_train)) / m
    g = grad.ravel()
    return g


res = optimize.fmin_cg(f,x0=params,fprime=gradf,args=args,maxiter=500)


theta_min = np.matrix(result[0])
predictions = predict(theta_min, X)
print('classification report : ')
print(classification_report(y, predictions))
prob = sigmoid(np.dot(np.array([[1,45,85]]),res))
print("For a student with scores 45 and 85, we predict an admission :" + str(prob))
print("Expected value: 0.775 +/- 0.002")
correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(predictions, y.ravel())]
accuracy = sum(correct) / len(correct) * 100
print('accuracy = {0:.1f}%'.format(accuracy))

"""第5部分 寻找决策边界"""
label = np.array(y)
index_0 = np.where(label.ravel() == 0)
plt.scatter(X[index_0, 1], X[index_0, 2], marker='x', color='yellow', label='Not admitted', s=30)
index_1 = np.where(label.ravel() == 1)
plt.scatter(X[index_1, 1], X[index_1, 2], marker='o', color='black', label='Admitted', s=30)
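# The decision boundary is the line where theta0 + theta1*x1 + theta2*x2 = 0,
# i.e. x2 = -(theta0 + theta1*x1) / theta2, drawn over the exam-score range.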
x1 = np.arange(20, 100, 0.5)
x2 = (- res[0] - res[1]*x1) / res[2]
plt.plot(x1, x2, color='black')
plt.xlabel('Exam 1 Score')
plt.ylabel('Exam 2 Score')
plt.legend(loc='upper left')
plt.show()

 

sigmoid.py: the sigmoid function

import numpy as np
import matplotlib.pyplot as plt


def sigmoid(z):
    return 1 / (1 + np.exp(-z))
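For reference, this implements the logistic (sigmoid) function

$$ g(z) = \frac{1}{1 + e^{-z}}, $$

which maps any real input into the interval (0, 1), so its output can be read as a probability.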

 

costFunction.py: computes the cost function

from sigmoid import *


def costFunction(theta, X, y):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
    second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
    return np.sum(first - second) / (len(X))
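For reference, this function evaluates the standard (unregularized) logistic regression cost: with $h_\theta(x) = g(\theta^T x)$ denoting the sigmoid hypothesis and $m$ the number of training examples,

$$ J(\theta) = \frac{1}{m} \sum_{i=1}^{m} \Big[ -y^{(i)} \log\big(h_\theta(x^{(i)})\big) - \big(1 - y^{(i)}\big) \log\big(1 - h_\theta(x^{(i)})\big) \Big], $$

which is exactly the quantity summed by first - second in the code.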

 

gradientDescent.py: computes the gradient

import numpy as np
from sigmoid import *


def gradientDescent(theta, X, y):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)

    parameters = int(theta.ravel().shape[1])
    grad = np.zeros(parameters)

    error = sigmoid(X * theta.T) - y

    for i in range(parameters):
        term = np.multiply(error, X[:, i])
        grad[i] = np.sum(term) / len(X)

    return grad
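Note that, despite its name, this function only evaluates the gradient of the cost at a given theta; the minimization itself is carried out by the SciPy optimizers called in ex2.py. The j-th component it returns is

$$ \frac{\partial J(\theta)}{\partial \theta_j} = \frac{1}{m} \sum_{i=1}^{m} \big( h_\theta(x^{(i)}) - y^{(i)} \big)\, x_j^{(i)}. $$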

 

predict.py: the prediction function

from sigmoid import *


def predict(theta, X):
    probability = sigmoid(X * theta.T)
    return [1 if x >= 0.5 else 0 for x in probability]
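Below is a minimal usage sketch for predict. The theta values and the feature rows are purely illustrative placeholders (the actual fitted parameters come from the optimization in ex2.py); they are only meant to show the expected shapes.

import numpy as np
from sigmoid import sigmoid
from predict import predict

# Illustrative parameters only (placeholders, not the exact fitted values).
theta = np.matrix([-25.0, 0.2, 0.2])

# Each row: intercept term, Exam 1 score, Exam 2 score.
X_new = np.matrix([[1, 45, 85],
                   [1, 30, 40]])

print(sigmoid(X_new * theta.T))   # admission probabilities
print(predict(theta, X_new))      # 0/1 labels using the 0.5 threshold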

 
