第三章.逻辑回归
正确率(Precision)和召回率(Recall)广泛应用于信息检索和统计学分类领域的两个度量值,用来评价结果的质量。
F1=2*((正确率*召回率)/(正确率+召回率))
正确率与召回率指标有时候会出现矛盾的情况,这就需要综合考虑它们,最常见的方法就是F-Measure(又称F-Score)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn import preprocessing
from sklearn.preprocessing import PolynomialFeatures # 产生多项式的
# Whether to standardize features before training/prediction
# (read by gradAscent and predict below).
scale = False
# Load the CSV data set; expected columns: feature1, feature2, label.
data = np.genfromtxt('D:\\Data\\LR-testSet2.csv', delimiter=',')
# All columns except the last are the input features.
x_data = data[:, :-1]
# Last column is the class label, kept as an (n, 1) column vector.
y_data = data[:, -1, np.newaxis]
# 绘制散点图
def plot():
    """Scatter-plot the two classes of the loaded data set.

    Reads the module-level x_data / y_data; class 0 is drawn as blue
    circles, class 1 as red crosses.
    """
    # Coordinate lists per class: (xs, ys) for label 0 and label 1.
    class0 = ([], [])
    class1 = ([], [])
    for features, label in zip(x_data, y_data):
        bucket = class0 if label == 0 else class1
        bucket[0].append(features[0])
        bucket[1].append(features[1])
    scatter0 = plt.scatter(class0[0], class0[1], c='b', marker='o')
    scatter1 = plt.scatter(class1[0], class1[1], c='r', marker='x')
    plt.legend(handles=[scatter0, scatter1], labels=['label1', 'label2'])
# Show the raw training data before any model is fitted.
plot()
plt.show()
# sigmoid函数
def sigmoid(x):
    """Logistic function: map real-valued input(s) into the open interval (0, 1)."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator
# 代价函数
def cost(xMat, yMat, ws):
    """Average cross-entropy cost for logistic regression.

    Parameters
    ----------
    xMat : np.matrix, shape (m, n) -- feature matrix (rows are samples)
    yMat : np.matrix, shape (m, 1) -- 0/1 labels
    ws   : np.matrix, shape (n, 1) -- weight vector

    Returns
    -------
    float -- mean negative log-likelihood over the m samples.
    """
    # Predicted probabilities (logistic of the linear score, inlined
    # so the cost is self-contained).
    h = 1.0 / (1.0 + np.exp(-(xMat * ws)))
    # FIX: clip probabilities away from exactly 0/1 so that np.log
    # never produces -inf / nan when the model saturates.
    eps = 1e-12
    h = np.clip(h, eps, 1 - eps)
    log_likelihood = (np.multiply(yMat, np.log(h))
                      + np.multiply(1 - yMat, np.log(1 - h)))
    return -np.sum(log_likelihood) / len(xMat)
# 梯度下降法
def gradAscent(xArr, yArr):
    """Fit logistic-regression weights with full-batch gradient descent.

    Parameters
    ----------
    xArr : array-like, shape (m, n) -- feature matrix
    yArr : array-like, shape (m, 1) -- 0/1 labels

    Returns
    -------
    (weights, costList) -- the (n, 1) weight matrix and the cost
    recorded every 50 epochs.
    """
    # Optionally standardize features (controlled by the module-level flag).
    if scale:
        xArr = preprocessing.scale(xArr)
    X = np.mat(xArr)
    Y = np.mat(yArr)

    learning_rate = 0.03
    epochs = 50000
    n_samples, n_features = np.shape(X)
    # One weight per feature column, initialized to 1.
    weights = np.mat(np.ones((n_features, 1)))

    costList = []
    for epoch in range(epochs + 1):
        # Current predictions and the gradient of the cross-entropy cost.
        predictions = sigmoid(X * weights)
        gradient = X.T * (predictions - Y) / n_samples
        weights = weights - learning_rate * gradient
        # Track the cost every 50 epochs for convergence inspection.
        if epoch % 50 == 0:
            costList.append(cost(X, Y, weights))
    return weights, costList
# 定义多项式回归,degree的值可以调节多项式的特征
poly_reg = PolynomialFeatures(degree=3)
# 特征处理
x_poly = poly_reg.fit_transform(x_data)
# 训练模型
ws, costList = gradAscent(x_poly, y_data)
# 获取数据值所在的范围
x_min, x_max = x_data[:, 0].min() - 1, x_data[:, 0].max() + 1
y_min, y_max = x_data[:, 1].min() - 1, x_data[:, 1].max() + 1
# 生出网格矩阵
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02), np.arange(y_min, y_max, 0.02))
# 特征处理+维度转换
z = sigmoid(poly_reg.fit_transform(np.c_[xx.ravel(), yy.ravel()]).dot(np.array(ws)))
for i in range(len(z)):
if z[i] >= 0.5:
z[i] = 1
else:
z[i] = 0
z = z.reshape(xx.shape)
# 等高线图
cs = plt.contourf(xx, yy, z)
plot()
plt.show()
# 预测
def predict(x_data, ws):
    """Return hard 0/1 predictions for each row of the feature matrix.

    Parameters
    ----------
    x_data : array-like, shape (m, n) -- (polynomial) feature matrix
    ws     : array-like, shape (n, 1) -- trained weight vector

    Returns
    -------
    list[int] -- 1 where the predicted probability >= 0.5, else 0.
    """
    if scale == True:
        # BUG FIX: the original called the module itself
        # (`preprocessing(x_data)`), which raises TypeError; features
        # must be standardized the same way training did.
        x_data = preprocessing.scale(x_data)
    xMat = np.mat(x_data)
    ws = np.mat(ws)
    return [1 if prob >= 0.5 else 0 for prob in sigmoid(xMat * ws)]
# Predict on the training features and report per-class
# precision / recall / F1.
predictions = predict(x_poly, ws)
print(classification_report(y_data, predictions))
①.数据
②.图像
import matplotlib.pyplot as plt
import numpy as np
from sklearn import linear_model
from sklearn.preprocessing import PolynomialFeatures
from sklearn.datasets import make_gaussian_quantiles
# Two-class sample set: 500 points, 2 features, drawn from a 2-D
# Gaussian and split into classes by quantiles.
x_data, y_data = make_gaussian_quantiles(n_samples=500, n_features=2, n_classes=2)
plt.scatter(x_data[:, 0], x_data[:, 1], c=y_data)
plt.show()
# Polynomial feature expansion; degree tunes the boundary flexibility.
poly_reg = PolynomialFeatures(degree=5)
# Fit the transformer on the samples and expand them.
x_poly = poly_reg.fit_transform(x_data)
# Fit scikit-learn's logistic regression on the expanded features.
logistic = linear_model.LogisticRegression()
logistic.fit(x_poly, y_data)
# Plotting range: raw feature extents padded by 1 on each side.
x_min, x_max = x_data[:, 0].min() - 1, x_data[:, 0].max() + 1
y_min, y_max = x_data[:, 1].min() - 1, x_data[:, 1].max() + 1
# Grid of points covering the feature space at 0.02 resolution.
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02), np.arange(y_min, y_max, 0.02))
# FIX: use transform() so the fitted transformer is reused instead of
# being refitted on the plotting grid with fit_transform().
z = logistic.predict(poly_reg.transform(np.c_[xx.ravel(), yy.ravel()]))
z = z.reshape(xx.shape)
# Decision regions as a filled contour, overlaid with the samples.
cs = plt.contourf(xx, yy, z)
plt.scatter(x_data[:, 0], x_data[:, 1], c=y_data)
plt.show()
# Mean accuracy on the (training) data.
print('score:', logistic.score(x_poly, y_data))
①.数据
②.图像