【逻辑回归】

逻辑回归

数据加载

import pandas as pd
import numpy as np

# Load the exam dataset (columns: Exam1, Exam2, Pass) from the working directory.
# NOTE: the original used curly quotes (‘…’), which are a SyntaxError in Python.
data = pd.read_csv('examdata.csv')

from matplotlib import pyplot as plt

# Raw-data scatter plot: Exam1 score on x, Exam2 score on y.
# (Fixed: curly quotes around every string literal were a SyntaxError.)
fig1 = plt.figure()
plt.scatter(data.loc[:, 'Exam1'], data.loc[:, 'Exam2'])
plt.title('Exam1-Exam2')
plt.xlabel('Exam1')
plt.ylabel('Exam2')
plt.show()

# Boolean mask: True where the student passed (Pass == 1).
# ~mask therefore selects the failing students.
mask = data.loc[:, 'Pass'] == 1
print(~mask)

# Scatter plot split by outcome.
# BUG FIX: the original plotted `failed` with `mask` (the passing students)
# instead of `~mask`, so both series showed the same points.
fig2 = plt.figure()
passed = plt.scatter(data.loc[:, 'Exam1'][mask], data.loc[:, 'Exam2'][mask])
failed = plt.scatter(data.loc[:, 'Exam1'][~mask], data.loc[:, 'Exam2'][~mask])
plt.title('Exam1-Exam2')
plt.xlabel('Exam1')
plt.ylabel('Exam2')
plt.legend((passed, failed), ('passed', 'failed'))
plt.show()

# Feature matrix X (both exam scores) and label vector y.
# X1/X2 are kept as individual Series for the later decision-boundary plot
# and for building the second-order features.
X = data.drop(['Pass'], axis=1)
y = data.loc[:, 'Pass']
X1 = data.loc[:, 'Exam1']
X2 = data.loc[:, 'Exam2']
X1.head()

建立模型

from sklearn.linear_model import LogisticRegression

# Fit a first-order (linear-boundary) logistic regression on the two exam scores.
LR = LogisticRegression()
LR.fit(X,y)

# Predictions on the training set itself (used below to compute training accuracy).
y_predict = LR.predict(X)
print(y_predict)

from sklearn.metrics import accuracy_score

# Training accuracy of the first-order model (fraction of correct predictions).
accuracy = accuracy_score(y,y_predict)
print(accuracy)

# Predict a single new student with Exam1=70, Exam2=65.
# BUG FIX: the original line mixed straight and curly quotes with no space
# ('passed’if ...) — a SyntaxError. Also index [0] to compare a scalar rather
# than a length-1 array.
y_test = LR.predict([[70, 65]])
print('passed' if y_test[0] == 1 else 'failed')

# Extract the fitted parameters: intercept theta0 and the two coefficients.
# (Removed a stray no-op bare `LR.intercept_` expression; take [0] so theta0
# is a scalar instead of a 1-element array.)
theta0 = LR.intercept_[0]
theta1, theta2 = LR.coef_[0][0], LR.coef_[0][1]
print(theta0, theta1, theta2)

# Decision boundary: theta0 + theta1*x1 + theta2*x2 = 0
#   =>  x2 = -(theta0 + theta1*x1) / theta2
X2_new = -(theta0 + theta1 * X1) / theta2
print(X2_new)

# Scatter plot with the linear decision boundary overlaid.
# BUG FIX: as in fig2, `failed` must use `~mask`, not `mask`.
fig3 = plt.figure()
passed = plt.scatter(data.loc[:, 'Exam1'][mask], data.loc[:, 'Exam2'][mask])
failed = plt.scatter(data.loc[:, 'Exam1'][~mask], data.loc[:, 'Exam2'][~mask])
plt.plot(X1, X2_new)
plt.title('Exam1-Exam2')
plt.xlabel('Exam1')
plt.ylabel('Exam2')
plt.legend((passed, failed), ('passed', 'failed'))
plt.show()

二阶

# Build second-order polynomial features: squares and the cross term.
# BUG FIX: the original had `X1_2 = X1X1` (NameError) and `X2_2 = X2` with a
# stray `X2` line — both squares were lost in extraction.
X1_2 = X1 * X1
X2_2 = X2 * X2
X1_X2 = X1 * X2

# Assemble the augmented feature matrix for the second-order model.
X_new = {'X1': X1, 'X2': X2, 'X1_2': X1_2, 'X2_2': X2_2, 'X1_X2': X1_X2}
X_new = pd.DataFrame(X_new)
print(X_new)

# Fit a second logistic regression on the polynomial feature set; with the
# squared and cross terms the boundary can be curved, so training accuracy
# should improve over the first-order model.
LR2 = LogisticRegression()
LR2.fit(X_new,y)
y2_predict = LR2.predict(X_new)
accuracy2 = accuracy_score(y,y2_predict)
print(accuracy2)

# Sort Exam1 values — presumably to get a monotonically increasing x-axis for
# plotting the curved second-order boundary (the plot itself is not in view here).
X1_new = X1.sort_values()
print(X1,X1_new)

你可能感兴趣的:(人工智能,python,人工智能)