Tools: PyCharm, Win10, Python 3.6.4
The screenshot above shows our data file. The last column is the label for whether there is a supermarket nearby: 1 means yes and -1 means no. Since the data has quite a few feature dimensions, I compute correlation coefficients and drop the features that correlate weakly with the label.
The correlation heatmap (drawn by the full script below) shows that some features, such as 公共设施 (public facilities), 风景名胜 (scenic spots) and 租赁成本 (rental cost), correlate only weakly with the label, so we can simply drop them.
Next we call scikit-learn to fit the data. We mainly use SVM, KNN and a random forest, and find that SVM and the random forest do slightly better than KNN (a cross-validation sketch after the listing shows one way to double-check that comparison).
import pandas as pd
import sklearn
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
plt.rcParams['font.family'] = 'SimHei'  # font that can render the Chinese column names
plt.rcParams['font.size'] = 15  # change the default font size
data = pd.read_excel('data.xls')
# print(data)
column = data.columns.tolist()[:19]
# print(column)
mcorr = data.corr()  # pairwise correlation matrix of all columns
mcorr_data = np.array(mcorr.标签)  # correlation of every feature with the 标签 (label) column
# print(mcorr_data)
mask = np.zeros_like(mcorr, dtype=bool)  # boolean matrix with the same shape as mcorr (np.bool is deprecated)
mask[np.triu_indices_from(mask)] = True  # mask the upper triangle, above the diagonal
# draw the heatmap
plt.figure(figsize=(24, 24))
cmap = sns.diverging_palette(220, 10, as_cmap=True)  # returns a matplotlib colormap object
g = sns.heatmap(mcorr, mask=mask, cmap=cmap, square=True, annot=True, fmt='0.2f')  # annotated correlation heatmap
plt.show()
# drop the weakly correlated features
x = data.drop(['风景名胜', '公共设施', '租赁成本'], axis=1)  # drop() already returns a DataFrame
# min-max normalisation of every column to [0, 1]
x_normal = (x - x.min()) / (x.max() - x.min())
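# Note: this rescales every column, including 标签, so the labels are mapped
# from {-1, 1} to {0, 1}; the classifiers below are unaffected by that.
# Strictly speaking the min and max should come from the training split only
# (e.g. sklearn's MinMaxScaler fitted on X_train) to avoid test-set leakage.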
# print(x_normal)
Y = np.array(x_normal.标签)  # labels (now 0/1 after normalisation)
X = np.array(x_normal.drop(['标签'], axis=1))  # feature matrix without the label column
# print(Y)
# print(X)
# split into training and test sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=42)
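# If the two classes are imbalanced, passing stratify=Y to train_test_split
# keeps the class ratio identical in the training and test sets.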
from sklearn.svm import SVC
from sklearn.metrics import precision_score, recall_score, f1_score
clf = SVC(kernel='rbf', class_weight='balanced')  # RBF kernel; reweight classes by frequency
clf.fit(X_train, Y_train)
y_predict = clf.predict(X_test)
error = np.sum(y_predict != Y_test)  # reuse y_predict instead of re-predicting sample by sample
print('SVM error rate: %.4f' % (error / float(len(X_test))))
print('SVM precision:', precision_score(Y_test, y_predict, average='macro'))
print('SVM recall:', recall_score(Y_test, y_predict, average='macro'))
print('SVM F1:', f1_score(Y_test, y_predict, average='macro'))
from sklearn.neighbors import KNeighborsClassifier as KNN
knc = KNN(n_neighbors=6)
knc.fit(X_train, Y_train)
y_predict = knc.predict(X_test)
print('KNN accuracy:', knc.score(X_test, Y_test))
print('KNN precision:', precision_score(Y_test, y_predict, average='macro'))
print('KNN recall:', recall_score(Y_test, y_predict, average='macro'))
print('KNN F1:', f1_score(Y_test, y_predict, average='macro'))
from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier()
rfc.fit(X_train, Y_train)
y_predict = rfc.predict(X_test)
print('Random forest accuracy:', rfc.score(X_test, Y_test))
print('Random forest precision:', precision_score(Y_test, y_predict, average='macro'))
print('Random forest recall:', recall_score(Y_test, y_predict, average='macro'))
print('Random forest F1:', f1_score(Y_test, y_predict, average='macro'))
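A single 80/20 split can be noisy, so how the three models rank may shift with the random seed. Here is a minimal sketch, assuming the X and Y arrays built above, that compares the three models with 5-fold cross-validation on the macro F1 score instead of a single split:

# Sketch: compare the three classifiers with 5-fold cross-validation,
# which averages over several splits instead of trusting a single one.
from sklearn.model_selection import cross_val_score
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier

models = {
    'SVM': SVC(kernel='rbf', class_weight='balanced'),
    'KNN': KNeighborsClassifier(n_neighbors=6),
    'RandomForest': RandomForestClassifier(random_state=42),
}
for name, model in models.items():
    scores = cross_val_score(model, X, Y, cv=5, scoring='f1_macro')
    print('%s: f1_macro = %.4f (+/- %.4f)' % (name, scores.mean(), scores.std()))

cross_val_score refits the model from scratch on each fold, so every sample gets used for testing exactly once.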
The code also reports precision, recall and F1. I will not go into these concepts in detail here; plenty of blog posts cover them.
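For reference, in the binary case all three come straight from the confusion matrix: precision = TP / (TP + FP), recall = TP / (TP + FN), and F1 is their harmonic mean, 2 * precision * recall / (precision + recall). With average='macro', sklearn computes the metric separately for each class and then takes the unweighted mean, so both classes count equally. A minimal sketch, assuming the Y_test and y_predict arrays from the script above, recomputes the positive-class numbers by hand:

# Sketch: recompute precision/recall/F1 for the positive class by hand.
# These are per-class values; the macro scores printed above are the
# unweighted mean of the same quantities over both classes.
from sklearn.metrics import confusion_matrix
tn, fp, fn, tp = confusion_matrix(Y_test, y_predict).ravel()
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * precision * recall / (precision + recall)
print('precision=%.4f recall=%.4f f1=%.4f' % (precision, recall, f1))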