A classic hands-on machine learning exercise.
Using Python, we apply linear regression, logistic regression, random forests, and ensembling/stacking to predict whether Titanic passengers survived.
The code is based on 唐宇迪's tutorial, modified mainly to keep up with updates to the libraries involved so that it runs on current versions. All of the code has been run recently and verified to work.
import pandas
titanic = pandas.read_csv("titanic_train.csv")
titanic.head(6)
print(titanic.describe())
#Fill missing ages with the median age
titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())
titanic.head(6)
#Show the distinct values in the Sex column
print(titanic["Sex"].unique())
#Replace the values in the Sex column with numbers
titanic.loc[titanic["Sex"]=="male","Sex"]=0
titanic.loc[titanic["Sex"]=="female","Sex"]=1
#Embarked is a string column with missing values; fill the gaps with the most common value
#then map everything to 0, 1, 2
print(titanic["Embarked"].unique())
titanic["Embarked"]=titanic["Embarked"].fillna('S')
titanic.loc[titanic["Embarked"]== "S","Embarked"]=0
titanic.loc[titanic["Embarked"]== "C","Embarked"]=1
titanic.loc[titanic["Embarked"]== "Q","Embarked"]=2
#Import the classes needed for linear regression and cross-validation
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
#The columns used to make the prediction
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
#Initialize the algorithm class
alg = LinearRegression()
#Cross-validate over the Titanic training set; split() returns the row indices for the train and test folds
#In current scikit-learn, n_folds has become n_splits, and the data is passed to split() rather than to the constructor
kf = KFold(n_splits=3)
predictions = []
for train, test in kf.split(titanic):
    #Training rows for the predictor columns
    train_predictors = titanic[predictors].iloc[train, :]
    #The corresponding Survived values
    train_target = titanic["Survived"].iloc[train]
    #Train the algorithm
    alg.fit(train_predictors, train_target)
    #Predict on the test fold
    test_predictions = alg.predict(titanic[predictors].iloc[test, :])
    #Collect this fold's predictions
    predictions.append(test_predictions)
#print(predictions)
Print the accuracy
import numpy as np
#Concatenate the three folds of predictions into one array
predictions = np.concatenate(predictions, axis=0)
#Threshold the regression output into 0/1 survival predictions
predictions[predictions > .5] = 1
predictions[predictions <=.5] = 0
#Accuracy on the training labels
accuracy = sum(predictions == titanic["Survived"]) / len(predictions)
print(accuracy)
Predict on the real test set
titanic_test = pandas.read_csv("test.csv")
titanic_test["Age"] = titanic_test["Age"].fillna(titanic["Age"].median())
titanic_test["Fare"] = titanic_test["Fare"].fillna(titanic_test["Fare"].median())
titanic_test.loc[titanic_test["Sex"] == "male", "Sex"] = 0
titanic_test.loc[titanic_test["Sex"] == "female", "Sex"] = 1
titanic_test["Embarked"] = titanic_test["Embarked"].fillna("S")
titanic_test.loc[titanic_test["Embarked"] == "S", "Embarked"] = 0
titanic_test.loc[titanic_test["Embarked"] == "C", "Embarked"] = 1
titanic_test.loc[titanic_test["Embarked"] == "Q", "Embarked"] = 2
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
prediction = alg.predict(titanic_test[predictors])
prediction[prediction > .5] = 1
prediction[prediction <= .5] = 0
#The standard Kaggle test.csv has no Survived column, so accuracy cannot be computed here;
#print the predictions instead (a submission-file sketch follows below)
print(prediction)
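If the goal is a Kaggle submission file rather than a local accuracy number, here is a minimal sketch (assuming the standard test.csv, which includes a PassengerId column):
#Build a submission DataFrame; Survived must be an integer column
submission = pandas.DataFrame({
    "PassengerId": titanic_test["PassengerId"],
    "Survived": prediction.astype(int)
})
submission.to_csv("submission.csv", index=False)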
#from sklearn import cross_validation -- the cross_validation module has been removed; use model_selection instead
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
#Initialize the algorithm (max_iter raised to avoid convergence warnings in recent scikit-learn)
alg = LogisticRegression(max_iter=1000)
#Use the built-in helper to compute the cross-validated accuracy scores
scores = model_selection.cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=3)
#Average the scores over the folds
print(scores.mean())
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier  #random forests are usually the first model to try here
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked"]
#n_estimators=10: number of trees; min_samples_split=2: don't split a node with fewer than 2 samples; min_samples_leaf=1: each leaf needs at least 1 sample
alg = RandomForestClassifier(n_estimators=10, min_samples_split=2, min_samples_leaf=1)
#Use the built-in helper to compute the cross-validated accuracy scores
kf = model_selection.KFold(n_splits=3)
scores = model_selection.cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=kf)
#Average the scores over the folds
print(scores.mean())
Tuning the parameters
#Tune the parameters: more trees and larger split/leaf minimums to reduce overfitting
#These values need repeated adjustment to find the best combination (a grid-search sketch follows below)
alg = RandomForestClassifier(random_state=1, n_estimators=50, min_samples_split=4, min_samples_leaf=2)
kf = model_selection.KFold(n_splits=3)
scores = model_selection.cross_val_score(alg, titanic[predictors], titanic["Survived"], cv=kf)
print(scores.mean())
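Rather than tuning these values by hand, a grid search can try the combinations automatically. A minimal sketch, with an illustrative parameter grid (not part of the original tutorial):
from sklearn.model_selection import GridSearchCV

#Hypothetical grid of candidate values; adjust as needed
param_grid = {
    "n_estimators": [10, 50, 100],
    "min_samples_split": [2, 4, 8],
    "min_samples_leaf": [1, 2, 4],
}
grid = GridSearchCV(RandomForestClassifier(random_state=1), param_grid, cv=3)
grid.fit(titanic[predictors], titanic["Survived"])
print(grid.best_params_)
print(grid.best_score_)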
New features: family size and name length
#Engineer new features
#Generate a FamilySize column
titanic["FamilySize"] = titanic["SibSp"] + titanic["Parch"]
#Use the length of the name as a feature as well
titanic["NameLength"] = titanic["Name"].apply(lambda x: len(x))
New feature: passenger title
import re

#Function that extracts a passenger's title from the Name field
def get_title(name):
    #Titles always consist of letters and end with a period
    title_search = re.search(r' ([A-Za-z]+)\.', name)
    #If a title exists, extract and return it
    if title_search:
        return title_search.group(1)
    return ""
#Get all the titles and print how often each one occurs
titles = titanic["Name"].apply(get_title)
print(titles.value_counts())
#Map each title to an integer
title_mapping = {
    "Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Dr": 5, "Rev": 6,
    "Major": 7, "Col": 7, "Capt": 7, "Mlle": 8, "Mme": 8,
    "Don": 9, "Sir": 9, "Lady": 10, "Countess": 10, "Jonkheer": 10, "Ms": 2
}
for k, v in title_mapping.items():
    titles[titles == k] = v
#Verify that every title was converted
print(titles.value_counts())
#Add a Title column
titanic["Title"] = titles
import numpy as np
from sklearn.feature_selection import SelectKBest, f_classif
import matplotlib.pyplot as plt
predictors = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Fare", "Embarked", "FamilySize", "Title", "NameLength"]
#Perform univariate feature selection
selector = SelectKBest(f_classif, k=5)
selector.fit(titanic[predictors], titanic["Survived"])
#Get the raw p-value for each feature and convert the p-values into scores
scores = -np.log10(selector.pvalues_)
#Plot the scores for each feature
plt.bar(range(len(predictors)), scores)
plt.xticks(range(len(predictors)), predictors, rotation='vertical')
plt.show()
#Keep only the four best features
predictors = ["Pclass", "Sex", "Fare", "Title"]
alg = RandomForestClassifier(random_state=1, n_estimators=50, min_samples_split=8, min_samples_leaf=4)
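As a quick check of how these four features do on their own, we can reuse the same cross-validation as above (a sketch, not part of the original flow; .astype(float) avoids dtype issues with the mapped columns):
kf = model_selection.KFold(n_splits=3)
scores = model_selection.cross_val_score(alg, titanic[predictors].astype(float), titanic["Survived"], cv=kf)
print(scores.mean())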
from sklearn.ensemble import GradientBoostingClassifier
import numpy as np
#The algorithms to ensemble, each paired with the feature columns it uses
#(max_iter raised on LogisticRegression to avoid convergence warnings in recent scikit-learn)
algorithms = [
    [GradientBoostingClassifier(n_estimators=25, max_depth=3), ["Pclass", "Sex", "Age", "Fare", "Embarked", "FamilySize", "Title"]],
    [LogisticRegression(max_iter=1000), ["Pclass", "Sex", "Fare", "FamilySize", "Title", "Age", "Embarked"]]
]
#Set up the cross-validation
kf = KFold(n_splits=3)
predictions = []
for train, test in kf.split(titanic):
    train_target = titanic["Survived"].iloc[train]
    full_test_predictions = []
    for alg, predictors in algorithms:
        #Train each algorithm on the training fold
        alg.fit(titanic[predictors].iloc[train, :], train_target)
        #Predict probabilities on the test fold
        #.astype(float) converts the dataframe to float and avoids a sklearn error
        test_predictions = alg.predict_proba(titanic[predictors].iloc[test, :].astype(float))[:, 1]
        full_test_predictions.append(test_predictions)
    #A simple ensembling scheme: average the two predictions to get the final classification
    test_predictions = (full_test_predictions[0] + full_test_predictions[1]) / 2
    test_predictions[test_predictions <= .5] = 0
    test_predictions[test_predictions > .5] = 1
    predictions.append(test_predictions)
#Put all the fold predictions into a single array
predictions = np.concatenate(predictions, axis=0)
#Accuracy on the training labels
accuracy = sum(predictions == titanic["Survived"]) / len(predictions)
print(accuracy)
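The ensemble above simply averages the two models' probabilities. Since the intro mentions stacking, here is a minimal sketch of a true stacked model using scikit-learn's StackingClassifier (available since scikit-learn 0.22); the choice of base models and feature columns is illustrative, not from the original tutorial:
from sklearn.ensemble import StackingClassifier

stack_predictors = ["Pclass", "Sex", "Age", "Fare", "Embarked", "FamilySize", "Title"]
stack = StackingClassifier(
    estimators=[
        ("gbc", GradientBoostingClassifier(n_estimators=25, max_depth=3)),
        ("rf", RandomForestClassifier(random_state=1, n_estimators=50, min_samples_split=4, min_samples_leaf=2)),
    ],
    #A logistic regression learns how to combine the base models' predictions
    final_estimator=LogisticRegression(max_iter=1000),
    cv=3,
)
scores = model_selection.cross_val_score(stack, titanic[stack_predictors].astype(float), titanic["Survived"], cv=3)
print(scores.mean())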