# Drop rows: remove outliers with GrLivArea > 4000
train = train.drop(train[(train['GrLivArea']>4000)].index)
# Drop a column
train.drop("Id", axis = 1, inplace = True)
# Concatenate frames that share the same columns and reset the index
all_data = pd.concat((train, test)).reset_index(drop=True)
# With axis=1, concat aligns on the row index and merges two tables with different column names
result = pd.concat([df1, df4], axis=1)
# The join parameter: 'inner' gives the intersection of the two tables (only rows with a shared index, columns extended); 'outer' gives the union.
result = pd.concat([df1, df4], axis=1, join='inner')
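# A minimal sketch of the two join modes on hypothetical toy frames
# (the df1/df4 above are assumed to be defined elsewhere):
df1 = pd.DataFrame({'A': ['A0', 'A1'], 'B': ['B0', 'B1']}, index=[0, 1])
df4 = pd.DataFrame({'C': ['C1', 'C2'], 'D': ['D1', 'D2']}, index=[1, 2])
pd.concat([df1, df4], axis=1)                # outer (default): index {0, 1, 2}, NaN where a row is missing
pd.concat([df1, df4], axis=1, join='inner')  # inner: only the shared index {1}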
# Count missing values per column; isnull() returns a table of True/False, i.e. 1s and 0s
all_data_na = all_data.isnull().sum()
# Fill missing data with a constant
all_data[col] = all_data[col].fillna("None")
# Fill with the mode; mode() returns a Series of modes, so take the first one
all_data['X'] = all_data['X'].fillna(all_data['X'].mode()[0])
# Median fill: group the data by Neighborhood and fill each group's missing
# LotFrontage values with that group's median.
# (An aggregation returns one row per group; transform maps the result back
# onto the original column, so it can be assigned directly.)
all_data["LotFrontage"] = all_data.groupby("Neighborhood")["LotFrontage"].transform(
    lambda x: x.fillna(x.median()))
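# Sketch of agg vs transform on a hypothetical toy frame: agg returns one
# value per group, transform broadcasts it back to every original row,
# which is why it can be assigned straight into the source column.
toy = pd.DataFrame({'grp': ['a', 'a', 'b'], 'val': [1.0, None, 3.0]})
toy.groupby('grp')['val'].agg('median')                              # a -> 1.0, b -> 3.0 (one row per group)
toy.groupby('grp')['val'].transform(lambda x: x.fillna(x.median()))  # [1.0, 1.0, 3.0] (same length as toy)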
# loc: label/boolean indexing (here, conditional assignment)
all_data.loc[all_data['X']<=1002.5, 'X'] = 1
# iloc: integer-position indexing
full['Age'].iloc[i]
# Change the data type
all_data['MSSubClass'] = all_data['MSSubClass'].apply(str)
#astype()
all_data['OverallCond'] = all_data['OverallCond'].astype(str)
# Set the figure size and subplot position
plt.subplots(figsize =(15, 5))
plt.subplot(1, 2, 1)
# Scatter plot; a regression line can be fitted, controlled by fit_reg
g = sns.regplot(x=train['GrLivArea'], y=train['SalePrice'], fit_reg=True).set_title("Before")
# Count plot
g = sns.countplot(x = "Utilities", data = train).set_title("Utilities - Training")
# Correlation heatmap
corr = train.corr()
cmap = sns.diverging_palette(150, 250, as_cmap=True)
sns.heatmap(corr, cmap=cmap, vmax=1, vmin=-0.6, center=0.2, square=True, linewidths=0, cbar_kws={"shrink": .5}, annot=True)
# Box plot
sns.boxplot(x="BsmtQual", y="SalePrice", data=train, order=['Fa', 'TA', 'Gd', 'Ex'])
# Strip plot (individual points with jitter)
sns.stripplot(x="BsmtQual", y="SalePrice", data=train, size = 5, jitter = True, order=['Fa', 'TA', 'Gd', 'Ex'])
# Bar plot
sns.barplot(x="BsmtQual", y="SalePrice", data=train, order=['Fa', 'TA', 'Gd', 'Ex'])
# Histogram with a fitted normal curve
from scipy.stats import norm
g = sns.distplot(train['SalePrice'], fit=norm, label="Skewness : %.2f" % train['SalePrice'].skew())
g = g.legend(loc="best")
# Plot the feature's distribution, then correct it toward normality
g = sns.distplot(full["Fare"], color="#76CDE9", label="Skewness : %.2f"%(full["Fare"].skew()))
g = g.legend(loc="best");
full["Fare"] = full["Fare"].map(lambda i: np.log(i) if i > 0 else 0)
# KDE of one column split by another, to compare the trend between the two groups
g = sns.kdeplot(full["Age"][(full["Survived"] == 0)], color="#76CDE9", shade = True)
g = sns.kdeplot(full["Age"][(full["Survived"] == 1)], ax = g, color="#FFDA50", shade= True)
g.set_xlabel("Age")
g.set_ylabel("Frequency")
g = g.legend(["Not Survived","Survived"])
# Factor plot
g = sns.factorplot(x = "Pclass", y = "Survived", data = full, kind = "bar", palette = mycols, size = 10, aspect = 1.5)
g.despine(left=True)
g = g.set_ylabels("Survival probability")
# Set up a grid layout
grid = plt.GridSpec(2, 3, wspace=0.1, hspace=0.15)
plt.subplot(grid[0, 0])
plt.subplot(grid[0, 1:])
# Bar chart (pandas plotting)
DataFrame.plot(kind='bar')
# Map each value to a number (ordinal encoding)
all_data['X'] = all_data['X'].map({"None":0, "Fa":1, "TA":2, "Gd":3, "Ex":4})
# List the distinct values of the column
all_data['X'].unique()
# Use cut to split the column into equal-width bins
all_data['X_Band'] = pd.cut(all_data['X'], 4)
# Example output: [(-4.01, 1002.5], (1002.5, 2005.0], (2005.0, 3007.5], (3007.5, 4010.0]]
'''
There are two cases when encoding a categorical feature:
1. The values have no inherent order, e.g. color: [red, blue] -> use one-hot encoding.
2. The values are ordered, e.g. size: [X, XL, XXL] -> use a numeric mapping {X:1, XL:2, XXL:3}.
'''
all_data = pd.get_dummies(all_data, columns = ["X"], prefix="X")
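# Toy illustration of the two cases above (hypothetical data):
demo = pd.DataFrame({'color': ['red', 'blue', 'red'],  # unordered -> one-hot
                     'size': ['X', 'XXL', 'XL']})      # ordered   -> numeric map
pd.get_dummies(demo, columns=['color'], prefix='color')  # adds color_blue / color_red
demo['size'].map({'X': 1, 'XL': 2, 'XXL': 3})            # 1, 3, 2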
# Use LabelEncoder() to convert categorical values to integers
from sklearn.preprocessing import LabelEncoder
for c in cols:
    lbl = LabelEncoder()
    lbl.fit(list(all_data[c].values))
    all_data[c] = lbl.transform(list(all_data[c].values))
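# What the loop does per column, on hypothetical values; note the codes are
# assigned alphabetically, not by quality order:
le = LabelEncoder()
print(le.fit_transform(['TA', 'Gd', 'TA', 'Ex']))  # [2 1 2 0], classes_ = ['Ex' 'Gd' 'TA']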
# Check skewness of the numeric features
from scipy.stats import skew
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index
# Check how skewed they are
skewed_feats = all_data[numeric_feats].apply(lambda x: skew(x.dropna())).sort_values(ascending=False)
print("\nSkew in numerical features: \n")
skewness = pd.DataFrame({'Skew' :skewed_feats})
skewness.head(10)
skewness = skewness[abs(skewness['Skew']) > 0.75]
print("There are {} skewed numerical features to Box Cox transform".format(skewness.shape[0]))
# Correct the skewness with a Box-Cox transform
from scipy.special import boxcox1p
skewed_features = skewness.index
lam = 0.15
for feat in skewed_features:
    # all_data[feat] += 1
    all_data[feat] = boxcox1p(all_data[feat], lam)
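# Sanity check on toy values: with lam=0, boxcox1p reduces to log1p, so this
# generalizes the log transform used on Fare above; it already operates on
# 1 + x, which is why the commented-out "+= 1" is unnecessary.
vals = np.array([0.0, 1.0, 10.0])
np.allclose(boxcox1p(vals, 0.0), np.log1p(vals))  # True
boxcox1p(vals, lam)  # mild power transform of 1 + x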
# Usually a slice of the training set is held out as a validation set (the four return values need to be captured)
X_tr, X_val, y_tr, y_val = model_selection.train_test_split(xgb_train, y_train, test_size=0.3, random_state=42)
# Shuffle and split the data, then cross-validate
shuff = ShuffleSplit(n_splits=3, test_size=0.2, random_state=50)
dt = DecisionTreeClassifier(random_state=0)
dt.fit(X_train, Y_train)
dt_scores = cross_val_score(dt, X_train, Y_train, cv = shuff)
dt_scores = dt_scores.mean()
dt_apply_acc = metrics.accuracy_score(Y_test, dt.predict(X_test))
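# Self-contained sketch of the same flow on synthetic data (all names here
# are hypothetical; make_classification stands in for the real features):
from sklearn.datasets import make_classification
from sklearn.model_selection import ShuffleSplit, cross_val_score, train_test_split
from sklearn.tree import DecisionTreeClassifier
X, y = make_classification(n_samples=200, n_features=5, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=42)
cv = ShuffleSplit(n_splits=3, test_size=0.2, random_state=50)
print(cross_val_score(DecisionTreeClassifier(random_state=0), X_tr, y_tr, cv=cv).mean())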
# Univariate selection: for regression problems use the f_regression score; for classification use chi2 or f_classif. chi2 requires non-negative feature values.
Kbest = SelectKBest(score_func=chi2, k=10)
fit = Kbest.fit(X_train, Y_train)
scores = pd.DataFrame({'Columns': X_test.columns.values, 'Score': fit.scores_})
# Tree-based feature selection
model = ExtraTreesClassifier()
model.fit(X_train, Y_train)
scores = pd.DataFrame({'Columns': X_test.columns.values, 'Score': model.feature_importances_})
scores.sort_values(by='Score', ascending=False)
# Some models expose feature_importances_, which can be sorted to select features
indices = np.argsort(gbc.feature_importances_)[::-1]
g = sns.barplot(y=X_train.columns[indices], x = gbc.feature_importances_[indices], orient='h', palette = mycols)
# Or:
xgb = xgb.fit(xgb_red_train, Y_train)
xgb_feat_red = SelectFromModel(xgb, prefit = True)
xgb_X_train = xgb_feat_red.transform(xgb_red_train)
xgb_X_test = xgb_feat_red.transform(xgb_red_test)
xgb_final_test = xgb_feat_red.transform(xgb_final_test)
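# Self-contained sketch of SelectFromModel with a sklearn estimator standing
# in for the fitted xgb model above (data and names are hypothetical):
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
X, y = make_classification(n_samples=200, n_features=10, random_state=0)
est = ExtraTreesClassifier(random_state=0).fit(X, y)
sel = SelectFromModel(est, prefit=True)  # default threshold: mean feature importance
X_red = sel.transform(X)                 # apply the same reduction to train/test/final splits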