草稿箱里面很多文章都是初稿,是之前在学习相关内容的时候写的,本身就是做备忘录了,准备这段时间把草稿箱里面的文章发出去。
数据集提供每日历史销售数据。任务是为测试集预测每个商店销售的产品总量。请注意,商店和产品列表每个月都会略有变化,因此创建一个可以处理此类情况的稳健模型是挑战的一部分。
提交的内容通过均方根误差 (RMSE) 进行评估。 真正的目标值被裁剪到 [0,20] 范围内。
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
import xgboost as xgb
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
%matplotlib inline
# Load the competition data: daily sales history, item/shop metadata,
# the test set to predict for, and the sample submission format.
df_sales = pd.read_csv('sales_train.csv')
df_items = pd.read_csv('items.csv')
df_shops = pd.read_csv('shops.csv')
df_test = pd.read_csv('test.csv')
df_sub = pd.read_csv('sample_submission.csv')
# Exact duplicate rows carry no extra information — keep the first occurrence.
df_sales.drop_duplicates(keep='first', inplace=True, ignore_index=True)
df_sales.shape
# Quick outlier inspection: box plots of daily counts and item prices.
plt.figure(figsize=(10,4))
plt.xlim(-100, 3000)
sns.boxplot(x=df_sales.item_cnt_day)
plt.figure(figsize=(10,4))
plt.xlim(df_sales.item_price.min(), df_sales.item_price.max()*1.1)
sns.boxplot(x=df_sales.item_price)
# Inspect rows with a negative price (data-entry errors).
df_sales[df_sales['item_price'] <0]
# Drop negative daily counts and negative prices.
# NOTE(review): negative item_cnt_day rows are returns — dropping them
# discards that signal entirely; confirm this is intended.
df_sales.drop(df_sales[df_sales['item_cnt_day'] <0].index , inplace=True)
df_sales.drop(df_sales[df_sales['item_price'] <0].index , inplace=True)
df_sales.shape
# Remove item_price outliers by Tukey's 1.5*IQR rule.
Q1 = np.percentile(df_sales['item_price'], 25.0)
Q3 = np.percentile(df_sales['item_price'], 75.0)
IQR = Q3 - Q1
df_sub1 = df_sales[df_sales['item_price'] > Q3 + 1.5*IQR]  # upper-tail outliers
df_sub2 = df_sales[df_sales['item_price'] < Q1 - 1.5*IQR]  # lower-tail outliers
# Fix: the original computed df_sub2 but never dropped it, so lower-tail
# outliers were silently kept. Drop both tails.
df_sales.drop(df_sub1.index, inplace=True)
df_sales.drop(df_sub2.index, inplace=True)
df_sales.shape
# Mean item price per month (date_block_num) — inspected here, then used
# as the single engineered feature.
dict(round(df_sales.groupby('date_block_num')['item_price'].mean(),4))
# Global average of the monthly mean prices; later used to fill test rows
# that have no month information.
price = round(np.array(df_sales.groupby('date_block_num')['item_price'].mean()).mean(),2)
print(price)
replace_dict = dict(round(df_sales.groupby('date_block_num')['item_price'].mean(),2))
# Overwrite each month index with that month's mean price (the column is
# renamed to reflect its new meaning below).
df_sales['date_block_num'] = df_sales['date_block_num'].replace(replace_dict)
df_train = df_sales.copy()
df_train.drop(['date','item_price'], axis=1, inplace=True)
df_train.rename(columns = {'date_block_num':'mean_price_by_column'}, inplace=True)
df_train.head()
# Merge train and test so shared feature columns can be filled consistently.
com_df = pd.concat([df_train,df_test])
# Test rows carry no price information; fall back to the global mean price.
com_df['mean_price_by_column'] = com_df['mean_price_by_column'].fillna(value=price)
com_df['item_cnt_day'] = com_df['item_cnt_day'].fillna(value=0)
# Fix: split on the ID column (present only in test.csv) instead of on
# item_cnt_day == 0 — a training row with a genuine zero daily count would
# otherwise leak into the test partition. (Also removed the unused
# `mean_price` duplicate of `price`.)
test_df = com_df[com_df['ID'].notna()]
train_df = com_df[com_df['ID'].isna()]
# Build the final modelling frames.
traindf = train_df.copy()
traindf.drop('ID', inplace=True, axis=1)
testdf = test_df.copy()
testdf.drop(['ID', 'item_cnt_day'], inplace=True, axis=1)
# Fix: the original re-created traindf a second time (dead duplicate of the
# two lines above) and z-scored item_id only in the training frame, leaving
# the test features on a different scale. Standardise BOTH frames with the
# training mean/std so train and test stay comparable.
item_id_mean = traindf['item_id'].mean()
item_id_std = traindf['item_id'].std()
traindf['item_id'] = (traindf['item_id'] - item_id_mean) / item_id_std
testdf['item_id'] = (testdf['item_id'] - item_id_mean) / item_id_std
# Features/target consumed by the modelling cells below (X and y were
# referenced later but never defined in the original).
X = traindf.drop('item_cnt_day', axis=1)
y = traindf['item_cnt_day']
这里只写了xgboost的代码,lightgbm和catboost 的代码可以在下面的代码基础上直接修改就好了。
# Hold out 20% of the training data for validation.
# NOTE(review): assumes X (features) and y (target) are defined beforehand —
# confirm they are built from the processed training frame.
X_train, X_valid, y_train, y_valid = train_test_split(X, y,train_size=0.8, random_state= 42)
# DMatrix versions for the native xgb.cv API used in the hyperopt section.
dtrain = xgb.DMatrix(data=X_train,label=y_train)
dtest = xgb.DMatrix(data=X_valid,label=y_valid)
# Baseline XGBoost regressor; early stopping watches the validation RMSE.
xgb_params = dict(
    max_depth=8,
    n_estimators=100,
    min_child_weight=30,
    colsample_bytree=0.8,
    subsample=0.8,
    eta=0.1,
    seed=42,
    eval_metric="rmse",
    validate_parameters=False,
    early_stopping_rounds=10,
    verbosity=0,
)
model = xgb.XGBRegressor(**xgb_params)
# res = xgb.cv(model.get_params(), dtrain, num_boost_round=10, nfold=5,
#              callbacks=[xgb.callback.EvaluationMonitor(show_stdv=False),
#                         xgb.callback.EarlyStopping(3)])
# Fit with both splits in eval_set so train/valid RMSE are logged per round.
fit_model = model.fit(
    X_train,
    y_train,
    eval_set=[(X_train, y_train), (X_valid, y_valid)],
    verbose=True,
)
# Visualise the cross-validation RMSE curves.
# NOTE(review): `res` is only defined if the commented-out xgb.cv call in the
# previous cell is run first — as written this cell raises NameError.
plt.figure(figsize=(10,10))
plt.plot(np.arange(1,res.shape[0]+1,1), res["train-rmse-mean"])
plt.plot(np.arange(1,res.shape[0]+1,1), res["test-rmse-mean"])
plt.title("rmse plot")
plt.legend(['train','test']) # show the legend
plt.show()
import hyperopt
from hyperopt import fmin, tpe, hp, partial
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import mean_squared_error, zero_one_loss
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning)
# Accumulate per-trial CV curves so the whole search can be plotted later.
train_rmse_list = []
test_rmse_list = []


def hyperopt_objective(params):
    """Hyperopt objective: mean 5-fold CV RMSE of an XGBoost model.

    params: dict with 'max_depth', 'learning_rate' and 'n_estimators'
    sampled from the search space below. Appends the CV curves to the
    module-level lists and returns the minimum mean test RMSE, which
    hyperopt minimises.
    """
    global train_rmse_list
    global test_rmse_list
    model = xgb.XGBRegressor(
        # hp.randint draws from [0, n); shift so depth is at least 1
        max_depth=int(params['max_depth'])+1,
        learning_rate=params['learning_rate'],
        objective='reg:squarederror',
        eval_metric='rmse',
        seed=42,
        nthread=-1,
        enable_categorical = False,
        verbosity=0,
    )
    # Fixes: (1) removed `silent=1` — the parameter no longer exists in
    # current xgboost; (2) the original put n_estimators in the sklearn
    # params but ran xgb.cv with a fixed num_boost_round=10, so tuning
    # n_estimators had NO effect — forward the sampled value instead
    # (+1 because hp.randint can draw 0).
    res = xgb.cv(
        model.get_params(),
        dtrain,
        num_boost_round=int(params['n_estimators'])+1,
        nfold=5,
        verbose_eval = True,
    )
    train_rmse_list += list(res["train-rmse-mean"])
    test_rmse_list += list(res["test-rmse-mean"])
    return np.min(res['test-rmse-mean'])  # as hyperopt minimises
from numpy.random import RandomState
# Search space. hp.randint samples integers from [0, n); the objective adds
# +1 to max_depth, so interpret the reported `best` values with that shift
# in mind (the final print below also warns about this).
params_space = {
'max_depth': hp.randint('max_depth', 6),
'learning_rate': hp.uniform('learning_rate', 1e-3, 5e-1),
"n_estimators": hp.randint("n_estimators", 300),
}
trials = hyperopt.Trials()
# Run 10 TPE-guided evaluations of the objective.
best = fmin(
hyperopt_objective,
space=params_space,
algo=hyperopt.tpe.suggest,
max_evals=10,
trials=trials,
)
print("\n展示hyperopt获取的最佳结果,但是要注意的是我们对hyperopt最初的取值范围做过一次转换")
print(best)
# Plot the concatenated CV RMSE curves collected across all hyperopt trials.
plt.figure(figsize=(10,10))
for curve, curve_label in ((train_rmse_list, 'train'), (test_rmse_list, 'test')):
    plt.plot(np.arange(1, len(curve) + 1, 1), curve, label=curve_label)
plt.title("rmse plot")
plt.legend()  # show the legend
plt.show()
回顾: