2-3 Dive into Deep Learning: Kaggle House Price Prediction


%matplotlib inline
import gluonbook as gb
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import data as gdata, loss as gloss, nn
import pandas as pd
import numpy as np

# Read in the data
train_data = pd.read_csv('../data/kaggle_house_pred_train.csv')
test_data = pd.read_csv('../data/kaggle_house_pred_test.csv')

# print(train_data.shape)  # (1460, 81)
# print(test_data.shape)   # (1459, 80)
# train_data.iloc[0:4, [0, 1, 2, 3, -3, -2, -1]]  # rows first: the first 4
# features, the last 2 features, and the label (SalePrice) of the first 4 examples
# The first feature is Id. It helps the model memorize each training example,
# but it does not generalize to the test examples, so we do not use it for
# training. We concatenate the 79 features of all training and test examples
# along the sample dimension.

all_features = pd.concat((train_data.iloc[:, 1:-1], test_data.iloc[:, 1:]))
# all_features.shape  (2919, 79)


# Preprocess the data
# We standardize each continuous numerical feature: let μ be the feature's
# mean over the entire dataset and σ its standard deviation. Each value of
# the feature is standardized by subtracting μ and then dividing by σ.
# Missing values are then replaced with the feature's mean, which is 0
# after standardization.

numeric_features = all_features.dtypes[all_features.dtypes != 'object'].index
all_features[numeric_features] = all_features[numeric_features].apply(
    lambda x: (x - x.mean()) / x.std())
all_features = all_features.fillna(all_features.mean())
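
# A quick sanity check of the same transformation on a toy column (a
# minimal sketch; the values below are made up):
demo = pd.DataFrame({'x': [1.0, 2.0, 3.0, np.nan]})
demo['x'] = (demo['x'] - demo['x'].mean()) / demo['x'].std()
demo = demo.fillna(demo.mean())
print(demo['x'].values)  # [-1.  0.  1.  0.]: the NaN becomes the (zero) mean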

# Next, convert the discrete values into indicator features. For example,
# suppose the feature MSZoning has two distinct discrete values, RL and RM.
# This step removes the MSZoning feature and adds two new features,
# MSZoning_RL and MSZoning_RM, whose values are 0 or 1. If an example's
# original MSZoning value is RL, then MSZoning_RL=1 and MSZoning_RM=0.

# dummy_na=True treats missing values as legal feature values and creates
# indicator features for them as well.
all_features = pd.get_dummies(all_features, dummy_na=True)

# all_features.shape  (2919, 331)
# This step increases the number of features from 79 to 331.
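
# A toy check of the indicator encoding (a minimal sketch; the MSZoning
# values below are made up, not rows from the real dataset):
demo = pd.DataFrame({'MSZoning': ['RL', 'RM', np.nan]})
print(pd.get_dummies(demo, dummy_na=True))
#    MSZoning_RL  MSZoning_RM  MSZoning_nan
# 0            1            0             0
# 1            0            1             0
# 2            0            0             1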

# Finally, obtain the data in NumPy format via the values attribute and
# convert it to NDArray for the training that follows.
n_train = train_data.shape[0]
train_features = nd.array(all_features[:n_train].values)
test_features = nd.array(all_features[n_train:].values)
train_labels = nd.array(train_data.SalePrice.values).reshape((-1, 1))

# Train the model
# We train with the squared loss. The network below extends a basic linear
# regression model with one hidden layer of 64 ReLU units.
loss = gloss.L2Loss()

def get_net():
    net = nn.Sequential()
    net.add(nn.Dense(64, activation='relu'), nn.Dense(1))
    net.initialize()
    return net
def log_rmse(net, features, labels):
    # Clip predictions to [1, +inf) so that taking the log is numerically safe
    clipped_preds = nd.clip(net(features), 1, float('inf'))
    # L2Loss computes (y_hat - y)**2 / 2, so multiply by 2 to recover the
    # plain mean squared error before the square root
    rmse = nd.sqrt(2 * loss(clipped_preds.log(), labels.log()).mean())
    return rmse.asscalar()
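
# Kaggle evaluates this competition on the RMSE between the logarithms of
# the predicted and observed prices, which is what log_rmse computes.
# A toy check with plain NumPy (the prices are made up):
p = np.array([120000., 90000.])   # predicted prices
y = np.array([100000., 100000.])  # observed prices
print(np.sqrt(np.mean((np.log(p) - np.log(y)) ** 2)))  # ~0.1489
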
# The Adam optimization algorithm: compared with the mini-batch stochastic
# gradient descent used earlier, it is relatively insensitive to the
# learning rate.
def train(net, train_features, train_labels, test_features, test_labels,
          num_epochs, learning_rate, weight_decay, batch_size):
    train_ls, test_ls = [], []
    train_iter = gdata.DataLoader(gdata.ArrayDataset(
        train_features, train_labels), batch_size, shuffle=True)
    # The Adam optimization algorithm is used here.
    trainer = gluon.Trainer(net.collect_params(), 'adam', {
        'learning_rate': learning_rate, 'wd': weight_decay})
    for epoch in range(num_epochs):
        for X, y in train_iter:
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            trainer.step(batch_size)
        train_ls.append(log_rmse(net, train_features, train_labels))
        if test_labels is not None:
            test_ls.append(log_rmse(net, test_features, test_labels))
    return train_ls, test_ls
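
# A quick smoke test of train() on synthetic data (a sketch: the shapes,
# labels, and hyperparameters below are arbitrary, chosen only to confirm
# that the training loop runs):
X_demo = nd.random.normal(shape=(50, 331))
y_demo = nd.abs(nd.random.normal(shape=(50, 1))) + 1  # positive labels keep log() safe
ls_demo, _ = train(get_net(), X_demo, y_demo, None, None, 5, 0.01, 0, 10)
print(ls_demo)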

# K-fold cross-validation. It will be used to select the model design and
# tune the hyperparameters. The function below returns the training and
# validation data needed for the i-th fold.
def get_k_fold_data(k, i, X, y):
    assert k > 1
    fold_size = X.shape[0] // k  # // is integer division; leftover rows are dropped
    X_train, y_train = None, None
    for j in range(k):
        # slice() builds a slice object, used here to index one fold of rows
        idx = slice(j * fold_size, (j + 1) * fold_size)
        X_part, y_part = X[idx, :], y[idx]
        if j == i:
            X_valid, y_valid = X_part, y_part
        elif X_train is None:
            X_train, y_train = X_part, y_part
        else:
            X_train = nd.concat(X_train, X_part, dim=0)
            y_train = nd.concat(y_train, y_part, dim=0)
    return X_train, y_train, X_valid, y_valid
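
# A toy check that the folds partition the rows as expected (k and the
# array sizes below are arbitrary):
X_small = nd.arange(10).reshape((10, 1))
y_small = nd.arange(10)
X_t, y_t, X_v, y_v = get_k_fold_data(5, 1, X_small, y_small)
print(X_t.shape, X_v.shape)  # (8, 1) (2, 1); rows 2 and 3 form the validation fold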


# In K-fold cross-validation we train K times and return the average
# training and validation errors.
def k_fold(k, X_train, y_train, num_epochs,
           learning_rate, weight_decay, batch_size):
    train_l_sum, valid_l_sum = 0, 0
    for i in range(k):
        data = get_k_fold_data(k, i, X_train, y_train)
        net = get_net()
        # *data unpacks the four arrays returned by get_k_fold_data
        train_ls, valid_ls = train(net, *data, num_epochs, learning_rate,
                                   weight_decay, batch_size)
        train_l_sum += train_ls[-1]
        valid_l_sum += valid_ls[-1]
        if i == k - 1:  # plot the learning curves for the last fold
            gb.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'rmse',
                        range(1, num_epochs + 1), valid_ls,
                        ['train', 'valid'])
        print('fold %d, train rmse: %f, valid rmse: %f' % (
            i, train_ls[-1], valid_ls[-1]))
    return train_l_sum / k, valid_l_sum / k

k, num_epochs, lr, weight_decay, batch_size = 5, 100, 5, 0, 64
train_l, valid_l = k_fold(k, train_features, train_labels, num_epochs, lr,
                          weight_decay, batch_size)
print('%d-fold validation: avg train rmse: %f, avg valid rmse: %f'
      % (k, train_l, valid_l))

# Before predicting, retrain the model on the complete training set and
# save the predictions in the format required for submission.
def train_and_pred(train_features, test_features, train_labels, test_data,
                   num_epochs, lr, weight_decay, batch_size):
    net = get_net()
    train_ls, _ = train(net, train_features, train_labels, None, None,
                        num_epochs, lr, weight_decay, batch_size)
    gb.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'rmse')
    print('train rmse %f' % train_ls[-1])
    preds = net(test_features).asnumpy()
    # Flatten the (n_test, 1) predictions to a 1-D array before wrapping
    # them in a Series
    test_data['SalePrice'] = pd.Series(preds.reshape(1, -1)[0])
    submission = pd.concat([test_data['Id'], test_data['SalePrice']], axis=1)
    submission.to_csv('submission.csv', index=False)
train_and_pred(train_features, test_features, train_labels, test_data,
               num_epochs, lr, weight_decay, batch_size)

 
