P15 Hands-on: Kaggle House Price Prediction

1. Implement a few helper functions to make downloading data convenient (define a function that downloads a file and returns its local filename)

Import the required libraries and set up where the data lives:

import hashlib
import os
import tarfile
import zipfile
import requests

DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
The download function downloads a dataset, caches it locally (under ../data by default), and returns the name of the downloaded file:
def download(name, cache_dir=os.path.join('..', 'data')):
    assert name in DATA_HUB, f"{name} does not exist in {DATA_HUB}"
    url, sha1_hash = DATA_HUB[name]  # the original mistyped sha1 as 'shal'
    os.makedirs(cache_dir, exist_ok=True)
    fname = os.path.join(cache_dir, url.split('/')[-1])
    if os.path.exists(fname):
        sha1 = hashlib.sha1()
        with open(fname, 'rb') as f:
            while True:
                data = f.read(1048576)  # hash the file in 1 MiB chunks
                if not data:
                    break
                sha1.update(data)
        if sha1.hexdigest() == sha1_hash:
            return fname  # cache hit: local copy matches the expected SHA-1
    print(f'Downloading {fname} from {url}...')
    r = requests.get(url, stream=True, verify=True)
    with open(fname, 'wb') as f:
        f.write(r.content)  # note: r.content reads the whole body at once despite stream=True
    return fname
Define two more functions: one downloads and extracts a zip or tar file; the other downloads every dataset the book uses from DATA_HUB into the cache directory.
def download_extract(name, folder=None):  # download and extract a zip or tar file
    fname = download(name)
    base_dir = os.path.dirname(fname)
    data_dir, ext = os.path.splitext(fname)
    if ext == '.zip':
        fp = zipfile.ZipFile(fname, 'r')
    elif ext in ('.tar', '.gz'):
        fp = tarfile.open(fname, 'r')
    else:
        assert False, 'Only zip/tar files can be extracted'
    fp.extractall(base_dir)
    return os.path.join(base_dir, folder) if folder else data_dir

def download_all():  # download all files registered in DATA_HUB
    for name in DATA_HUB:
        download(name)
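Neither helper is strictly needed in this chapter, since the Kaggle files are plain CSVs, but as an illustration, a hypothetical zipped dataset registered in DATA_HUB could be fetched and unpacked like this (the entry name and hash below are made up, not a real dataset):

# Hypothetical usage sketch: 'some_archive' is not a real entry in this chapter
DATA_HUB['some_archive'] = (DATA_URL + 'some_archive.zip', '<sha1-of-the-file>')
data_dir = download_extract('some_archive')  # downloads, verifies the hash, extracts next to the archive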

2. Load and inspect the data with pandas

import numpy as np
import pandas as pd
import torch
from torch import nn
from d2l import torch as d2l

DATA_HUB['kaggle_house_train'] = (DATA_URL + 'kaggle_house_pred_train.csv', '585e9cc93e70b39160e7921475f9bcd7d31219ce')
DATA_HUB['kaggle_house_test'] = (DATA_URL + 'kaggle_house_pred_test.csv', 'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')
# Load the two CSV files containing the training and test data with pandas
train_data = pd.read_csv(download('kaggle_house_train'))
test_data = pd.read_csv(download('kaggle_house_test'))

print('train_data', train_data)  # [1460 rows x 81 columns]
print('test_data', test_data)    # [1459 rows x 80 columns]
# The two sets have a similar number of samples, but train has one more column
# than test: the training data is labeled (it includes SalePrice)

print(train_data.iloc[0:4, [0, 1, 2, 3, -3, -2, -1]])
# Print the first 4 rows, showing the first four and the last three columns

Output:

'''
   Id  MSSubClass MSZoning  LotFrontage SaleType SaleCondition  SalePrice
0   1          60       RL         65.0       WD        Normal     208500
1   2          20       RL         80.0       WD        Normal     181500
2   3          60       RL         68.0       WD        Normal     223500
3   4          70       RL         60.0       WD       Abnorml     140000
'''
From this you can see that Id should not go into training. The other columns are features: MSZoning is the zoning of the house's location, LotFrontage is how much street frontage the lot has, SaleType is the type of sale, SaleCondition its condition, and SalePrice is the target we need to predict. We make predictions from these input features.
# Drop the first column (Id) from both sets and stack the remaining features with
# pandas' concat; train_data also excludes its last column, the SalePrice label
all_features = pd.concat((train_data.iloc[:, 1:-1], test_data.iloc[:, 1:]))
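A quick sanity check on the concatenated frame (the numbers follow from the shapes printed above: 1460 + 1459 rows, and 81 − 2 = 80 − 1 = 79 feature columns once Id and SalePrice are dropped):

print(all_features.shape)  # (2919, 79)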

3. Data preprocessing

Replace all missing values with the mean of the corresponding feature. To put all features on a common scale, standardize the data by rescaling each feature to zero mean and unit variance, i.e. x ← (x − μ)/σ:
numeric_features = all_features.dtypes[all_features.dtypes != 'object'].index
# Identify the numeric features (non-object dtypes) vs. the text features
# (note: an extra space inside 'object' here previously caused an error)
all_features[numeric_features] = all_features[numeric_features].apply(lambda x: (x - x.mean()) / (x.std()))
# For each numeric column, subtract its mean and divide by its standard deviation.
# Train and test are standardized together here for simplicity; in practice the
# statistics should be computed on the training set only.
all_features[numeric_features] = all_features[numeric_features].fillna(0)
# After standardization each column's mean is 0, so filling NaN with 0 imputes the mean
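A toy example (not part of the original pipeline) showing why filling with 0 is the right imputation here: after standardization the observed values of a column average to 0, so 0 is exactly the feature mean:

s = pd.Series([1.0, 2.0, None, 4.0])
s_std = (s - s.mean()) / s.std()  # NaN stays NaN; the other values now average to 0
print(s_std.fillna(0))            # the missing entry is imputed with the new mean, 0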
Next, handle the string-valued (discrete) features by replacing them with one-hot encodings, which the pandas package can do directly. Setting dummy_na=True treats NA (missing values) as a valid category and creates an indicator feature for it:
all_features = pd.get_dummies(all_features, dummy_na=True)
print(all_features.shape)
# (2919, 331): one-hot encoding expands the 79 raw columns into 331 features
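One environment-dependent caveat (an observation about newer library versions, not from the original notes): since pandas 2.0, get_dummies produces bool columns, and the torch.tensor(...) conversion below may then fail with an object-dtype error. If that happens, casting the whole frame to a float dtype first is a safe workaround:

# Only needed on pandas versions where get_dummies yields bool columns
all_features = all_features.astype('float32')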
Extract the NumPy arrays from pandas and convert them to tensors:
n_train = train_data.shape[0]
train_features = torch.tensor(all_features[:n_train].values, dtype=torch.float32)
test_features = torch.tensor(all_features[n_train:].values, dtype=torch.float32)
train_labels = torch.tensor(train_data.SalePrice.values.reshape(-1, 1), dtype=torch.float32)
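A quick shape check (the values follow from the row counts printed earlier):

print(train_features.shape, test_features.shape, train_labels.shape)
# torch.Size([1460, 331]) torch.Size([1459, 331]) torch.Size([1460, 1])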

4. Training

loss = nn.MSELoss()
in_features = train_features.shape[1]
# i.e., 331


def get_net():
    net = nn.Sequential(nn.Linear(in_features, 1))
    return net
# The baseline is a single-layer linear regression model
Here we also care about relative error, (y − ŷ)/y, i.e. (true value − predicted value)/true value. Because house prices span a wide range, relative error is the right metric here; it is not necessarily appropriate for other problems. Working with the logarithm turns relative error into an absolute one: log ŷ − log y = log(ŷ/y), so a fixed gap in log price corresponds to a fixed percentage gap in price.
def log_rmse(net, features, labels):
    # Clamp predictions to [1, inf) so that log stays finite even for wild outputs
    clipped_preds = torch.clamp(net(features), 1, float('inf'))
    rmse = torch.sqrt(loss(torch.log(clipped_preds), torch.log(labels)))
    # Take the log of both predictions and labels, compute their MSE, then the
    # square root: an ordinary rmse, just measured in log space
    return rmse.item()
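A small illustrative check (not in the original notes): a constant 10% over-prediction yields the same log rmse no matter the price scale, which is exactly the relative-error behavior we want:

y = torch.tensor([[100000.0], [300000.0]])
y_hat = 1.1 * y  # every prediction is 10% too high
print(torch.sqrt(loss(torch.log(y_hat), torch.log(y))))  # ~0.0953, i.e. log(1.1)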
The training function is the same as before; the only difference is that it uses the Adam optimizer instead of the SGD we used previously. Adam's main advantage is that it is much less sensitive to the learning rate.
def train(net, train_features, train_labels, test_features, test_labels,
          num_epochs, learning_rate, weight_decay, batch_size):
    train_ls, test_ls = [], []
    train_iter = d2l.load_array((train_features, train_labels), batch_size)
    optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate,
                                 weight_decay=weight_decay)
    for epoch in range(num_epochs):
        for X, y in train_iter:
            optimizer.zero_grad()
            l = loss(net(X), y)
            l.backward()
            optimizer.step()
        train_ls.append(log_rmse(net, train_features, train_labels))
        if test_labels is not None:
            test_ls.append(log_rmse(net, test_features, test_labels))
    return train_ls, test_ls

#K-fold cross-validation: it helps with model selection and hyperparameter tuning.
#First we define a function that returns the i-th fold of the data during K-fold
#cross-validation. Concretely, it selects the i-th slice as the validation data
#and the rest as the training data.
def get_k_fold_data(k, i, X, y):
    assert k > 1
    fold_size = X.shape[0] // k
    X_train, y_train = None, None
    for j in range(k):
        idx = slice(j * fold_size, (j + 1) * fold_size)
        X_part, y_part = X[idx, :], y[idx]
        if j == i:
            X_valid, y_valid = X_part, y_part  # the i-th slice is the validation fold
        elif X_train is None:
            X_train, y_train = X_part, y_part
        else:
            X_train = torch.cat([X_train, X_part])
            y_train = torch.cat([y_train, y_part])
    return X_train, y_train, X_valid, y_valid
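An illustrative check of the slicing (numbers assume our 1460 training samples and k = 5, so fold_size = 292; the integer division would silently drop any rows beyond k * fold_size, though 1460 happens to divide evenly):

X_tr, y_tr, X_va, y_va = get_k_fold_data(5, 0, train_features, train_labels)
print(X_tr.shape, X_va.shape)  # torch.Size([1168, 331]) torch.Size([292, 331])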

#After training K times in K-fold cross-validation, return the average of the training and validation errors
def k_fold(k, X_train, y_train, num_epochs, learning_rate, weight_decay, batch_size):
    train_l_sum, valid_l_sum = 0, 0
    for i in range(k):
        data = get_k_fold_data(k, i, X_train, y_train)
        net = get_net()  # train a fresh model on each fold
        train_ls, valid_ls = train(net, *data, num_epochs, learning_rate,
                                   weight_decay, batch_size)
        train_l_sum += train_ls[-1]
        valid_l_sum += valid_ls[-1]
        if i == 0:
            d2l.plot(list(range(1, num_epochs + 1)), [train_ls, valid_ls],
                     xlabel='epoch', ylabel='rmse', xlim=[1, num_epochs],
                     legend=['train', 'valid'], yscale='log')
        print(f'fold {i + 1}, train log rmse {float(train_ls[-1]):f}, '
              f'valid log rmse {float(valid_ls[-1]):f}')
    return train_l_sum / k, valid_l_sum / k

Model selection
k, num_epochs, lr, weight_decay, batch_size = 5, 200, 0.6, 0, 64
# Tuning means adjusting these hyperparameters repeatedly, checking the average
# validation rmse, and keeping the best combination
train_l, valid_l = k_fold(k, train_features, train_labels, num_epochs, lr, weight_decay, batch_size)
print(f'{k}-fold validation: avg train log rmse: {float(train_l):f}, '
      f'avg valid log rmse: {float(valid_l):f}')

With lr = 0.6 and epochs = 200, the average validation log rmse is 0.70.

With lr = 0.6 and epochs = 100, the average validation log rmse is 1.33.

With lr = 5 and epochs = 100, the average validation log rmse is 0.16.

With lr = 5 and epochs = 200, the average validation log rmse is 0.15.

With lr = 15 and epochs = 100, the average validation log rmse is 0.15.

With lr = 15 and epochs = 200, the average validation log rmse is 0.1504.

My takeaway: for this house price task the learning rate doesn't need to be small. Anything from about 5 upward performs similarly, while values around 1 or below do noticeably worse.
