Automatic Hyperparameter Tuning with Optuna

While doing machine learning homework I got tired of tuning hyperparameters by hand (at one point I even considered writing a simple auto-tuning function myself, but figured a Python library for this must already exist, hence this article). At the end of the post I use hw01 from Hung-yi Lee's machine learning course as an example of where to plug this code in.
A side note on the course: the 2023 edition mostly covers cutting-edge topics and reuses earlier recordings for the fundamentals, so if the 2023 course feels slow (it has only just started), you can watch the 2022 lectures instead.
Hung-yi Lee's Spring 2023 Machine Learning course
Hung-yi Lee's Spring 2021/2022 Machine Learning course
Study — Optuna 1.4.0 documentation

Assume you have already written your model class and a trainer() function, where trainer() returns the model's performance on the validation set.

If that is the case, you can use the template below directly: replace YourModelClass() and the trainer() call with your own, then edit the entries in the hyperparameters dictionary to define what gets tuned.

Note: set direction to match your objective ('minimize' for a loss, 'maximize' for an accuracy), adjust n_trials as needed, and put every function call that depends on a tuned hyperparameter inside objective().

import optuna

def objective(trial):
    # Define the hyperparameter search space.
    # suggest_float(..., log=True) is the current API; older Optuna versions used suggest_loguniform.
    hyperparameters = {
        'learning_rate': trial.suggest_float('learning_rate', 1e-5, 1e-1, log=True),
        'batch_size': trial.suggest_categorical('batch_size', [16, 32, 64]),
    }

    # Train the model and return its performance on the validation set.
    model = YourModelClass()
    performance = trainer(model, hyperparameters)

    # In short: as long as trainer() accepts the hyperparameters and
    # returns a performance value, this will run.
    return performance

# Run the hyperparameter search with Optuna.
study = optuna.create_study(direction='maximize')  # 'minimize' or 'maximize', depending on your objective
study.optimize(objective, n_trials=100)  # n_trials is the number of trials to run

# Print the best hyperparameter combination and its performance.
print('Best hyperparameters: {}'.format(study.best_params))
print('Best performance: {:.4f}'.format(study.best_value))
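
If you want to sanity-check the template before wiring in a real model, you can run it against stubs. Everything below is hypothetical scaffolding (this YourModelClass and trainer are fakes, and the "performance" formula is invented); its only purpose is to show the search loop converging:

import math
import optuna

class YourModelClass:  # fake model; the stub trainer never actually uses it
    pass

def trainer(model, hyperparameters):
    # Invented score that peaks at learning_rate=1e-3 and batch_size=32.
    lr_score = -abs(math.log10(hyperparameters['learning_rate']) + 3)
    bs_score = -abs(hyperparameters['batch_size'] - 32) / 32
    return lr_score + bs_score

def objective(trial):
    hyperparameters = {
        'learning_rate': trial.suggest_float('learning_rate', 1e-5, 1e-1, log=True),
        'batch_size': trial.suggest_categorical('batch_size', [16, 32, 64]),
    }
    return trainer(YourModelClass(), hyperparameters)

study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=30)
print(study.best_params)  # should end up near learning_rate=1e-3, batch_size=32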

A Worked Example

Before we start: if you have never tuned hyperparameters by hand, I suggest trying that first instead of treating tuning as a black box from the outset.
The Deep Learning Tuning Playbook is a good reference and may be helpful here.
The example below comes from homework01 of Hung-yi Lee's machine learning course; the full code is on Colab: Code.
If you are not doing this homework, don't spend time going through it, because I have not trimmed the example down to an easily digestible size.

Below is the sample code for the simple baseline written by the course TAs; I have removed some unimportant parts.

import math
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

# train_valid_split, COVID19Dataset, config, and device are defined elsewhere in the homework notebook.

class My_Model(nn.Module):
    def __init__(self, input_dim):
        super(My_Model, self).__init__()
        self.layers = nn.Sequential(
            nn.Linear(input_dim, 16),
            nn.ReLU(),
            nn.Linear(16, 8),
            nn.ReLU(),
            nn.Linear(8, 1)
        )

    def forward(self, x):
        x = self.layers(x)
        x = x.squeeze(1) # (B, 1) -> (B)
        return x

# Feature Selection
def select_feat(train_data, valid_data, test_data, select_all=True):
    '''Selects useful features to perform regression'''
    y_train, y_valid = train_data[:,-1], valid_data[:,-1]
    raw_x_train, raw_x_valid, raw_x_test = train_data[:,:-1], valid_data[:,:-1], test_data

    if select_all:
        feat_idx = list(range(raw_x_train.shape[1]))
    else:
        feat_idx = [0,1,2,3,4]
        
    return raw_x_train[:,feat_idx], raw_x_valid[:,feat_idx], raw_x_test[:,feat_idx], y_train, y_valid

def trainer(train_loader, valid_loader, model, config, device):

    criterion = nn.MSELoss(reduction='mean')

    optimizer = torch.optim.SGD(model.parameters(), lr=config['learning_rate']) 

    n_epochs, best_loss, step, early_stop_count = 5000, math.inf, 0, 0

    for epoch in range(n_epochs):
        model.train() # Set your model to train mode.
        loss_record = []

        for x, y in train_loader:
            optimizer.zero_grad()               # Set gradient to zero.
            x, y = x.to(device), y.to(device)   # Move your data to device. 
            pred = model(x)             
            loss = criterion(pred, y)
            loss.backward()                     # Compute gradient(backpropagation).
            optimizer.step()                    # Update parameters.
            step += 1
            loss_record.append(loss.detach().item())

        mean_train_loss = sum(loss_record)/len(loss_record)

        model.eval() # Set your model to evaluation mode.
        loss_record = []
        for x, y in valid_loader:
            x, y = x.to(device), y.to(device)
            with torch.no_grad():
                pred = model(x)
                loss = criterion(pred, y)

            loss_record.append(loss.item())
            
        mean_valid_loss = sum(loss_record)/len(loss_record)

        if mean_valid_loss < best_loss:
            best_loss = mean_valid_loss
            early_stop_count = 0
        else: 
            early_stop_count += 1

        if early_stop_count >= 400:
            print('\nModel is not improving, so we halt the training session.')
            return best_loss

    # The TA's original trainer returns nothing; returning the best validation
    # loss (here and on early stopping above) is what lets Optuna score a trial.
    return best_loss

# Load and split the data
train_data, test_data = pd.read_csv('./covid_train.csv').values, pd.read_csv('./covid_test.csv').values
train_data, valid_data = train_valid_split(train_data, 0.2, 5201314)

# Select features
x_train, x_valid, x_test, y_train, y_valid = select_feat(train_data, valid_data, test_data, True)

train_dataset, valid_dataset, test_dataset = COVID19Dataset(x_train, y_train), \
                                            COVID19Dataset(x_valid, y_valid), \
                                            COVID19Dataset(x_test)

# Pytorch data loader loads pytorch dataset into batches.
train_loader = DataLoader(train_dataset, batch_size=config['batch_size'], shuffle=True, pin_memory=True)
valid_loader = DataLoader(valid_dataset, batch_size=config['batch_size'], shuffle=True, pin_memory=True)
test_loader = DataLoader(test_dataset, batch_size=config['batch_size'], shuffle=False, pin_memory=True)
	
model = My_Model(input_dim=x_train.shape[1]).to(device) # put your model and data on the same computation device.
trainer(train_loader, valid_loader, model, config, device)
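
One optional refinement (not part of the TA's code): Optuna can prune unpromising trials partway through training. Below is a minimal sketch of a pruning-aware variant; trainer_with_pruning is a hypothetical name, and it assumes objective() passes the trial object through:

import optuna

def trainer_with_pruning(train_loader, valid_loader, model, config, device, trial):
    criterion = nn.MSELoss(reduction='mean')
    optimizer = torch.optim.SGD(model.parameters(), lr=config['learning_rate'])
    best_loss = math.inf

    for epoch in range(5000):
        model.train()
        for x, y in train_loader:
            optimizer.zero_grad()
            x, y = x.to(device), y.to(device)
            loss = criterion(model(x), y)
            loss.backward()
            optimizer.step()

        model.eval()
        losses = []
        for x, y in valid_loader:
            x, y = x.to(device), y.to(device)
            with torch.no_grad():
                losses.append(criterion(model(x), y).item())
        mean_valid_loss = sum(losses) / len(losses)
        best_loss = min(best_loss, mean_valid_loss)

        # Report the intermediate result; the study's pruner may cut this trial short.
        trial.report(mean_valid_loss, epoch)
        if trial.should_prune():
            raise optuna.TrialPruned()

    return best_loss

Optuna catches optuna.TrialPruned itself and records the trial as pruned rather than failed; inside objective() you would call trainer_with_pruning(train_loader, valid_loader, model, config, device, trial) in place of trainer().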

Now that we have our own model class and trainer(), we can plug them straight into the template above:

import optuna

def objective(trial):
    # Define the hyperparameter search space.
    config = {
        'learning_rate': trial.suggest_float('learning_rate', 1e-5, 1e-1, log=True),
        'batch_size': trial.suggest_categorical('batch_size', [16, 32, 64]),
    }

    # Load and split the data.
    train_data, test_data = pd.read_csv('./covid_train.csv').values, pd.read_csv('./covid_test.csv').values
    train_data, valid_data = train_valid_split(train_data, 0.2, 5201314)

    # Feature selection.
    x_train, x_valid, x_test, y_train, y_valid = select_feat(train_data, valid_data, test_data, True)

    train_dataset, valid_dataset, test_dataset = COVID19Dataset(x_train, y_train), \
                                                COVID19Dataset(x_valid, y_valid), \
                                                COVID19Dataset(x_test)

    # Pytorch data loader loads pytorch dataset into batches.
    train_loader = DataLoader(train_dataset, batch_size=config['batch_size'], shuffle=True, pin_memory=True)
    valid_loader = DataLoader(valid_dataset, batch_size=config['batch_size'], shuffle=True, pin_memory=True)
    test_loader = DataLoader(test_dataset, batch_size=config['batch_size'], shuffle=False, pin_memory=True)

    # Train and return the performance (best validation loss) from trainer().
    model = My_Model(input_dim=x_train.shape[1]).to(device)  # put model and data on the same device
    performance = trainer(train_loader, valid_loader, model, config, device)

    # In short: as long as trainer() accepts config and returns a performance value, this works.
    return performance

# Run the hyperparameter search with Optuna.
study = optuna.create_study(direction='minimize')  # we return a validation loss, so minimize it
study.optimize(objective, n_trials=100)

# Print the best hyperparameter combination and its performance.
print('Best hyperparameters: {}'.format(study.best_params))
print('Best performance: {:.4f}'.format(study.best_value))
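
Once the search finishes, a common final step (not shown in the homework) is to retrain a model with the best configuration and keep its weights. A minimal sketch, assuming the datasets from the baseline snippet are still in scope; the checkpoint path is an arbitrary choice. Because the suggest_* calls use the same names as the config keys, study.best_params can serve as the config directly:

best_config = dict(study.best_params)  # e.g. {'learning_rate': ..., 'batch_size': ...}

train_loader = DataLoader(train_dataset, batch_size=best_config['batch_size'], shuffle=True, pin_memory=True)
valid_loader = DataLoader(valid_dataset, batch_size=best_config['batch_size'], shuffle=True, pin_memory=True)

model = My_Model(input_dim=x_train.shape[1]).to(device)
best_loss = trainer(train_loader, valid_loader, model, best_config, device)
print('Retrained best validation loss: {:.4f}'.format(best_loss))
torch.save(model.state_dict(), './best_model.ckpt')  # hypothetical save path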
