LightningModule将PyTorch代码整理成5个部分:
例子:
import pytorch_lightning as pl
class LitModel(pl.LightningModule):
    """Minimal LightningModule: a single linear layer classifying MNIST digits."""

    def __init__(self):
        super().__init__()
        # 28*28 flattened input pixels -> 10 class logits
        self.l1 = torch.nn.Linear(28 * 28, 10)

    def forward(self, x):
        # Flatten (N, 1, 28, 28) -> (N, 784), then ReLU over the logits.
        return torch.relu(self.l1(x.view(x.size(0), -1)))

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        # Returning the loss tensor is all Lightning needs for backprop.
        return loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.02)
可以看出:
训练方法:
train_loader = DataLoader(MNIST(os.getcwd(), download=True, transform=transforms.ToTensor()))
trainer = pl.Trainer()
model = LitModel()
trainer.fit(model, train_loader)
如果需要对每个training_step的输出做一些操作,可以通过改写training_epoch_end来实现
def training_step(self, batch, batch_idx):
    """One optimization step; extra dict keys are forwarded to training_epoch_end."""
    x, y = batch
    y_hat = self.model(x)
    loss = F.cross_entropy(y_hat, y)
    preds = ...
    # 'loss' is required; any other key is passed through unchanged.
    return {'loss': loss, 'other_stuff': preds}

def training_epoch_end(self, training_step_outputs):
    """Called once per epoch with the list of all training_step outputs."""
    for pred in training_step_outputs:
        # do something
        ...
一个完整的案例:
import pytorch_lightning as pl
import torch
from torch import nn
class Autoencoder(pl.LightningModule):
    """MLP autoencoder for 28x28 images with a small latent bottleneck."""

    def __init__(self, latent_dim=2):
        super().__init__()
        # 784 -> 256 -> latent_dim, mirrored by the decoder.
        self.encoder = nn.Sequential(nn.Linear(28 * 28, 256), nn.ReLU(), nn.Linear(256, latent_dim))
        self.decoder = nn.Sequential(nn.Linear(latent_dim, 256), nn.ReLU(), nn.Linear(256, 28 * 28))

    def training_step(self, batch, batch_idx):
        # Labels are unused: this is unsupervised reconstruction.
        x, _ = batch
        # encode
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        # decode
        recons = self.decoder(z)
        # reconstruction
        reconstruction_loss = nn.functional.mse_loss(recons, x)
        return reconstruction_loss

    def validation_step(self, batch, batch_idx):
        # Same pipeline as training; only the metric is logged, nothing returned.
        x, _ = batch
        x = x.view(x.size(0), -1)
        z = self.encoder(x)
        recons = self.decoder(z)
        reconstruction_loss = nn.functional.mse_loss(recons, x)
        self.log('val_reconstruction', reconstruction_loss)

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=0.0002)
训练方式:
autoencoder = Autoencoder()
trainer = pl.Trainer(gpus=1)
trainer.fit(autoencoder, train_dataloader, val_dataloader)
在LightningModule中迭代不同的模型
# Reuse one task definition to train several backbone architectures in turn.
for model in [resnet50(), vgg16(), BidirectionalRNN()]:
    task = ClassificationTask(model)
    # A fresh Trainer per model: each fit starts from a clean training state.
    trainer = Trainer(gpus=2)
    trainer.fit(task, train_dataloader, val_dataloader)
(非常全面)
PL流程:初始化 def __init__(self) --> 训练 training_step(self, batch, batch_idx) --> 校验 validation_step(self, batch, batch_idx) --> 测试 test_step(self, batch, batch_idx)。总之就是实现这几个函数的重写。
更为完整的流程是在training_step 、validation_step、test_step 后面都紧跟着其相应的 training_step_end(self,batch_parts)和training_epoch_end(self, training_step_outputs) 函数,当然,对于校验和测试,都有相应的*_step_end和*_epoch_end函数。
def training_step(self, batch, batch_idx):
    """One optimization step; extra dict keys are forwarded to training_step_end."""
    x, y = batch
    y_hat = self.model(x)
    loss = F.cross_entropy(y_hat, y)
    pred = ...
    return {'loss': loss, 'pred': pred}

def training_step_end(self, batch_parts):
    """Aggregate the per-GPU outputs of a single training step.

    With gpus=0 or 1, ``batch_parts`` is simply the return value of
    ``training_step`` (verified).  With gpus>1 it is a list whose i-th
    element is ``training_step``'s return value from GPU i (not verified).
    """
    gpu_0_prediction = batch_parts[0]['pred']
    gpu_1_prediction = batch_parts[1]['pred']
    # do something with both outputs
    return (batch_parts[0]['loss'] + batch_parts[1]['loss']) / 2

def training_epoch_end(self, training_step_outputs):
    """Consume all step outputs at the end of one training epoch.

    With gpus=0 or 1, ``training_step_outputs`` is a list with one entry per
    training step.  Validation steps are excluded, so the list is shorter
    than the step count shown during training (which includes validation);
    set ``limit_val_batches=0.`` to disable validation and make them match.
    Each entry is a dict holding the keys returned by ``training_step_end``
    plus the device it was produced on, e.g. ``{device='cuda:0'}``.
    """
    for out in training_step_outputs:
        # do something with preds
        ...
默认为每1个epoch校验一次,即自动调用validation_step()函数
trainer = Trainer(check_val_every_n_epoch=1)
当一个epoch 比较大时,就需要在单个epoch 内进行多次校验,这时就需要对校验的调用频率进行修改, 传入val_check_interval的参数为float型时表示百分比,为int时表示batch:
# 每训练单个epoch的 25% 调用校验函数一次,注意:要传入float型数
trainer = Trainer(val_check_interval=0.25)
# 当然也可以是单个epoch训练完多少个batch后调用一次校验函数,但是一定是传入int型
trainer = Trainer(val_check_interval=100) # 每训练100个batch校验一次
当然,首先要自己先实现Dataset的定义,可以用现有的,例如MNIST等数据集,若用自己的数据集,则需要自己去继承torch.utils.data.dataset.Dataset,自定义类,这一部分不再细讲,查其他的资料。
直接实现:
直接实现是指在Model中重写def train_dataloader(self)等函数来返回dataloader:
class ExampleModel(pl.LightningModule):
    """Provide train/val/test dataloaders directly on the LightningModule."""

    def __init__(self, args):
        super().__init__()
        self.train_dataset = ...
        self.val_dataset = ...
        self.test_dataset = ...
        ...

    def train_dataloader(self):
        # NOTE(review): shuffle=False on the training set is unusual —
        # shuffle=True is normally wanted for training; confirm intent.
        return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=False, num_workers=0)

    def val_dataloader(self):
        return DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=False)

    def test_dataloader(self):
        return DataLoader(self.test_dataset, batch_size=1, shuffle=True)
自定义DataModule
这种方法是继承pl.LightningDataModule来提供训练、校验、测试的数据。
class MyDataModule(pl.LightningDataModule):
    """Standalone data module supplying train/val/test data to a Trainer."""

    def __init__(self):
        super().__init__()
        ...

    def setup(self, stage):
        # Build the datasets; this runs on every GPU.  `stage` marks which
        # phase ('fit' / 'test') the datasets are needed for.
        if stage == 'fit' or stage is None:
            self.train_dataset = DCKDataset(self.train_file_path, self.train_file_num, transform=None)
            self.val_dataset = DCKDataset(self.val_file_path, self.val_file_num, transform=None)
        if stage == 'test' or stage is None:
            self.test_dataset = DCKDataset(self.test_file_path, self.test_file_num, transform=None)

    def prepare_data(self):
        # One-time work such as downloading the dataset; runs only on cuda:0.
        pass

    def train_dataloader(self):
        return DataLoader(self.train_dataset, batch_size=self.batch_size, shuffle=False, num_workers=0)

    def val_dataloader(self):
        return DataLoader(self.val_dataset, batch_size=self.batch_size, shuffle=False)

    def test_dataloader(self):
        return DataLoader(self.test_dataset, batch_size=1, shuffle=True)
自动保存模型:
Lightning 会自动保存最近训练的epoch的模型到当前的工作空间(os.getcwd()),也可以在定义Trainer的时候指定:
trainer = Trainer(default_root_dir='/your/path/to/save/checkpoints')
当然,也可以关闭自动保存模型:
trainer = Trainer(checkpoint_callback=False)