Basic Usage of DistributedDataParallel in PyTorch

I. Introduction to DistributedDataParallel

Both nn.DistributedDataParallel and nn.DataParallel are ways to parallelize model training; the differences between them are as follows:

  • DistributedDataParallel supports both single-machine multi-GPU and multi-machine multi-GPU training, while DataParallel only supports single-machine multi-GPU.
  • With DataParallel, the model is copied from the primary GPU to the other GPUs in every forward pass, which introduces latency. In addition, once each GPU finishes its forward pass, the outputs are gathered back to the primary GPU to compute the loss. DistributedDataParallel's forward/backward flow is leaner: the forward pass, loss computation, and gradient computation all run independently in parallel in each process. The core of DistributedDataParallel's parallel training is gradient synchronization (an all-reduce across processes), and each GPU typically keeps its own optimizer.

As a result, parallel training with DistributedDataParallel is usually more efficient than with DataParallel.
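
As a rough sketch of how differently the two are wrapped (the Linear model below is just a placeholder; the DistributedDataParallel part assumes the script is launched with torchrun so that LOCAL_RANK and the rendezvous environment variables are set):

import os
import torch
import torch.nn as nn
from torch import distributed as dist

net = nn.Linear(10, 10)  # placeholder model for illustration

# DataParallel: a single process drives all visible GPUs; the model is
# replicated from the primary GPU to the others on every forward pass
dp_model = nn.DataParallel(net.cuda())

# DistributedDataParallel: one process per GPU; gradients are synchronized
# with all-reduce during backward (requires an initialized process group)
local_rank = int(os.environ.get("LOCAL_RANK", 0))
dist.init_process_group(backend="nccl")
torch.cuda.set_device(local_rank)
ddp_model = nn.parallel.DistributedDataParallel(
    net.cuda(local_rank), device_ids=[local_rank], output_device=local_rank
)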

II. Using DistributedDataParallel

This article only covers the simplest way to use it. The pseudocode is below, and every place that needs to change for DDP is marked with a comment.

1. train.py

from __future__ import print_function, division
import sys
sys.path.append('core')
import argparse
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
import evaluate
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from torch import distributed as dist

# Change 1: get the local process rank (set by the launcher), used to assign a GPU to this process
local_rank = int(os.environ["LOCAL_RANK"])

try:
    from torch.cuda.amp import GradScaler
except ImportError:
    # dummy GradScaler for PyTorch < 1.6
    class GradScaler:
        def __init__(self, enabled=False):
            pass

        def scale(self, loss):
            return loss

        def unscale_(self, optimizer):
            pass

        def step(self, optimizer):
            optimizer.step()

        def update(self):
            pass

SUM_FREQ = 100
VAL_FREQ = 5000

def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


def fetch_optimizer(args, model):
    """ Create the optimizer and learning rate scheduler """
    optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)
    scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps + 100, pct_start=0.05,
                                              cycle_momentum=False, anneal_strategy='linear')
    return optimizer, scheduler


class Logger:
    def __init__(self, model, scheduler):
        self.model = model
        self.scheduler = scheduler
        self.total_steps = 0
        self.running_loss = {}
        self.writer = None

    def _print_training_status(self):
        metrics_data = [self.running_loss[k] / SUM_FREQ for k in sorted(self.running_loss.keys())]
        training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps + 1, self.scheduler.get_last_lr()[0])
        metrics_str = ("{:10.4f}, " * len(metrics_data)).format(*metrics_data)
        # print the training status
        # print(training_str + metrics_str)
        if self.writer is None:
            self.writer = SummaryWriter()
        for k in self.running_loss:
            self.writer.add_scalar(k, self.running_loss[k] / SUM_FREQ, self.total_steps)
            self.running_loss[k] = 0.0

    def push(self, metrics):
        self.total_steps += 1
        for key in metrics:
            if key not in self.running_loss:
                self.running_loss[key] = 0.0
            self.running_loss[key] += metrics[key]
        if self.total_steps % SUM_FREQ == SUM_FREQ - 1:
            self._print_training_status()
            self.running_loss = {}

    def write_dict(self, results):
        if self.writer is None:
            self.writer = SummaryWriter()
        for key in results:
            self.writer.add_scalar(key, results[key], self.total_steps)

    def close(self):
        self.writer.close()


def train(args):
    # Change 4: model initialization; converting to SyncBatchNorm (commented out below) is optional for synchronized BN
    device = torch.device('cuda:{}'.format(local_rank))
    # model = nn.SyncBatchNorm.convert_sync_batchnorm(Mymodel(args)).to(device)
    model = Mymodel(args).to(device)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
    
    print("Parameter Count: %d" % count_parameters(model))
    if args.restore_ckpt is not None:
        model.load_state_dict(torch.load(args.restore_ckpt), strict=False)
    model.cuda()
    model.train()

    # Change 5: dataset loading; do not set shuffle here (DistributedSampler shuffles by default), other settings can be changed as needed
    train_dataset = MyDataset(args)
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, pin_memory=True,
                              num_workers=4, drop_last=True, sampler=train_sampler)

    optimizer, scheduler = fetch_optimizer(args, model)
    total_steps = 0
    scaler = GradScaler(enabled=args.mixed_precision)

    # Change 6: only one process (rank 0) logs to SummaryWriter and saves checkpoints; the same pattern repeats below without further comments
    if local_rank == 0:
        logger = Logger(model, scheduler)
    else:
        logger = None

    should_keep_training = True
    while should_keep_training:
        for i_batch, data_blob in enumerate(tqdm(train_loader)):
            optimizer.zero_grad()
            data, label = [x.cuda() for x in data_blob]
            prediction = model(data)
            loss, metrics = compute_loss(prediction, label)
            scaler.scale(loss).backward()
            scaler.unscale_(optimizer)
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
            scaler.step(optimizer)
            scheduler.step()
            scaler.update()
            if local_rank == 0:
                logger.push(metrics)
            if total_steps % VAL_FREQ == VAL_FREQ - 1:
                PATH = 'checkpoints/%d_%s.pth' % (total_steps + 1, args.name)
                if local_rank == 0:
                    torch.save(model.state_dict(), PATH)
                    results = {}
                    for val_dataset in args.validation:
                        if val_dataset == 'validate_dataset':
                            results.update(evaluate.validate(model.module))
                    logger.write_dict(results)
                    model.train()
            total_steps += 1
            if total_steps > args.num_steps:
                should_keep_training = False
                break
    if local_rank == 0:
        logger.close()
    PATH = 'checkpoints/%s.pth' % args.name
    if local_rank == 0:
        # save the final checkpoint from a single process only
        torch.save(model.state_dict(), PATH)
    return PATH


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--name', default='raft', help="name your experiment")
    parser.add_argument('--stage', help="determines which dataset to use for training")
    parser.add_argument('--restore_ckpt', help="restore checkpoint")
    parser.add_argument('--small', action='store_true', help='use small model')
    parser.add_argument('--validation', type=str, nargs='+')

    parser.add_argument('--lr', type=float, default=0.00002)
    parser.add_argument('--num_steps', type=int, default=100000)
    parser.add_argument('--batch_size', type=int, default=6)
    parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
    parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')

    parser.add_argument('--wdecay', type=float, default=.00005)
    parser.add_argument('--epsilon', type=float, default=1e-8)
    parser.add_argument('--clip', type=float, default=1.0)
    parser.add_argument('--dropout', type=float, default=0.0)
    parser.add_argument('--gamma', type=float, default=0.8, help='exponential weighting')
    args = parser.parse_args()

    torch.manual_seed(1234)
    np.random.seed(1234)
    if not os.path.isdir('checkpoints'):
        os.mkdir('checkpoints')

    # Change 2: divide batch_size by N (the number of GPUs), so args.batch_size becomes the per-GPU batch size
    assert args.batch_size % torch.cuda.device_count() == 0
    args.batch_size = args.batch_size // torch.cuda.device_count()

    # Change 3: initialize the process group and bind this process to its GPU
    dist.init_process_group(backend="nccl")
    torch.cuda.set_device(local_rank)
    device = torch.device('cuda:{}'.format(local_rank))

    train(args)
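    # Optionally tear down the process group once training has finished
    dist.destroy_process_group()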

2. train.sh

The launch command is shown below. CUDA_VISIBLE_DEVICES selects which GPUs to use, nproc_per_node is the number of GPUs, and master_port can be set to any port that no other process is using.

#!/bin/bash
mkdir -p checkpoints
CUDA_VISIBLE_DEVICES=1,3 torchrun --nproc_per_node=2 --master_port 38588 train.py --name exp_name \
                                                                                  --validation val_dataset_name \
                                                                                  --num_steps 120000 \
                                                                                  --batch_size 10 \
                                                                                  --lr 0.00025 \
                                                                                  --image_size 368 496 \
                                                                                  --wdecay 0.0001 \
                                                                                  --mixed_precision
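
For reference, torchrun populates a set of environment variables in every worker process; the LOCAL_RANK read at the top of train.py is one of them. A small sketch to inspect them (run it with torchrun, otherwise the variables will be unset):

import os

# Environment variables set by torchrun for each worker process
for key in ("RANK", "LOCAL_RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT"):
    print(key, "=", os.environ.get(key))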

III. Common Issues

  • The DataLoader does not need shuffle; DistributedSampler shuffles by default (shuffle=True). See the set_epoch sketch after this list.
  • SummaryWriter logging and checkpoint saving only need to happen in a single process (e.g. rank 0).
  • Whether to set find_unused_parameters to True or False: if you have confirmed that your model has no unused parameters, set it to False, which speeds up training further. To check whether your model has unused parameters, insert the following code between loss.backward() and optimizer.step():
for name, param in model.named_parameters():
    if param.grad is None:
        print(name)  # any parameter printed here received no gradient in this backward pass
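
One caveat on the first point above: DistributedSampler only reshuffles the data across epochs if it is told the current epoch via set_epoch. A minimal sketch (the TensorDataset is a placeholder, and the process group is assumed to be initialized as in train.py):

import torch
from torch.utils.data import DataLoader, TensorDataset

# Placeholder dataset purely for illustration
train_dataset = TensorDataset(torch.randn(100, 3), torch.randint(0, 2, (100,)))

# DistributedSampler shuffles by default; it needs an initialized process group
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
train_loader = DataLoader(train_dataset, batch_size=8, sampler=train_sampler)

for epoch in range(10):
    # Without set_epoch, every epoch reuses the same shuffled order
    train_sampler.set_epoch(epoch)
    for data, label in train_loader:
        pass  # training step goes here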
