PyTorch automatic mixed precision (AMP) training

 

from torch.cuda.amp import autocast, GradScaler

    # Init Step 1: Create Model
    model, device, start_epoch = create_model(opt)
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)
    model.cuda()

    # Init Step 2: Create Dataset
    dataloader = create_dataset(opt.train_path)

    # Init Step 3: Create Optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)  # unchanged from the non-AMP version

    scaler = GradScaler()  # create a gradient scaler that manages dynamic loss scaling

            # Train Step 1: Forward pass, get loss
            with autocast():  # enable mixed precision; only the forward pass runs under autocast
                loss, outputs = model(P, A, L, targets)
                loss = torch.mean(loss)

            # Train Step 2: Backward pass, get gradients
            scaler.scale(loss).backward()  # scale the loss before backward so fp16 gradients do not underflow

            # Train Step 3: Optimize params
            scaler.step(optimizer)  # unscales the gradients, then calls optimizer.step() unless inf/NaN gradients were found
            scaler.update()         # adjust the loss-scale factor for the next iteration
            optimizer.zero_grad()   # clear gradients before the next iteration
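
The snippet above follows PyTorch's standard AMP recipe: the forward pass (and loss computation) runs under autocast() so eligible ops execute in float16, while GradScaler scales the loss before backward and unscales the gradients before the optimizer step so that small fp16 gradients do not underflow. Below is a minimal, self-contained sketch of the same recipe; the toy model, random data, and hyperparameters are placeholders (not the create_model / create_dataset code from the snippet above), and a CUDA-capable GPU is assumed.

import torch
import torch.nn as nn
from torch.cuda.amp import autocast, GradScaler

# Toy stand-ins for the model/dataset above (placeholders for illustration only)
model = nn.Sequential(nn.Linear(128, 256), nn.ReLU(), nn.Linear(256, 10)).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()

scaler = GradScaler()  # one scaler reused for the whole training run

for epoch in range(2):
    for step in range(10):  # stand-in for iterating over a DataLoader
        inputs = torch.randn(32, 128, device="cuda")
        targets = torch.randint(0, 10, (32,), device="cuda")

        # Forward pass (and loss) in mixed precision
        with autocast():
            outputs = model(inputs)
            loss = criterion(outputs, targets)

        # Backward on the scaled loss, then step/update through the scaler
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()
        optimizer.zero_grad()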

 
