Implementing weighted cross-entropy in PyTorch

  • When doing semantic segmentation, I found that a weighted cross-entropy loss still works best.
  • The code below uses two classes as an example; for comparison, a minimal built-in baseline is sketched first.
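For reference, PyTorch's built-in nn.CrossEntropyLoss already supports per-class weighting through its weight argument. A minimal sketch, reusing the 0.0194/0.9806 class weights from the custom loss below (the shapes are placeholder values):

import torch
import torch.nn as nn

# per-class weights: class 0 -> 0.0194, class 1 -> 0.9806
weights = torch.tensor([0.0194, 0.9806])
criterion = nn.CrossEntropyLoss(weight=weights, ignore_index=255)

logits = torch.randn(2, 2, 8, 8)           # bs*c*w*h
labels = torch.randint(0, 2, (2, 8, 8))    # bs*w*h
loss = criterion(logits, labels)

The custom MyCELoss below adds a focal-loss factor on top of this per-class weighting and leaves hooks for online hard example mining (OHEM).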
import torch
import torch.nn as nn
import torch.nn.functional as F

class MyCELoss(nn.Module):

    def __init__(self, thresh, ignore_lb=255, n_min=90000):
        super(MyCELoss, self).__init__()
        # OHEM threshold: a pixel counts as hard if its loss exceeds -log(thresh)
        # (kept on CPU; move it to the logits' device before enabling OHEM on GPU)
        self.thresh = -torch.log(torch.tensor(thresh, dtype=torch.float))
        self.ignore_lb = ignore_lb
        self.n_min = n_min
        #self.criteria = nn.CrossEntropyLoss(ignore_index=ignore_lb, reduction='none')
    def forward(self, logits, labels):
        '''
        logits: net output   bs*c*w*h
        labels: gt           bs*w*h
        '''
        #n_min = labels[labels != self.ignore_lb].numel() // 16
        # log_softmax over the class dimension; the CE loss at each pixel is
        # the value at its ground-truth label position
        ls = F.log_softmax(logits, dim=1)
        p = torch.exp(ls)
        # focal-loss modulating factor: (1 - p)^2 down-weights easy samples
        gamma = torch.pow(1 - p, 2)
        # convert the ground truth to one-hot form, bs*w*h*c
        # (labels must lie in [0, c); ignore_lb pixels would break one_hot)
        mask = F.one_hot(labels, logits.shape[1]).float()
        # per-class weights for negative/positive samples; the minus sign
        # supplies the -log of the cross-entropy
        mask[..., 0] *= -0.0194
        mask[..., 1] *= -0.9806
        # permute the mask to bs*c*w*h so it lines up with the logits
        mask = mask.permute(0, 3, 1, 2)
        loss = mask * gamma * ls
        # summing over the class dimension keeps, for each pixel, only the
        # non-zero value at its label position: the weighted CE per pixel, bs*w*h
        loss = loss.sum(dim=1)
        
        # online hard example mining (OhemCELoss), currently disabled
        loss = loss.view(-1)
        #loss_hard = loss[loss > self.thresh]
        #if loss_hard.numel() < self.n_min:
            #loss_hard, _ = loss.topk(self.n_min)

        return torch.mean(loss)
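
A minimal usage sketch on CPU, assuming two classes and random inputs (thresh=0.7 and n_min=4096 are illustrative values, not from the original post):

criterion = MyCELoss(thresh=0.7, ignore_lb=255, n_min=4096)

logits = torch.randn(4, 2, 64, 64, requires_grad=True)   # bs*c*w*h network output
labels = torch.randint(0, 2, (4, 64, 64))                # bs*w*h ground truth, no ignore pixels
loss = criterion(logits, labels)
loss.backward()
print(loss.item())

One caveat if you enable the commented OHEM branch: the return statement must also switch to the hard-pixel subset, since as written, uncommenting those lines would compute loss_hard and then still average over every pixel. A sketch of the corrected tail of forward:

        # enabled OHEM tail (sketch): average only over the hard pixels
        loss_hard = loss[loss > self.thresh]
        if loss_hard.numel() < self.n_min:
            loss_hard, _ = loss.topk(self.n_min)
        return torch.mean(loss_hard)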
