The difference between nn.BCEWithLogitsLoss() and nn.BCELoss(), and between nn.CrossEntropyLoss() and nn.NLLLoss()

nn.BCEWithLogitsLoss() = torch.sigmoid() + F.binary_cross_entropy()/nn.BCELoss()

import torch
import torch.nn as nn
import torch.nn.functional as F
"""
nn.BCEWithLogitsLoss() = torch.sigmoid() + F.binary_cross_entropy()/nn.BCELoss()
"""
pred = torch.tensor([[-0.2],[0.2],[0.8]])
target = torch.FloatTensor([[0],[0],[1]])
bce_loss = F.binary_cross_entropy(torch.sigmoid(pred), target)  # F.sigmoid is deprecated; torch.sigmoid is the current API
# equivalent module form:
# sigmoid = nn.Sigmoid()
# loss_bce = nn.BCELoss()
# bce_loss = loss_bce(sigmoid(pred), target)
print('bce_out', bce_loss)
bcew_loss = F.binary_cross_entropy_with_logits(pred, target)
# equivalent module form:
# loss_bcewlogistic = nn.BCEWithLogitsLoss()
# bcew_loss = loss_bcewlogistic(pred, target)
print('bcewlogistic_out', bcew_loss)
# output
bce_out tensor(0.5891)
bcewlogistic_out tensor(0.5891)
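
Why the two results match: elementwise binary cross entropy is -[y*log(p) + (1-y)*log(1-p)] with p = sigmoid(x), and nn.BCEWithLogitsLoss() simply fuses that sigmoid into the loss (which also lets it compute the result in a more numerically stable way for extreme logits). As a sanity check, here is a minimal sketch that evaluates the formula by hand on the same pred and target; it prints the same 0.5891:

import torch

pred = torch.tensor([[-0.2], [0.2], [0.8]])
target = torch.FloatTensor([[0], [0], [1]])

p = torch.sigmoid(pred)  # logits -> probabilities
# elementwise BCE: -[y*log(p) + (1-y)*log(1-p)], averaged over all elements
manual_bce = -(target * torch.log(p) + (1 - target) * torch.log(1 - p)).mean()
print('manual_bce', manual_bce)
# output
manual_bce tensor(0.5891)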

nn.CrossEntropyLoss() = F.log_softmax() + F.nll_loss()/nn.NLLLoss()

import torch
import torch.nn as nn
import torch.nn.functional as F

"""
nn.CrossEntropyLoss() = F.log_softmax() + F.nll_loss()/nn.NLLLoss()
"""
data = torch.randn(2, 5)
log_soft = F.log_softmax(data, dim=1)
print('log_soft:', log_soft, '\n')

target = torch.tensor([1, 2])
# using F.cross_entropy()
# entropy_out = F.cross_entropy(data, target)
EntropyLoss = nn.CrossEntropyLoss()
entropy_out = EntropyLoss(data, target)
# using F.nll_loss()
# nll_out = F.nll_loss(log_soft, target)
# using nn.NLLLoss()
NLLLoss = nn.NLLLoss()  # nn.NLLLoss() must be instantiated before it is called
nll_out = NLLLoss(log_soft, target)
print('entropy_out:', entropy_out)
print('nll_out:', nll_out)
# output
log_soft: tensor([[-3.1368, -2.0374, -1.2469, -1.2996, -1.3236],
        [-1.2834, -1.0692, -2.5731, -1.5507, -2.3944]]) 

entropy_out: tensor(2.3053)
nll_out: tensor(2.3053)
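
The same check works here: given log-probabilities, NLL loss just picks out, for each row, the log-probability at the target index, negates it, and averages over the batch. A minimal self-contained sketch (data is random, so the exact numbers differ from the run above, but the two printed values always agree):

import torch
import torch.nn.functional as F

data = torch.randn(2, 5)
target = torch.tensor([1, 2])
log_soft = F.log_softmax(data, dim=1)
# for each row, take the log-probability at the target index,
# negate it, and average over the batch
manual_nll = -log_soft[torch.arange(target.size(0)), target].mean()
print('manual_nll:', manual_nll)
print('cross_entropy:', F.cross_entropy(data, target))  # same value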
