import torch  # fixed: torch was used here before its first import (much later in the file)
from torch.nn.modules.loss import CrossEntropyLoss

# Demo: PyTorch cross-entropy with soft (class-probability) targets.
# NOTE(review): float targets of the same shape as the logits are supported
# by CrossEntropyLoss from PyTorch 1.10 onward — confirm the installed version.
pred = torch.FloatTensor([[1, 0], [1, 0], [1, 1]])
target = torch.FloatTensor([[1, 0], [1, 0], [1, 1]])
ce_loss = CrossEntropyLoss()
loss_ce = ce_loss(pred, target)
print(loss_ce)
import tensorflow as tf
def poly1_cross_entropy(logits, labels, epsilon=1.0):
    """Poly-1 cross-entropy loss (PolyLoss), averaged over the batch.

    :param logits: (batch, classes) float tensor of raw scores
    :param labels: (batch, classes) float tensor of target probabilities
    :param epsilon: weight of the leading polynomial term (1 - pt)
    :return: scalar tensor, mean Poly-1 loss
    """
    # Fixed: the original mixed TensorFlow ops with torch-tensor inputs,
    # read the module global `target` instead of `labels`, and used `np`
    # before it was imported. Implemented in pure torch instead.
    log_probs = torch.log_softmax(logits, dim=-1)
    # pt: probability mass assigned to the true class(es).
    pt = torch.sum(labels * torch.exp(log_probs), dim=-1)
    # Soft-label cross entropy per sample.
    ce = -torch.sum(labels * log_probs, dim=-1)
    poly1 = ce + epsilon * (1.0 - pt)
    # mean() over the batch replaces the original's sum / target.size(0).
    return poly1.mean()
# Demo call for the Poly-1 loss.
# NOTE(review): as written above, poly1_cross_entropy applies TensorFlow ops
# to these torch tensors and reads the global `target` — verify it runs.
pred = torch.FloatTensor([[0, 0], [1, 0], [1, 1]])
target = torch.FloatTensor([[0, 0], [1, 0], [1, 1]])
loss_PolyLoss = poly1_cross_entropy(pred, target)
print("loss_PolyLoss:",loss_PolyLoss)
import numpy as np
import torch
from medpy import metric
from scipy.ndimage import zoom
import torch.nn as nn
import SimpleITK as sitk
import copy
from PIL import Image
from matplotlib import pyplot as plt
import os
from torch.nn.modules.loss import CrossEntropyLoss
class DiceLoss(nn.Module):
    """Soft Dice loss averaged over all classes (one channel per class)."""

    def __init__(self, n_classes):
        super(DiceLoss, self).__init__()
        self.n_classes = n_classes

    def _one_hot_encoder(self, input_tensor):
        # Build one binary mask per class and stack them on a new channel axis.
        channels = [(input_tensor == c).unsqueeze(1) for c in range(self.n_classes)]
        return torch.cat(channels, dim=1).float()

    def _dice_loss(self, score, target):
        # 1 - soft Dice coefficient; smooth term guards against 0/0.
        target = target.float()
        smooth = 1e-5
        overlap = torch.sum(score * target)
        denom = torch.sum(target * target) + torch.sum(score * score)
        dice = (2 * overlap + smooth) / (denom + smooth)
        return 1 - dice

    def forward(self, inputs, target, weight=None, softmax=False):
        """Return the weighted mean of per-class Dice losses.

        :param inputs: (batch, n_classes, ...) logits or probabilities
        :param target: same shape as inputs, per-class ground-truth masks
        :param weight: optional per-class weights (defaults to all ones)
        :param softmax: apply softmax over the class dimension first
        """
        if softmax:
            inputs = torch.softmax(inputs, dim=1)
        # target = self._one_hot_encoder(target)
        if weight is None:
            weight = [1] * self.n_classes
        assert inputs.size() == target.size(), 'predict {} & target {} shape do not match'.format(inputs.size(), target.size())
        class_wise_dice = []
        total = 0.0
        for c in range(self.n_classes):
            per_class = self._dice_loss(inputs[:, c], target[:, c])
            class_wise_dice.append(1.0 - per_class.item())
            total = total + per_class * weight[c]
        return total / self.n_classes
# Demo: Dice loss on 2-class logits (softmax applied inside forward).
pred = torch.FloatTensor([[1, 1], [1, 0]])
target = torch.FloatTensor([[1, 0], [1, 0]])
dice_loss = DiceLoss(2)
loss_dice = dice_loss(pred, target, softmax=True)
print("loss_dice:",loss_dice)
def tversky_loss(inputs, targets, beta=0.7, weights=None):
    """Tversky loss averaged over the batch.

    With alpha = 1 - beta, Tversky index = TP / (TP + alpha*FP + beta*FN);
    beta > 0.5 penalizes false negatives more than false positives.

    :param inputs: (batch, ...) float tensor of predicted probabilities
    :param targets: (batch, ...) float tensor of binary ground truth
    :param beta: false-negative weight in [0, 1]
    :param weights: unused; kept for interface compatibility
    :return: scalar tensor, mean (1 - Tversky index) over the batch
    """
    # Fixed: the original had no smoothing term, so a sample that is empty
    # in both prediction and target produced 0/0 = NaN. Smooth value matches
    # the sibling DiceLoss._dice_loss for consistency.
    smooth = 1e-5
    batch_size = targets.size(0)
    alpha = 1.0 - beta  # loop-invariant, hoisted out of the per-sample loop
    loss = 0.0
    for i in range(batch_size):
        prob = inputs[i]
        ref = targets[i]
        tp = (ref * prob).sum()
        fp = ((1 - ref) * prob).sum()
        fn = (ref * (1 - prob)).sum()
        tversky = (tp + smooth) / (tp + alpha * fp + beta * fn + smooth)
        loss = loss + (1 - tversky)
    return loss / batch_size
# Demo: Tversky loss with the default beta=0.7.
# NOTE(review): inputs here are hard 0/1 values rather than probabilities —
# confirm that is the intended usage.
pred = torch.FloatTensor([[1, 1], [1, 0]])
target = torch.FloatTensor([[1, 0], [1, 0]])
loss_Tversky = tversky_loss(pred, target)
print("loss_Tversky:",loss_Tversky)
def log_cosh_dice_loss(y_true, y_pred, n_classes=2):
    """Log-cosh Dice loss: log(cosh(dice_loss)), a smoothed Dice variant.

    :param y_true: (batch, n_classes, ...) float tensor, ground-truth masks
    :param y_pred: (batch, n_classes, ...) float tensor, raw logits
    :param n_classes: number of classes (generalized from the hard-coded 2)
    :return: scalar tensor
    """
    dice = DiceLoss(n_classes)
    # Fixed: the original ignored its own arguments and read the module
    # globals `pred`/`target`, and applied tf.math.log to a torch tensor.
    x = dice(y_pred, y_true, softmax=True)
    # log(cosh(x)) written out explicitly, all in torch.
    return torch.log((torch.exp(x) + torch.exp(-x)) / 2.0)
# Demo: log-cosh Dice loss; note the (y_true, y_pred) argument order.
pred = torch.FloatTensor([[1, 0], [1, 1]])
target = torch.FloatTensor([[1, 0], [1, 0]])
log_dice_loss = log_cosh_dice_loss(target, pred)
print("log_dice_loss:",log_dice_loss)
import numpy as np
import glob
import tqdm
from PIL import Image
import cv2 as cv
import os
from sklearn.metrics import confusion_matrix,cohen_kappa_score
from skimage import io
from skimage import measure
from scipy import ndimage
from sklearn.metrics import f1_score
# mIoU (mean Intersection over Union): for each class, IoU = TP / (TP + FN + FP); mIoU averages the per-class IoU values.
def mean_iou(input, target, classes = 2):
""" compute the value of mean iou
:param input: 2d array, int, prediction
:param target: 2d array, int, ground truth
:param classes: int, the number of class
:return:
miou: float, the value of miou
"""
miou = 0
for i in range(classes):
intersection = np.logical_and(target == i, input == i)
# print(intersection.any())
union = np.logical_or(target == i, input == i)
temp = torch.sum(intersection) / torch.sum(union)
miou += temp
return miou/classes
# Demo: mean IoU over the two classes of a 2x2 prediction.
pred = torch.FloatTensor([[1.0, 1.0], [1.0, 0.0]])
target = torch.FloatTensor([[1.0, 0.0], [1.0, 0.0]])
print(mean_iou(pred, target))
# The F1 score measures a binary classifier's accuracy as the harmonic mean of precision and recall; it ranges from 0 (worst) to 1 (best).
def compute_f1(prediction, target):
    """
    :param prediction: 2d array, int,
    estimated targets as returned by a classifier
    :param target: 2d array, int,
    ground truth
    :return:
    f1: float
    """
    # Fixed: the original's `prediction.tolist(), target.tolist()` built a
    # tuple and discarded it (dead code); np.array accepts tensors directly.
    img, target = np.array(prediction).flatten(), np.array(target).flatten()
    f1 = f1_score(y_true=target, y_pred=img)
    return f1
# Demo: F1 score; inputs are flattened to 1-d label vectors inside compute_f1.
pred = torch.FloatTensor([[1.0, 1.0], [1.0, 0.0]])
target = torch.FloatTensor([[1.0, 0.0], [1.0, 0.0]])
# pred = np.array([[1.0, 1.0], [1.0, 0.0]])
# target = np.array([[1.0, 0.0], [1.0, 0.0]])
print(compute_f1(pred, target))
from medpy import metric
# Demo: Dice coefficient from medpy on binary numpy masks.
pred = np.array([[1.0, 1.0], [1.0, 0.0]])
target = np.array([[1.0, 0.0], [1.0, 0.0]])
print(metric.binary.dc(pred, target))
# IoU (Intersection over Union) is, literally, the ratio of the intersection of two sets to their union.
def iou(input, target, classes=1):
""" compute the value of iou
:param input: 2d array, int, prediction
:param target: 2d array, int, ground truth
:param classes: int, the number of class
:return:
iou: float, the value of iou
"""
intersection = np.logical_and(target == classes, input == classes)
# print(intersection.any())
union = np.logical_or(target == classes, input == classes)
iou = torch.sum(intersection) / torch.sum(union)
return iou
# Demo: IoU for the foreground class (class id 1).
pred = torch.FloatTensor([[1.0, 1.0], [1.0, 0.0]])
target = torch.FloatTensor([[1.0, 0.0], [1.0, 0.0]])
print(iou(pred, target))
# The Kappa coefficient tests inter-rater agreement and can also measure classification accuracy; it is computed from the confusion matrix.
def compute_kappa(prediction, target):
    """
    :param prediction: 2d array, int,
    estimated targets as returned by a classifier
    :param target: 2d array, int,
    ground truth
    :return:
    kappa: float
    """
    # Fixed: the original's `prediction.tolist(), target.tolist()` built a
    # tuple and discarded it (dead code); np.array accepts tensors directly.
    img, target = np.array(prediction).flatten(), np.array(target).flatten()
    kappa = cohen_kappa_score(target, img)
    return kappa
# Demo: Cohen's kappa on flattened label vectors.
pred = torch.FloatTensor([[1.0, 1.0], [1.0, 0.0]])
target = torch.FloatTensor([[1.0, 0.0], [1.0, 0.0]])
print(compute_kappa(pred, target))
# Based on the confusion matrix: accuracy is the sum of the diagonal divided by the sum of all its elements.
def compute_acc(gt, pred):
    """Overall accuracy: fraction of positions where gt == pred.

    Mathematically identical to trace(confusion_matrix) / confusion_matrix.sum()
    (the original implementation), but computed directly with numpy — no
    sklearn round-trip needed.

    :param gt: array-like, ground-truth labels
    :param pred: array-like, predicted labels
    :return: float in [0, 1]
    """
    gt_flat = np.array(gt).flatten()
    pred_flat = np.array(pred).flatten()
    return np.mean(gt_flat == pred_flat)
# Demo: overall accuracy.
# NOTE(review): compute_acc's signature is (gt, pred) but it is called here
# as (pred, target) — accuracy is symmetric, so the result is unaffected.
pred = torch.FloatTensor([[1.0, 1.0], [1.0, 0.0]])
target = torch.FloatTensor([[1.0, 0.0], [1.0, 0.0]])
print(compute_acc(pred, target))
# Recall is the ratio of correctly identified positives to the actual positives in the test set, i.e. how many samples that should be positive are recovered:
def compute_recall(gt, pred):
    """Per-class recall: TP_c / (actual positives of class c).

    :param gt: array-like, ground-truth labels
    :param pred: array-like, predicted labels
    :return: 1d array, recall for each class (in sorted label order)
    """
    matrix = confusion_matrix(y_true=np.array(gt).flatten(), y_pred=np.array(pred).flatten())
    # Fixed: sklearn's confusion matrix puts TRUE labels on rows, so recall
    # divides the diagonal by row sums (axis=1). The original used axis=0
    # (column sums = predicted counts), which computes precision instead.
    recall = np.diag(matrix) / matrix.sum(axis = 1)
    return recall
# Demo: per-class recall.
# NOTE(review): compute_recall's signature is (gt, pred) but it is called
# here as (pred, target) — the arguments appear swapped; confirm intent.
pred = torch.FloatTensor([[1.0, 1.0], [1.0, 0.0]])
target = torch.FloatTensor([[1.0, 0.0], [1.0, 0.0]])
print(compute_recall(pred, target))