[PyTorch] Computing IoU and Other Evaluation Metrics for Image Segmentation

For the theory behind these metrics, see: [Semantic Segmentation] Evaluation metrics: PA, CPA, MPA, IoU, MIoU — a detailed summary and code implementation (beginner-to-advanced series).
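Before the full class below, here is a small hand-checkable sketch (the confusion matrix values are invented purely for illustration) of how PA, CPA, MPA, IoU and MIoU are read off a confusion matrix whose rows are ground-truth classes and whose columns are predictions:

import numpy as np

# Toy 2-class confusion matrix (rows: ground truth, columns: prediction).
cm = np.array([[3., 1.],   # 3 class-0 pixels predicted as class 0, 1 predicted as class 1
               [2., 4.]])  # 2 class-1 pixels predicted as class 0, 4 predicted as class 1

PA   = np.diag(cm).sum() / cm.sum()                                    # (3 + 4) / 10 = 0.70
CPA  = np.diag(cm) / cm.sum(axis=1)                                    # [3/4, 4/6] = [0.75, 0.667]
MPA  = np.nanmean(CPA)                                                 # 0.708
IoU  = np.diag(cm) / (cm.sum(axis=1) + cm.sum(axis=0) - np.diag(cm))   # [3/6, 4/7] = [0.5, 0.571]
MIoU = np.nanmean(IoU)                                                 # 0.536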


"""
refer to https://github.com/jfzhang95/pytorch-deeplab-xception/blob/master/utils/metrics.py
"""
import numpy as np
import cv2

__all__ = ['SegmentationMetric']

"""
confusionMetric  # 注意:此处横着代表预测值,竖着代表真实值,与之前介绍的相反
P\L     P    N
P      TP    FP
N      FN    TN
"""


class SegmentationMetric(object):
    def __init__(self, numClass):
        self.numClass = numClass
        self.confusionMatrix = np.zeros((self.numClass,) * 2)  # confusion matrix, initialised to zeros

    def pixelAccuracy(self):
        # return the overall pixel accuracy: the proportion of correctly classified pixels
        # PA = acc = (TP + TN) / (TP + TN + FP + FN)
        acc = np.diag(self.confusionMatrix).sum() / self.confusionMatrix.sum()
        return acc

    def classPixelAccuracy(self):
        # return the per-class pixel accuracy (with ground truth on the rows this is per-class recall)
        # acc = TP / (TP + FN)
        classAcc = np.diag(self.confusionMatrix) / self.confusionMatrix.sum(axis=1)
        return classAcc  # array of per-class accuracies, e.g. [0.90, 0.80, 0.96] for classes 0, 1, 2

    def meanPixelAccuracy(self):
        """
        Mean Pixel Accuracy(MPA,均像素精度):是PA的一种简单提升,计算每个类内被正确分类像素数的比例,之后求所有类的平均。
        :return:
        """
        classAcc = self.classPixelAccuracy()
        meanAcc = np.nanmean(classAcc)  # np.nanmean 求平均值,nan表示遇到Nan类型,其值取为0
        return meanAcc  # 返回单个值,如:np.nanmean([0.90, 0.80, 0.96, nan, nan]) = (0.90 + 0.80 + 0.96) / 3 =  0.89

    def IntersectionOverUnion(self):
        # Intersection = TP Union = TP + FP + FN
        # IoU = TP / (TP + FP + FN)
        intersection = np.diag(self.confusionMatrix)  # diagonal: per-class TP counts
        union = np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) - np.diag(
            self.confusionMatrix)  # axis=1 sums each row (ground-truth totals); axis=0 sums each column (prediction totals)
        IoU = intersection / union  # array of per-class IoU values
        return IoU

    def meanIntersectionOverUnion(self):
        mIoU = np.nanmean(self.IntersectionOverUnion())  # average of the per-class IoU values (NaN entries are ignored)
        return mIoU

    def genConfusionMatrix(self, imgPredict, imgLabel):
        """
        Same as the fast_hist() function in score.py of FCN: builds the confusion matrix.
        :param imgPredict: predicted label map
        :param imgLabel: ground-truth label map
        :return: confusion matrix
        """
        # keep only pixels whose ground-truth label is a valid class (drops unlabeled/ignore pixels)
        mask = (imgLabel >= 0) & (imgLabel < self.numClass)
        label = self.numClass * imgLabel[mask] + imgPredict[mask]
        count = np.bincount(label, minlength=self.numClass ** 2)
        confusionMatrix = count.reshape(self.numClass, self.numClass)
        # print(confusionMatrix)
        return confusionMatrix

    def Frequency_Weighted_Intersection_over_Union(self):
        """
        FWIoU,频权交并比:为MIoU的一种提升,这种方法根据每个类出现的频率为其设置权重。
        FWIOU =     [(TP+FN)/(TP+FP+TN+FN)] *[TP / (TP + FP + FN)]
        """
        freq = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)
        iu = np.diag(self.confusion_matrix) / (
                np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
                np.diag(self.confusion_matrix))
        FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()
        return FWIoU

    def addBatch(self, imgPredict, imgLabel):
        assert imgPredict.shape == imgLabel.shape
        self.confusionMatrix += self.genConfusionMatrix(imgPredict, imgLabel)  # accumulate the confusion matrix
        return self.confusionMatrix

    def reset(self):
        self.confusionMatrix = np.zeros((self.numClass, self.numClass))

# Test code
if __name__ == '__main__':
    imgPredict = cv2.imread('1.png')
    imgLabel = cv2.imread('2.png')
    imgPredict = np.array(cv2.cvtColor(imgPredict, cv2.COLOR_BGR2GRAY) / 255., dtype=np.uint8)  # 0/255 mask -> 0/1 class indices
    imgLabel = np.array(cv2.cvtColor(imgLabel, cv2.COLOR_BGR2GRAY) / 255., dtype=np.uint8)  # 0/255 mask -> 0/1 class indices
    # imgPredict = np.array([0, 0, 1, 1, 2, 2])  # toy array; can be replaced with a prediction map
    # imgLabel = np.array([0, 0, 1, 1, 2, 2])  # toy array; can be replaced with a label map

    metric = SegmentationMetric(2)  # number of classes (2 here); set this to however many classes you have
    hist = metric.addBatch(imgPredict, imgLabel)
    pa = metric.pixelAccuracy()
    cpa = metric.classPixelAccuracy()
    mpa = metric.meanPixelAccuracy()
    IoU = metric.IntersectionOverUnion()
    mIoU = metric.meanIntersectionOverUnion()
    print('hist is :\n', hist)
    print('PA is : %f' % pa)
    print('cPA is :', cpa)  # array of per-class values
    print('mPA is : %f' % mpa)
    print('IoU is : ', IoU)
    print('mIoU is : ', mIoU)

Output:

hist is :
 [[  43466.   11238.]
 [  11238. 2582058.]]
PA is : 0.991512
cPA is : [0.79456712 0.99566652]
mPA is : 0.895117
IoU is :  [0.65915502 0.99137043]
mIoU is :  0.8252627241326803

(Figures: 1.png — the prediction mask; 2.png — the ground-truth mask.)
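If the two sample images are not at hand, the commented-out toy arrays can be used instead to sanity-check the class with numbers that are easy to verify by hand. A minimal sketch, assuming the class above is saved as metrics.py and using a slightly imperfect toy prediction:

import numpy as np
from metrics import SegmentationMetric  # assumes the class above is saved as metrics.py

imgPredict = np.array([0, 1, 1, 1, 2, 2])  # toy prediction: one class-0 pixel mislabelled as class 1
imgLabel = np.array([0, 0, 1, 1, 2, 2])    # toy ground truth
metric = SegmentationMetric(3)             # 3 classes: 0, 1, 2
metric.addBatch(imgPredict, imgLabel)
print(metric.pixelAccuracy())              # 5/6 ≈ 0.833
print(metric.classPixelAccuracy())         # [0.5, 1.0, 1.0]
print(metric.IntersectionOverUnion())      # [0.5, 0.667, 1.0]
print(metric.meanIntersectionOverUnion())  # ≈ 0.722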

When using this inside a training or validation loop, make sure the two arguments passed to addBatch() have the same shape and the same dtype (np.int32).

from metrics import SegmentationMetric
# excerpt from the training/validation loop
for x, mask in dataloaders:
    out = model(x)  # network output
    # ---------------------------------------------------------
    pred = torch.where(out > 0.5, torch.ones_like(out), torch.zeros_like(out))  # binarise with a threshold of 0.5
    pred, y = pred.cpu().numpy(), mask.cpu().numpy()  # convert to ndarrays for the numpy-based metric
    pred, y = pred.astype(np.int32), y.astype(np.int32)  # cast to integers (required by np.bincount)

    metric = SegmentationMetric(2)  # 2 classes; created inside the loop, so these are per-batch metrics
    hist = metric.addBatch(pred, y)
    pa = metric.pixelAccuracy()
    cpa = metric.classPixelAccuracy()
    mpa = metric.meanPixelAccuracy()
    IoU = metric.IntersectionOverUnion()
    mIoU = metric.meanIntersectionOverUnion()
    print('--' * 20)
    print(
        'hist:{},\niou:{},\nmiou:{},\nPA:{},\ncPA:{},\nmPA:{}'.format(hist, IoU, mIoU, pa, cpa,
                                                                      mpa))
    # ---------------------------------------------------------
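The snippet above computes per-batch metrics for a binary model with a thresholded output. For a multi-class model, the thresholding is usually replaced by an argmax over the class dimension, and dataset-level metrics are obtained by creating the metric once and accumulating every batch into the same confusion matrix. A minimal sketch under those assumptions (model, val_loader and NUM_CLASSES are placeholders; the model is assumed to output [N, C, H, W] logits):

import numpy as np
import torch
from metrics import SegmentationMetric

NUM_CLASSES = 21                              # hypothetical number of classes
metric = SegmentationMetric(NUM_CLASSES)
metric.reset()                                # clear the confusion matrix at the start of each epoch

model.eval()
with torch.no_grad():
    for x, mask in val_loader:                # mask: [N, H, W] integer class indices
        out = model(x)                        # assumed shape: [N, C, H, W] logits
        pred = out.argmax(dim=1)              # [N, H, W] predicted class per pixel
        metric.addBatch(pred.cpu().numpy().astype(np.int32),
                        mask.cpu().numpy().astype(np.int32))

print('dataset mIoU:', metric.meanIntersectionOverUnion())
print('dataset PA  :', metric.pixelAccuracy())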
