mmdetection-coco: per-image Recall and Precision computation (Recall is used to find missed-detection badcases; Precision to find false-detection badcases)

This post supplements the following posts:

mmdetection: visualize results colored by category (GT on the left, predictions on the right), output per-category AP/AR, and split out badcases by computing per-image recall (CSDN blog)

Object detection evaluation metrics (TP, TN, FP, FN, Precision, Recall, IoU, mIoU, AP, mAP) (博客园 and CSDN blogs)
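
Before the code, a quick refresher on the per-image metrics used for the badcase split. At an IoU threshold of 0.5, TP is the number of detections matched to a GT box, FN the number of GT boxes left unmatched (missed detections), and FP the number of detections with no matching GT (false detections). A minimal sketch of the idea (the helper name and the toy numbers below are illustrative, not taken from the scripts):

# Per-image recall/precision sketch used for badcase mining (illustrative helper).
def per_image_recall_precision(tp, fn, fp):
    recall = tp / (tp + fn) if (tp + fn) > 0 else -1      # -1: the image has no GT objects
    precision = tp / (tp + fp) if (tp + fp) > 0 else -1   # -1: the image has no detections
    return recall, precision

# Example: 3 GT boxes, 2 of them found, plus 1 false detection
# -> recall = 2/3 ~ 0.67 (recall badcase), precision = 2/3 ~ 0.67 (precision badcase)
print(per_image_recall_precision(tp=2, fn=1, fp=1))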

mmdetection_inference.py

temp_bbox_overlaps.py

temp_coco_utils.py

temp_recall.py

mmdetection_inference.py

# -*- coding:utf-8 -*-
import os
import argparse
import cv2
import mmcv
import numpy as np
from pycocotools.coco import COCO, maskUtils
from mmdet.apis import init_detector, inference_detector
from temp_coco_utils import coco_eval

'''
Generate visually distinct colors, one per category, for visualization.
'''
import colorsys
import random
def get_n_hls_colors(num):
    hls_colors = []
    i = 0
    step = 360.0 / num
    while i < 360:
        h = i
        s = 90 + random.random() * 10
        l = 50 + random.random() * 10
        _hlsc = [h / 360.0, l / 100.0, s / 100.0]
        hls_colors.append(_hlsc)
        i += step
    return hls_colors

def ncolors(num):
    rgb_colors = []
    if num < 1:
        return rgb_colors
    hls_colors = get_n_hls_colors(num)
    for hlsc in hls_colors:
        _r, _g, _b = colorsys.hls_to_rgb(hlsc[0], hlsc[1], hlsc[2])
        r, g, b = [int(x * 255.0) for x in (_r, _g, _b)]
        rgb_colors.append([r, g, b])
    return rgb_colors
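
# Usage note (illustrative): ncolors(8) returns 8 RGB triplets whose hues are evenly
# spaced around the HLS color wheel (e.g. ncolors(2) gives a red-ish and a cyan-ish
# color). The random jitter on saturation/lightness means the exact values change
# between runs unless random.seed() is fixed beforehand.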

'''
Helpers for converting detection results into COCO-style JSON files.
'''
def xyxy2xywh(bbox):
    _bbox = bbox.tolist()
    return [
        _bbox[0],
        _bbox[1],
        _bbox[2] - _bbox[0] + 1,
        _bbox[3] - _bbox[1] + 1,
    ]
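
# Worked example (illustrative): xyxy2xywh(np.array([10., 20., 50., 80.]))
# -> [10.0, 20.0, 41.0, 61.0]  (width/height follow the +1 pixel-inclusive convention)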

def proposal2json(coco, results):

    imgIds = coco.getImgIds(catIds=[])
    categories = coco.dataset['categories']

    json_results = []
    for idx in range(len(imgIds)):
        img_id = imgIds[idx]

        bboxes = results[idx]
        for i in range(bboxes.shape[0]):
            data = dict()
            data['image_id'] = img_id
            data['bbox'] = xyxy2xywh(bboxes[i])
            data['score'] = float(bboxes[i][4])
            data['category_id'] = 1
            json_results.append(data)
    return json_results

def det2json(coco, results):

    imgIds = coco.getImgIds(catIds=[])
    categories = coco.dataset['categories']

    json_results = []
    for idx in range(len(imgIds)):
        img_id = imgIds[idx]

        result = results[idx]
        for label in range(len(result)):
            bboxes = result[label]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = categories[label]['id']
                json_results.append(data)
    return json_results

def segm2json(coco, results):

    imgIds = coco.getImgIds(catIds=[])
    categories = coco.dataset['categories']

    bbox_json_results = []
    segm_json_results = []
    for idx in range(len(imgIds)):
        img_id = imgIds[idx]

        det, seg = results[idx]
        for label in range(len(det)):
            # bbox results
            bboxes = det[label]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = categories[label]['id']
                bbox_json_results.append(data)

            # segm results
            # some detectors use different score for det and segm
            if len(seg) == 2:
                segms = seg[0][label]
                mask_score = seg[1][label]
            else:
                segms = seg[label]
                mask_score = [bbox[4] for bbox in bboxes]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['score'] = float(mask_score[i])
                data['category_id'] = categories[label]['id']
                segms[i]['counts'] = segms[i]['counts'].decode()
                data['segmentation'] = segms[i]
                segm_json_results.append(data)
    return bbox_json_results, segm_json_results

'''
Create a directory if it does not already exist.
'''
def mkdir_os(path):
    if not os.path.exists(path):
        os.makedirs(path)

def main(args):
    eval_types = args.eval
    mkdir_os(args.output_vis_result)
    #abnormalcase: the input image has no GT objects, i.e. it is a pure background image
    mkdir_os(os.path.join(args.output_vis_result, "recall", "abnormalcase"))
    mkdir_os(os.path.join(args.output_vis_result, "recall", "case"))
    mkdir_os(os.path.join(args.output_vis_result, "recall", "badcase"))

    mkdir_os(os.path.join(args.output_vis_result, "precision", "abnormalcase"))
    mkdir_os(os.path.join(args.output_vis_result, "precision", "case"))
    mkdir_os(os.path.join(args.output_vis_result, "precision", "badcase"))

    score_thr = 0.3
    model = init_detector(args.input_config_file, args.input_checkpoint_file, device='cuda:0')
    model.eval()

    '''
    Generate the category colors for visualization. The first colorbar entry is the
    background color and is kept as a reserved slot, not used for any category.
    '''
    cnum = 8
    self_color = ncolors(cnum)
    colorbar_vis = np.zeros((cnum * 30, 100, 3), dtype=np.uint8)
    for ind, colo in enumerate(self_color):
        k_tm = np.ones((30, 100, 3), dtype=np.uint8) * np.array([colo[-1], colo[-2], colo[-3]])
        colorbar_vis[ind * 30:(ind + 1) * 30, 0:100] = k_tm
    cv2.imwrite('../colorbar_vis.png', colorbar_vis)

    coco = COCO(args.input_test_json)
    imgIds = coco.getImgIds(catIds=[])
    categories = coco.dataset['categories']

    results = []
    vis_imgpath = []
    vis_imgid = []
    num = 0
    count = len(imgIds)
    for idx in range(len(imgIds)):
        print(num,'/',count)
        num += 1

        img_id = imgIds[idx]
        img_info = coco.loadImgs(img_id)[0]

        file_name = img_info['file_name']
        img_path = os.path.join(args.input_test_img_path, file_name)
        result = inference_detector(model, img_path)
        results.append(result)
        vis_imgpath.append(img_path)
        vis_imgid.append(img_info['id'])

    if eval_types:
        print('Starting evaluate {}'.format(' and '.join(eval_types)))
        if eval_types == ['proposal_fast']:
            result_file = os.path.join("./result", "result_out.pkl")
            # 2021.1.13 by ynh
            recall_result, precision_result, coco_recall_result, coco_precision_result = coco_eval(result_file, eval_types, coco)
        else:
            if not isinstance(results[0], dict):
                out_file = os.path.join("./result", "result_out.pkl")
                result_files = dict()
                #faster_rcnn_r50_fpn_1x.py takes this branch (eval_types=['bbox'])
                if isinstance(results[0], list):
                    json_results = det2json(coco, results)
                    result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
                    result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
                    mmcv.dump(json_results, result_files['bbox'])
                # mask_rcnn_r50_fpn_1x.py takes this branch (eval_types=['bbox', 'segm'])
                elif isinstance(results[0], tuple):
                    json_results = segm2json(coco, results)
                    result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
                    result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
                    result_files['segm'] = '{}.{}.json'.format(out_file, 'segm')
                    mmcv.dump(json_results[0], result_files['bbox'])
                    mmcv.dump(json_results[1], result_files['segm'])
                elif isinstance(results[0], np.ndarray):
                    json_results = proposal2json(coco, results)
                    result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal')
                    mmcv.dump(json_results, result_files['proposal'])
                # 2021.1.13 by ynh
                recall_result, precision_result, coco_recall_result, coco_precision_result = coco_eval(result_files, eval_types, coco)
            else:
                for name in results[0]:
                    out_file = os.path.join("./result", "result_out.pkl")
                    print('\nEvaluating {}'.format(name))
                    outputs_ = [out[name] for out in results]
                    out_file = out_file + '.{}'.format(name)
                    result_files = dict()
                    if isinstance(outputs_[0], list):
                        json_results = det2json(coco, outputs_)
                        result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
                        result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
                        mmcv.dump(json_results, result_files['bbox'])
                    elif isinstance(outputs_[0], tuple):
                        json_results = segm2json(coco, outputs_)
                        result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
                        result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
                        result_files['segm'] = '{}.{}.json'.format(out_file, 'segm')
                        mmcv.dump(json_results[0], result_files['bbox'])
                        mmcv.dump(json_results[1], result_files['segm'])
                    elif isinstance(outputs_[0], np.ndarray):
                        json_results = proposal2json(coco, outputs_)
                        result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal')
                        mmcv.dump(json_results, result_files['proposal'])
                    # 2021.1.13 by ynh
                    recall_result, precision_result, coco_recall_result, coco_precision_result = coco_eval(result_files, eval_types, coco)





    #only the bbox recall is considered here
    print("\n", "Per-category evaluation", "\n")
    coco_recall = coco_recall_result[0]
    iStr = ' Categories={:>12s} | {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
    titleStr = 'Average Recall'
    typeStr = '(AR)'
    iouStr = '{:0.2f}:{:0.2f}'.format(0.50, 0.95)
    areaRng = 'all'
    maxDets = 100
    # dimension of recall: [TxKxAxM]
    s = coco_recall
    s = s[:]
    for m_ind, m_cls in enumerate(categories):
        temp_s = s[:, m_ind, 0, 2]
        if len(temp_s[temp_s > -1]) == 0:
            mean_s = -1
        else:
            mean_s = np.mean(temp_s[temp_s > -1])
        print(iStr.format(m_cls['name'], titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))

    coco_precision = coco_precision_result[0]
    iStr = ' Categories={:>12s} | {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
    titleStr = 'Average Precision'
    typeStr = '(AP)'
    iouStr = '{:0.2f}:{:0.2f}'.format(0.50, 0.95)
    areaRng = 'all'
    maxDets = 100
    # dimension of precision: [TxRxKxAxM]
    s = coco_precision
    # IoU
    s = s[:]
    for m_ind, m_cls in enumerate(categories):
        temp_s = s[:, :, m_ind, 0, 2]
        if len(temp_s[temp_s > -1]) == 0:
            mean_s = -1
        else:
            mean_s = np.mean(temp_s[temp_s > -1])
        print(iStr.format(m_cls['name'], titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))




    print("\n", "Classifying images into case/badcase/abnormalcase:", "\n")
    #only the bbox recall is considered here
    type_recall = np.array(recall_result[0])
    # 2021.1.13 by ynh
    type_precision = np.array(precision_result[0])
    catId_num, imgId_num = type_recall.shape[:2]
    vis_idx = 0
    num = 0
    for n_key in range(imgId_num):
        print(num,'/',imgId_num)
        num += 1
        imgId_recall = type_recall[:,n_key]
        # 2021.1.13 by ynh
        imgId_precision = type_precision[:,n_key]
        #recall
        if ((imgId_recall > 0).sum())==0:
            recall = -1
        else:
            recall = np.sum(imgId_recall[imgId_recall > 0]) / (imgId_recall > 0).sum()

        # 2021.1.13 by ynh
        #precision
        if ((imgId_precision > 0).sum())==0:
            precision = -1
        else:
            precision = np.sum(imgId_precision[imgId_precision > 0]) / (imgId_precision > 0).sum()
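        # Worked example (illustrative): if the per-category recalls for this image are
        # [-1, 0.5, 1.0], only the entries > 0 are kept, so the image-level recall is
        # (0.5 + 1.0) / 2 = 0.75 and the image lands in the "badcase" folder below
        # (0 < recall < 1); an image whose entries are all <= 0 gets recall = -1 and
        # goes to "abnormalcase".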


        #visualization pipeline
        result = results[n_key]

        # separate the bbox and segm results
        if isinstance(result, tuple):
            bbox_result, segm_result = result
        else:
            bbox_result, segm_result = result, None
        #append the class label as an extra column to each bbox
        for m_key, m_val in enumerate(bbox_result):
            if m_val.shape[:2][0] > 0:
                rows, clos = m_val.shape[:2]
                m_temp = np.ones((rows, 1), dtype=np.float32)*m_key
                bbox_result[m_key] = np.hstack((m_val, m_temp))
            else:
                bbox_result[m_key] = np.empty(shape=(0, 6), dtype=np.float32)
        bboxes = np.vstack(bbox_result)

        if score_thr > 0:
            assert bboxes.shape[1] == 6
            #note: after the label column is appended, the score is the second-to-last column
            #scores = bboxes[:, -1]
            scores = bboxes[:, -2]
            inds = scores > score_thr
            bboxes = bboxes[inds, :]

        # prepare two copies so GT (left) and predictions (right) can be shown side by side
        img = mmcv.imread(vis_imgpath[n_key])
        img = img.copy()
        oriimg = img.copy()

        # draw the masks
        mask_list = []
        if segm_result is not None:
            segms = mmcv.concat_list(segm_result)
            #note: the score is the second-to-last column here as well
            #inds = np.where(bboxes[:, -1] > score_thr)[0]
            inds = np.where(bboxes[:, -2] > score_thr)[0]
            np.random.seed(42)
            color_masks = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            for i in inds:
                i = int(i)
                mask = maskUtils.decode(segms[i]).astype(bool)  # np.bool is deprecated in newer numpy
                img[mask] = img[mask] * 0.5 + color_masks * 0.5
                mask_list.append(mask)

        #draw the bboxes
        font_scale = 0.8
        thickness = 4
        #bbox_color = (0, 255, 0)
        #text_color = (0, 255, 0)
        for bbox in bboxes:
            #bbox[-1] is the label index; use color (label + 1) since index 0 is reserved for background
            bbox_color = self_color[int(bbox[-1]+1)][::-1]
            text_color = self_color[int(bbox[-1]+1)][::-1]

            bbox_int = bbox.astype(np.int32)
            left_top = (bbox_int[0], bbox_int[1])
            right_bottom = (bbox_int[2], bbox_int[3])
            cv2.rectangle(
                img, left_top, right_bottom, bbox_color, thickness=thickness)
            if len(bbox) > 4:
                # bbox layout is [x1, y1, x2, y2, score, label]; show the score
                label_text = '{:.02f}'.format(bbox[-2])
            cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 5),
                        cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)

        # draw the ground truth
        annIds = coco.getAnnIds(imgIds=vis_imgid[n_key], catIds=[], iscrowd=None)
        anns = coco.loadAnns(annIds)

        polygons = []
        color = []
        category_id_list = []
        for ann in anns:
            if 'segmentation' in ann:
                if type(ann['segmentation']) == list:
                    # polygon
                    for seg in ann['segmentation']:
                        poly = np.array(seg).reshape((int(len(seg) / 2), 2))
                        poly_list = poly.tolist()
                        polygons.append(poly_list)

                        # convert RGB to BGR for OpenCV via the reversed slice [::-1]
                        if ann['iscrowd'] == 0 and ann["ignore"] == 0:
                            temp = self_color[ann['category_id']]
                            color.append(temp[::-1])
                        if ann['iscrowd'] == 1 or ann["ignore"] == 1:
                            temp = self_color[-1]
                            color.append(temp[::-1])
                        category_id_list.append(ann['category_id'])
                else:
                    print("error type(ann['segmentation']) != list")
                    exit()

        point_size = 2
        thickness = 4
        for key in range(len(polygons)):
            ndata = polygons[key]
            cur_color = color[key]
            label_id = category_id_list[key]

            label = 'error'
            for m_id in categories:
                if m_id['id']==label_id:
                    label = m_id['name']

            #segmentation
            if len(ndata)>2:
                for k in range(len(ndata)):
                    data = ndata[k]
                    cv2.circle(oriimg, (data[0], data[1]), point_size, (cur_color[0], cur_color[1], cur_color[2]),
                               thickness)
            else:#bbox
                cv2.rectangle(oriimg, (int(ndata[0][0]), int(ndata[0][1])), (int(ndata[1][0]), int(ndata[1][1])),
                              (cur_color[0], cur_color[1], cur_color[2]),
                              thickness)
                cv2.putText(oriimg, label, (int(ndata[0][0]), int(ndata[0][1])),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (cur_color[0], cur_color[1], cur_color[2]), 2)

        # compose the side-by-side visualization: GT (left) + prediction mask/bbox (right)
        h1, w1 = oriimg.shape[:2]
        h2, w2 = img.shape[:2]
        vis = np.zeros((max(h1, h2), w1 + w2, 3), np.uint8)
        vis[:h1, :w1, :] = oriimg
        vis[:h2, w1:w1 + w2, :] = img

        # 2021.1.13 by ynh
        vis_idx += 1
        if (0< recall < 1):
            out_file = os.path.join(args.output_vis_result, "recall", "badcase", 'recall_result_{}.jpg'.format(vis_idx))
        elif (recall == -1):
            out_file = os.path.join(args.output_vis_result, "recall", "abnormalcase", 'result_{}.jpg'.format(vis_idx))
        else:
            out_file = os.path.join(args.output_vis_result, "recall", "case", 'result_{}.jpg'.format(vis_idx))
        cv2.imwrite(out_file, vis)

        # 2021.1.13 by ynh
        if (0< precision < 1):
            out_file = os.path.join(args.output_vis_result, "precision", "badcase", 'precision_result_{}.jpg'.format(vis_idx))
        elif (precision == -1):
            out_file = os.path.join(args.output_vis_result, "precision", "abnormalcase", 'result_{}.jpg'.format(vis_idx))
        else:
            out_file = os.path.join(args.output_vis_result, "precision", "case", 'result_{}.jpg'.format(vis_idx))
        cv2.imwrite(out_file, vis)


if __name__ == "__main__":
    '''
    /mmdet/core/evaluation/recall.py
    /mmdet/core/evaluation/mean_ap.py
    /mmdet/core/evaluation/eval_hooks.py
    /mmdet/core/evaluation/coco_utils.py
    /mmdet/core/evaluation/class_names.py
    /mmdet/core/evaluation/bbox_overlaps.py
    
    /lib/python3.6/site-packages/pycocotools/cocoeval.py
    /lib/python3.6/site-packages/pycocotools/coco.py
    /lib/python3.6/site-packages/pycocotools/mask.py
    '''
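    # Note: the temp_*.py helpers used in this post are adapted from the
    # mmdet/core/evaluation files listed above.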
    parser = argparse.ArgumentParser(
        description="Compute mAP and AP for mmdetection")
    parser.add_argument('-icf',
                        "--input_config_file",
                        default='../configs/nut5_fine_faster_rcnn_r50_fpn_1x.py',
                        help="set input_config_file")
    parser.add_argument('-icp',
                        "--input_checkpoint_file",
                        default='../checkpoints/epoch_55.pth',
                        help="set input_checkpoint_file")
    parser.add_argument('-itj',
                        "--input_test_json",
                        #default='annotations/batch4-ZD-data_instances_test2017.json',
                        default='annotations/img-pre-5_instances_test2017.json',
                        help="set input_test_json")
    parser.add_argument('-itp',
                        "--input_test_img_path",
                        default='train2017',
                        help="set input_test_img_path")
    parser.add_argument('-ovt',
                        "--output_vis_result",
                        default='./result',
                        help="set output vis")
    parser.add_argument('--eval',
                        type=str,
                        nargs='+',
                        choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
                        default=['bbox'],
                        help='eval types')
    args = parser.parse_args()

    if args.output_vis_result is None:
        parser.print_help()
        exit()

    main(args)

temp_coco_utils.py

import mmcv
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

from temp_recall import eval_recalls


def coco_eval(result_files, result_types, coco, max_dets=(100, 300, 1000)):
    for res_type in result_types:
        assert res_type in [
            'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'
        ]

    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)

    if result_types == ['proposal_fast']:
        ar = fast_eval_recall(result_files, coco, np.array(max_dets))
        for i, num in enumerate(max_dets):
            print('AR@{}\t= {:.4f}'.format(num, ar[i]))
        return

    recall_list = [[] for _ in range(len(result_types))]
    # add 2021.1.13 by ynh
    precision_list = [[] for _ in range(len(result_types))]
    coco_recall_list = [[] for _ in range(len(result_types))]
    coco_precision_list = [[] for _ in range(len(result_types))]
    for m_key, res_type in enumerate(result_types):
        result_file = result_files[res_type]
        assert result_file.endswith('.json')

        coco_dets = coco.loadRes(result_file)
        img_ids = coco.getImgIds()
        iou_type = 'bbox' if res_type == 'proposal' else res_type
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        cocoEval.params.imgIds = img_ids
        if res_type == 'proposal':
            cocoEval.params.useCats = 0
            cocoEval.params.maxDets = list(max_dets)
        #run the per-image evaluation on the given images and store the results (a list of dicts) in self.evalImgs
        cocoEval.evaluate()
        #accumulate the per-image results and store them in self.eval
        cocoEval.accumulate()
        #compute and print the summary metrics (only valid with the default parameter settings)
        cocoEval.summarize()

        #https://www.aiuai.cn/aifarm854.html
        #cocoEval.eval, cocoEval.evalImgs

        '''
        dimension of precision: [TxRxKxAxM]
        reference: https://zhuanlan.zhihu.com/p/60707912
        cocoEval.eval['precision'] is a 5-dimensional array:
        precision  - [TxRxKxAxM] precision for every evaluation setting
        catIds     - [all] K cat ids to use for evaluation
        iouThrs    - [.5:.05:.95] T=10 IoU thresholds for evaluation
        recThrs    - [0:.01:1] R=101 recall thresholds for evaluation
        areaRng    - [...] A=4 object area ranges for evaluation
        maxDets    - [1 10 100] M=3 thresholds on max detections per image
        dim 1, T: the 10 IoU thresholds from 0.5 to 0.95 in steps of 0.05
        dim 2, R: the 101 recall thresholds from 0 to 1 in steps of 0.01
        dim 3, K: the category index (use 0 for the first category)
        dim 4, A: the object area range (all, small, medium, large)
        dim 5, M: maxDets, the maximum number of detections per image (1, 10, 100)
        coco_eval.eval['precision'][0, :, 0, 0, 2] is therefore the precision at each of
        the 101 recall thresholds for IoU=0.5, the first category, all areas, maxDets=100
        '''
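        # Indexing examples following the conventions above (illustrative; entries equal
        # to -1 mark settings with no ground truth and should be filtered out first):
        #   AP@0.5 for the first category, all areas, maxDets=100:
        #     np.mean(cocoEval.eval['precision'][0, :, 0, 0, 2])
        #   AR over IoU 0.50:0.95 for the first category, all areas, maxDets=100:
        #     np.mean(cocoEval.eval['recall'][:, 0, 0, 2])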

        #evalImgs: per-image detection quality
        #eval: aggregated detection quality over the whole dataset
        '''
        return {
            'image_id':     imgId,
            'category_id':  catId,
            'aRng':         aRng,
            'maxDet':       maxDet,
            'dtIds':        [d['id'] for d in dt],
            'gtIds':        [g['id'] for g in gt],
            'dtMatches':    dtm,
            'gtMatches':    gtm,
            'dtScores':     [d['score'] for d in dt],
            'gtIgnore':     gtIg,
            'dtIgnore':     dtIg,
        }
        self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)
         for catId in catIds
         for areaRng in p.areaRng
         for imgId in p.imgIds
        ]
        
        self.eval = {
            'params': p,
            'counts': [T, R, K, A, M],
            'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            'precision': precision,
            'recall':   recall,
            'scores': scores,
        }
        '''

        # the matching below is adapted from COCOeval.evaluateImg (around line 265 of pycocotools/cocoeval.py)

        p = cocoEval.params
        catIds = p.catIds if p.useCats else [-1]
        recalls = [[-1 for _ in range(len(p.imgIds))] for _ in range(len(catIds))]
        # add 2021.1.13 by ynh
        precision = [[-1 for _ in range(len(p.imgIds))] for _ in range(len(catIds))]
        #loop over categories
        for m_catId, catId in enumerate(catIds):
            #loop over images
            for n_imgId, imgId in enumerate(p.imgIds):
                if p.useCats:
                    gt = cocoEval._gts[imgId, catId]
                    dt = cocoEval._dts[imgId, catId]
                else:
                    gt = [_ for cId in p.catIds for _ in cocoEval._gts[imgId, cId]]
                    dt = [_ for cId in p.catIds for _ in cocoEval._dts[imgId, cId]]
                if len(gt) == 0 and len(dt) == 0:
                    continue

                for g in gt:
                    if g['ignore']:
                        g['_ignore'] = 1
                    else:
                        g['_ignore'] = 0

                # sort dt highest score first, sort gt ignore last
                gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
                gt = [gt[i] for i in gtind]
                dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
                #keep at most the top 100 detections (maxDets)
                dt = [dt[i] for i in dtind[0:100]]
                iscrowd = [int(o['iscrowd']) for o in gt]
                # load computed ious
                ious = cocoEval.ious[imgId, catId][:, gtind] if len(cocoEval.ious[imgId, catId]) > 0 else cocoEval.ious[imgId, catId]

                T = 1 # only a single IoU threshold (0.5) is used
                G = len(gt)
                D = len(dt)
                gtm = np.zeros((T, G))
                dtm = np.zeros((T, D))
                gtIg = np.array([g['_ignore'] for g in gt])
                dtIg = np.zeros((T, D))
                if not len(ious) == 0:
                    recall_TPFN = len(gt)
                    # add 2021.1.13 by ynh
                    precision_TPFP = len(dt)
                    # revise 2021.1.13 by ynh
                    object_TP = 0
                    for dind, d in enumerate(dt):
                        iou_thr = 0.5
                        m   = -1
                        for gind, g in enumerate(gt):
                            # if this gt already matched, and not a crowd, continue
                            # only a single IoU threshold (0.5) is used, hence index 0
                            if gtm[0,gind]>0 and not iscrowd[gind]:
                                continue
                            # if dt matched to reg gt, and on ignore gt, stop
                            if m>-1 and gtIg[m]==0 and gtIg[gind]==1:
                                break
                            # continue to next gt unless better match made
                            if ious[dind,gind] < iou_thr:
                                continue
                            # if match successful and best so far, store appropriately
                            iou_thr=ious[dind,gind]
                            m=gind
                        # if a match was made, record it for both dt and gt
                        if m == -1:
                            continue
                        else:
                            # mark the matched pair so the same gt cannot be counted as TP twice
                            gtm[0, m] = dind + 1
                            dtm[0, dind] = m + 1
                            # revise 2021.1.13 by ynh
                            object_TP += 1
                    # revise 2021.1.13 by ynh
                    recalls[m_catId][n_imgId] = float(object_TP/recall_TPFN)
                    # add 2021.1.13 by ynh
                    precision[m_catId][n_imgId] = float(object_TP/precision_TPFP)
                    #print(recalls[m_catId][n_imgId])
                    #print(precision[m_catId][n_imgId])
        recall_list[m_key] = recalls
        # add 2021.1.13 by ynh
        precision_list[m_key] = precision

        # dimension of recall: [TxKxAxM]
        s1 = cocoEval.eval['recall']
        # dimension of precision: [TxRxKxAxM]
        s2 = cocoEval.eval['precision']
        coco_recall_list[m_key] = s1
        coco_precision_list[m_key] = s2
    return recall_list, precision_list, coco_recall_list, coco_precision_list


def fast_eval_recall(results,
                     coco,
                     max_dets,
                     iou_thrs=np.arange(0.5, 0.96, 0.05)):
    if mmcv.is_str(results):
        assert results.endswith('.pkl')
        results = mmcv.load(results)
    elif not isinstance(results, list):
        raise TypeError(
            'results must be a list of numpy arrays or a filename, not {}'.
            format(type(results)))

    gt_bboxes = []
    img_ids = coco.getImgIds()
    for i in range(len(img_ids)):
        ann_ids = coco.getAnnIds(imgIds=img_ids[i])
        ann_info = coco.loadAnns(ann_ids)
        if len(ann_info) == 0:
            gt_bboxes.append(np.zeros((0, 4)))
            continue
        bboxes = []
        for ann in ann_info:
            if ann.get('ignore', False) or ann['iscrowd']:
                continue
            x1, y1, w, h = ann['bbox']
            bboxes.append([x1, y1, x1 + w - 1, y1 + h - 1])
        bboxes = np.array(bboxes, dtype=np.float32)
        if bboxes.shape[0] == 0:
            bboxes = np.zeros((0, 4))
        gt_bboxes.append(bboxes)

    recalls = eval_recalls(
        gt_bboxes, results, max_dets, iou_thrs, print_summary=False)
    ar = recalls.mean(axis=1)
    return ar


def xyxy2xywh(bbox):
    _bbox = bbox.tolist()
    return [
        _bbox[0],
        _bbox[1],
        _bbox[2] - _bbox[0] + 1,
        _bbox[3] - _bbox[1] + 1,
    ]


def proposal2json(dataset, results):
    json_results = []
    for idx in range(len(dataset)):
        img_id = dataset.img_ids[idx]
        bboxes = results[idx]
        for i in range(bboxes.shape[0]):
            data = dict()
            data['image_id'] = img_id
            data['bbox'] = xyxy2xywh(bboxes[i])
            data['score'] = float(bboxes[i][4])
            data['category_id'] = 1
            json_results.append(data)
    return json_results


def det2json(dataset, results):
    json_results = []
    for idx in range(len(dataset)):
        img_id = dataset.img_ids[idx]
        result = results[idx]
        for label in range(len(result)):
            bboxes = result[label]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = dataset.cat_ids[label]
                json_results.append(data)
    return json_results


def segm2json(dataset, results):
    bbox_json_results = []
    segm_json_results = []
    for idx in range(len(dataset)):
        img_id = dataset.img_ids[idx]
        det, seg = results[idx]
        for label in range(len(det)):
            # bbox results
            bboxes = det[label]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['bbox'] = xyxy2xywh(bboxes[i])
                data['score'] = float(bboxes[i][4])
                data['category_id'] = dataset.cat_ids[label]
                bbox_json_results.append(data)

            # segm results
            # some detectors use different score for det and segm
            if len(seg) == 2:
                segms = seg[0][label]
                mask_score = seg[1][label]
            else:
                segms = seg[label]
                mask_score = [bbox[4] for bbox in bboxes]
            for i in range(bboxes.shape[0]):
                data = dict()
                data['image_id'] = img_id
                data['score'] = float(mask_score[i])
                data['category_id'] = dataset.cat_ids[label]
                segms[i]['counts'] = segms[i]['counts'].decode()
                data['segmentation'] = segms[i]
                segm_json_results.append(data)
    return bbox_json_results, segm_json_results


def results2json(dataset, results, out_file):
    result_files = dict()
    if isinstance(results[0], list):
        json_results = det2json(dataset, results)
        result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
        result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
        mmcv.dump(json_results, result_files['bbox'])
    elif isinstance(results[0], tuple):
        json_results = segm2json(dataset, results)
        result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
        result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
        result_files['segm'] = '{}.{}.json'.format(out_file, 'segm')
        mmcv.dump(json_results[0], result_files['bbox'])
        mmcv.dump(json_results[1], result_files['segm'])
    elif isinstance(results[0], np.ndarray):
        json_results = proposal2json(dataset, results)
        result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal')
        mmcv.dump(json_results, result_files['proposal'])
    else:
        raise TypeError('invalid type of results')
    return result_files
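
The per-image lists returned by coco_eval above are indexed as [result_type][category][image], with -1 marking entries where no recall/precision could be computed. A self-contained toy example (the numbers are made up) of how mmdetection_inference.py turns one image's column into an image-level recall:

import numpy as np

# recall_result[res_type][category][image]; -1 means "not computable for this pair"
recall_result = [[[0.5, -1, 1.0],
                  [1.0, 0.75, -1]]]

type_recall = np.array(recall_result[0])   # shape (num_categories, num_images)
img0 = type_recall[:, 0]                   # per-category recalls of the first image
valid = img0[img0 > 0]
image_level_recall = valid.sum() / len(valid) if len(valid) else -1
print(image_level_recall)                  # (0.5 + 1.0) / 2 = 0.75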

temp_bbox_overlaps.py

import numpy as np


def bbox_overlaps(bboxes1, bboxes2, mode='iou'):
    """Calculate the ious between each bbox of bboxes1 and bboxes2.

    Args:
        bboxes1(ndarray): shape (n, 4)
        bboxes2(ndarray): shape (k, 4)
        mode(str): iou (intersection over union) or iof (intersection
            over foreground)

    Returns:
        ious(ndarray): shape (n, k)
    """

    assert mode in ['iou', 'iof']

    bboxes1 = bboxes1.astype(np.float32)
    bboxes2 = bboxes2.astype(np.float32)
    rows = bboxes1.shape[0]
    cols = bboxes2.shape[0]
    ious = np.zeros((rows, cols), dtype=np.float32)
    if rows * cols == 0:
        return ious
    exchange = False
    if bboxes1.shape[0] > bboxes2.shape[0]:
        bboxes1, bboxes2 = bboxes2, bboxes1
        ious = np.zeros((cols, rows), dtype=np.float32)
        exchange = True
    area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (
        bboxes1[:, 3] - bboxes1[:, 1] + 1)
    area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (
        bboxes2[:, 3] - bboxes2[:, 1] + 1)
    for i in range(bboxes1.shape[0]):
        x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])
        y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])
        x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])
        y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])
        overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum(
            y_end - y_start + 1, 0)
        if mode == 'iou':
            union = area1[i] + area2 - overlap
        else:
            union = area1[i] if not exchange else area2
        ious[i, :] = overlap / union
    if exchange:
        ious = ious.T
    return ious
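
A quick self-contained check of bbox_overlaps (the boxes are made up; note the +1 pixel-inclusive convention used throughout these scripts):

import numpy as np
from temp_bbox_overlaps import bbox_overlaps

gt = np.array([[0, 0, 9, 9]], dtype=np.float32)      # 10x10 box, area 100
det = np.array([[5, 5, 14, 14]], dtype=np.float32)   # 10x10 box, area 100
ious = bbox_overlaps(gt, det)
# intersection is 5x5 = 25, union is 100 + 100 - 25 = 175
print(ious)   # [[0.14285715]] ~ 25 / 175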

temp_recall.py

import numpy as np
from terminaltables import AsciiTable

from temp_bbox_overlaps import bbox_overlaps


def _recalls(all_ious, proposal_nums, thrs):

    img_num = all_ious.shape[0]
    total_gt_num = sum([ious.shape[0] for ious in all_ious])

    _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)
    for k, proposal_num in enumerate(proposal_nums):
        tmp_ious = np.zeros(0)
        for i in range(img_num):
            ious = all_ious[i][:, :proposal_num].copy()
            gt_ious = np.zeros((ious.shape[0]))
            if ious.size == 0:
                tmp_ious = np.hstack((tmp_ious, gt_ious))
                continue
            for j in range(ious.shape[0]):
                gt_max_overlaps = ious.argmax(axis=1)
                max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]
                gt_idx = max_ious.argmax()
                gt_ious[j] = max_ious[gt_idx]
                box_idx = gt_max_overlaps[gt_idx]
                ious[gt_idx, :] = -1
                ious[:, box_idx] = -1
            tmp_ious = np.hstack((tmp_ious, gt_ious))
        _ious[k, :] = tmp_ious

    _ious = np.fliplr(np.sort(_ious, axis=1))
    recalls = np.zeros((proposal_nums.size, thrs.size))
    for i, thr in enumerate(thrs):
        recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num)

    return recalls


def set_recall_param(proposal_nums, iou_thrs):
    """Check proposal_nums and iou_thrs and set correct format.
    """
    if isinstance(proposal_nums, list):
        _proposal_nums = np.array(proposal_nums)
    elif isinstance(proposal_nums, int):
        _proposal_nums = np.array([proposal_nums])
    else:
        _proposal_nums = proposal_nums

    if iou_thrs is None:
        _iou_thrs = np.array([0.5])
    elif isinstance(iou_thrs, list):
        _iou_thrs = np.array(iou_thrs)
    elif isinstance(iou_thrs, float):
        _iou_thrs = np.array([iou_thrs])
    else:
        _iou_thrs = iou_thrs

    return _proposal_nums, _iou_thrs


def eval_recalls(gts,
                 proposals,
                 proposal_nums=None,
                 iou_thrs=None,
                 print_summary=True):
    """Calculate recalls.

    Args:
        gts(list or ndarray): a list of arrays of shape (n, 4)
        proposals(list or ndarray): a list of arrays of shape (k, 4) or (k, 5)
        proposal_nums(int or list of int or ndarray): top N proposals
        thrs(float or list or ndarray): iou thresholds

    Returns:
        ndarray: recalls of different ious and proposal nums
    """

    img_num = len(gts)
    assert img_num == len(proposals)

    proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)

    all_ious = []
    for i in range(img_num):
        if proposals[i].ndim == 2 and proposals[i].shape[1] == 5:
            scores = proposals[i][:, 4]
            sort_idx = np.argsort(scores)[::-1]
            img_proposal = proposals[i][sort_idx, :]
        else:
            img_proposal = proposals[i]
        prop_num = min(img_proposal.shape[0], proposal_nums[-1])
        if gts[i] is None or gts[i].shape[0] == 0:
            ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)
        else:
            ious = bbox_overlaps(gts[i], img_proposal[:prop_num, :4])
        all_ious.append(ious)
    all_ious = np.array(all_ious)
    recalls = _recalls(all_ious, proposal_nums, iou_thrs)
    if print_summary:
        print_recall_summary(recalls, proposal_nums, iou_thrs)
    return recalls


def print_recall_summary(recalls,
                         proposal_nums,
                         iou_thrs,
                         row_idxs=None,
                         col_idxs=None):
    """Print recalls in a table.

    Args:
        recalls(ndarray): calculated from `bbox_recalls`
        proposal_nums(ndarray or list): top N proposals
        iou_thrs(ndarray or list): iou thresholds
        row_idxs(ndarray): which rows(proposal nums) to print
        col_idxs(ndarray): which cols(iou thresholds) to print
    """
    proposal_nums = np.array(proposal_nums, dtype=np.int32)
    iou_thrs = np.array(iou_thrs)
    if row_idxs is None:
        row_idxs = np.arange(proposal_nums.size)
    if col_idxs is None:
        col_idxs = np.arange(iou_thrs.size)
    row_header = [''] + iou_thrs[col_idxs].tolist()
    table_data = [row_header]
    for i, num in enumerate(proposal_nums[row_idxs]):
        row = [
            '{:.3f}'.format(val)
            for val in recalls[row_idxs[i], col_idxs].tolist()
        ]
        row.insert(0, num)
        table_data.append(row)
    table = AsciiTable(table_data)
    print(table.table)


def plot_num_recall(recalls, proposal_nums):
    """Plot Proposal_num-Recalls curve.

    Args:
        recalls(ndarray or list): shape (k,)
        proposal_nums(ndarray or list): same shape as `recalls`
    """
    if isinstance(proposal_nums, np.ndarray):
        _proposal_nums = proposal_nums.tolist()
    else:
        _proposal_nums = proposal_nums
    if isinstance(recalls, np.ndarray):
        _recalls = recalls.tolist()
    else:
        _recalls = recalls

    import matplotlib.pyplot as plt
    f = plt.figure()
    plt.plot([0] + _proposal_nums, [0] + _recalls)
    plt.xlabel('Proposal num')
    plt.ylabel('Recall')
    plt.axis([0, proposal_nums.max(), 0, 1])
    f.show()


def plot_iou_recall(recalls, iou_thrs):
    """Plot IoU-Recalls curve.

    Args:
        recalls(ndarray or list): shape (k,)
        iou_thrs(ndarray or list): same shape as `recalls`
    """
    if isinstance(iou_thrs, np.ndarray):
        _iou_thrs = iou_thrs.tolist()
    else:
        _iou_thrs = iou_thrs
    if isinstance(recalls, np.ndarray):
        _recalls = recalls.tolist()
    else:
        _recalls = recalls

    import matplotlib.pyplot as plt
    f = plt.figure()
    plt.plot(_iou_thrs + [1.0], _recalls + [0.])
    plt.xlabel('IoU')
    plt.ylabel('Recall')
    plt.axis([iou_thrs.min(), 1, 0, 1])
    f.show()
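
A minimal end-to-end check of eval_recalls on a single toy image (data is made up; proposal_nums and iou_thrs are passed explicitly here):

import numpy as np
from temp_recall import eval_recalls

# one image: one GT box and two scored proposals (the second proposal misses the GT)
gts = [np.array([[0, 0, 9, 9]], dtype=np.float32)]
proposals = [np.array([[0, 0, 9, 9, 0.9],
                       [50, 50, 60, 60, 0.8]], dtype=np.float32)]

recalls = eval_recalls(gts, proposals, proposal_nums=[1, 100], iou_thrs=[0.5, 0.75],
                       print_summary=True)
print(recalls)   # the single GT is recovered at every setting, so every entry is 1.0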
