Grad-CAM: utils.py Source Code Walkthrough

        Grad-CAM takes the gradients of any target concept (for example the logit of one class in a classifier, or even the output of a captioning model) flowing into the last convolutional layer, and produces a coarse localization map that highlights the regions of the image that matter for the prediction. Convolutional neural networks have achieved major breakthroughs on many tasks, but explaining why they predict what they predict remains essential. Class Activation Mapping (CAM) offers a way to identify discriminative regions in image classification (my understanding: it finds the regions of the image that actually drive the classification), but it requires removing the CNN's fully connected layers (the Approach section of the paper discusses this). Grad-CAM, the method proposed in the paper, does not modify the model architecture at all; it is a generalization of CAM and applies to a much wider family of CNN models (image classification, image captioning, VQA).
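
        For reference, these are the two equations from the paper that the code below implements: get_cam_weights computes the per-channel weights \alpha_k^c, and get_cam_image together with the cam[cam < 0] = 0 line computes the ReLU-weighted sum:

\alpha_k^c = \frac{1}{Z} \sum_i \sum_j \frac{\partial y^c}{\partial A_{ij}^k}    % global-average-pool the gradients of class score y^c w.r.t. feature map A^k

L^c_{Grad\text{-}CAM} = \mathrm{ReLU}\Big( \sum_k \alpha_k^c A^k \Big)    % ReLU keeps only features with a positive influence on class c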

        Companion post: Grad-CAM: main_cnn.py Code Walkthrough

        Paper: https://arxiv.org/abs/1610.02391

        The code below is a streamlined version of the official implementation, with my own annotations:

import cv2
import numpy as np


class ActivationsAndGradients:
    # Captures the feature maps A during the forward pass and the gradients A' w.r.t. those maps during the backward pass
    """ Class for extracting activations and
    registering gradients from targeted intermediate layers """

    def __init__(self, model, target_layers, reshape_transform):
        self.model = model
        self.gradients = []
        self.activations = []
        self.reshape_transform = reshape_transform
        self.handles = []
        for target_layer in target_layers:  # iterate over the target feature layers A
            self.handles.append(
                target_layer.register_forward_hook(  # register a hook: when the forward pass reaches target_layer, its output is handed to self.save_activation
                    self.save_activation))
            # Backward compatibility with older pytorch versions:
            if hasattr(target_layer, 'register_full_backward_hook'):  # newer and older PyTorch versions register backward hooks through different functions
                self.handles.append(
                    target_layer.register_full_backward_hook(
                        self.save_gradient))
            else:
                self.handles.append(
                    target_layer.register_backward_hook(
                        self.save_gradient))

    def save_activation(self, module, input, output):
        activation = output  # the layer's output is the activation we want
        if self.reshape_transform is not None:
            activation = self.reshape_transform(activation)
        self.activations.append(activation.cpu().detach())  # move to the CPU, detach from the graph, and store in the activations list

    def save_gradient(self, module, grad_input, grad_output):
        # Gradients are computed in reverse order
        grad = grad_output[0]  # register_full_backward_hook() passes a tuple into save_gradient(), hence the [0]
        if self.reshape_transform is not None:  # reshape_transform is generally only needed for Transformer architectures
            grad = self.reshape_transform(grad)
        self.gradients = [grad.cpu().detach()] + self.gradients  # gradients flow from deep layers to shallow ones, so prepend with '+' rather than append

    def __call__(self, x):  # forward pass; note that it is implemented via the __call__ method
        self.gradients = []
        self.activations = []
        return self.model(x)

    def release(self):
        for handle in self.handles:
            handle.remove()
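
        A quick sanity check of how the hooks fire: a minimal sketch of using ActivationsAndGradients on its own. The torchvision resnet18 and the model.layer4[-1] target layer are illustrative choices of mine, not part of this file:

import torch
from torchvision.models import resnet18

model = resnet18().eval()  # random weights are fine for a shape check
wrapper = ActivationsAndGradients(model, [model.layer4[-1]], reshape_transform=None)

x = torch.randn(1, 3, 224, 224)        # dummy input batch
output = wrapper(x)                    # the forward hooks store the activations
output[0, output.argmax()].backward()  # the backward hooks store the gradients

print(wrapper.activations[0].shape)    # torch.Size([1, 512, 7, 7])
print(wrapper.gradients[0].shape)      # same shape as the activations
wrapper.release()                      # remove the hooks when done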


class GradCAM:
    def __init__(self,
                 model,
                 target_layers,
                 reshape_transform=None,
                 use_cuda=False):
        self.model = model.eval()  # set the model to evaluation mode
        self.target_layers = target_layers
        self.reshape_transform = reshape_transform
        self.cuda = use_cuda
        if self.cuda:
            self.model = model.cuda()

        self.activations_and_grads = ActivationsAndGradients(
            self.model, target_layers, reshape_transform)

    """ Get a vector of weights for every channel in the target layer.
        Methods that return weights channels,
        will typically need to only implement this function. """

    @staticmethod
    def get_cam_weights(grads):
        return np.mean(grads, axis=(2, 3), keepdims=True)  # average over dims 2 and 3 (h and w), giving one weight per channel

    @staticmethod
    def get_loss(output, target_category):
        loss = 0
        for i in range(len(target_category)):
            loss = loss + output[i, target_category[i]]  # i is the i-th image in the batch; a[m, n] indexes a tensor and is equivalent to a[m][n]
        return loss
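
    # Example: with a batch of two images and target_category = [281, 281],
    # get_loss returns output[0, 281] + output[1, 281]; summing the target
    # logits lets a single backward() call populate the gradients for the
    # whole batch at once.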

    def get_cam_image(self, activations, grads):
        weights = self.get_cam_weights(grads)  # per-channel weights: the gradients averaged over the spatial dimensions
        weighted_activations = weights * activations  # weight each channel
        cam = weighted_activations.sum(axis=1)  # sum over the channel dimension
        return cam
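
    # Shape walkthrough (batch B, channels C, spatial H x W):
    #   grads / activations:   (B, C, H, W)
    #   weights:               (B, C, 1, 1)  after spatial averaging
    #   weighted_activations:  (B, C, H, W)  via numpy broadcasting
    #   cam:                   (B, H, W)     after summing over channels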

    @staticmethod
    def get_target_width_height(input_tensor):
        width, height = input_tensor.size(-1), input_tensor.size(-2)
        return width, height

    def compute_cam_per_layer(self, input_tensor):
        activations_list = [a.cpu().data.numpy()
                            for a in self.activations_and_grads.activations]
        grads_list = [g.cpu().data.numpy()
                      for g in self.activations_and_grads.gradients]
        target_size = self.get_target_width_height(input_tensor)  # width and height of the input image

        cam_per_target_layer = []
        # Loop over the saliency image from every layer

        for layer_activations, layer_grads in zip(activations_list, grads_list):
            cam = self.get_cam_image(layer_activations, layer_grads)  # the CAMs for this layer (one per image in the batch)
            # equivalent to a ReLU
            cam[cam < 0] = 0  # works like mute the min-max scale in the function of scale_cam_image
            scaled = self.scale_cam_image(cam, target_size)
            cam_per_target_layer.append(scaled[:, None, :])

        return cam_per_target_layer

    def aggregate_multi_layers(self, cam_per_target_layer):
        cam_per_target_layer = np.concatenate(cam_per_target_layer, axis=1)
        cam_per_target_layer = np.maximum(cam_per_target_layer, 0)
        result = np.mean(cam_per_target_layer, axis=1)
        return self.scale_cam_image(result)
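
    # Shapes: every entry of cam_per_target_layer is (B, 1, H, W); concatenating
    # along axis=1 gives (B, num_layers, H, W), and the mean collapses this back
    # to a single (B, H, W) map, which is then min-max rescaled to [0, 1].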

    @staticmethod
    def scale_cam_image(cam, target_size=None):  # post-processing, not covered in the paper
        result = []
        for img in cam:  # img is the CAM of one image in the batch
            # the next two lines min-max scale every element of the CAM into [0, 1]
            img = img - np.min(img)  # x = x - min(x)
            img = img / (1e-7 + np.max(img))  # x = x / max(x)
            if target_size is not None:  # resize the CAM to the size of the original image
                img = cv2.resize(img, target_size)
            result.append(img)
        result = np.float32(result)

        return result

    def __call__(self, input_tensor, target_category=None):

        if self.cuda:
            input_tensor = input_tensor.cuda()

        # forward pass to get the network's output logits (before softmax)
        output = self.activations_and_grads(input_tensor)  # ActivationsAndGradients defines its forward pass via __call__, so input_tensor can be passed directly
        if isinstance(target_category, int):  # supports computing Grad-CAM for a whole batch at once
            target_category = [target_category] * input_tensor.size(0)  # broadcast the single category to every image in the batch:
                                                                        # e.g. if the category index is 281, this becomes a list of input_tensor.size(0) copies of 281
            # the new target_category list is as long as the number of images in the batch

        if target_category is None:
            target_category = np.argmax(output.cpu().data.numpy(), axis=-1)
            print(f"category id: {target_category}")
        else:
            assert (len(target_category) == input_tensor.size(0))

        self.model.zero_grad()  # clear the model's gradients
        loss = self.get_loss(output, target_category)
        loss.backward(retain_graph=True)  # backward pass; the hooks fire during backprop and save the gradient information

        # In most of the saliency attribution papers, the saliency is
        # computed with a single target layer.
        # Commonly it is the last convolutional layer.
        # Here we support passing a list with multiple target layers.
        # It will compute the saliency image for every image,
        # and then aggregate them (with a default mean aggregation).
        # This gives you more flexibility in case you just want to
        # use all conv layers for example, all Batchnorm layers,
        # or something else.
        cam_per_layer = self.compute_cam_per_layer(input_tensor)  # the CAM for every target layer
        return self.aggregate_multi_layers(cam_per_layer)  # fuse all specified layers; with a single target layer this is effectively a no-op

    def __del__(self):
        self.activations_and_grads.release()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.activations_and_grads.release()
        if isinstance(exc_value, IndexError):
            # Handle IndexError here...
            print(
                f"An exception occurred in CAM with block: {exc_type}. Message: {exc_value}")
            return True
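
        Because __enter__ and __exit__ are defined, GradCAM can also be used as a context manager, which guarantees the hooks get released even if an exception is raised. A minimal sketch (model, target_layers and input_tensor as in the end-to-end example at the bottom of this post):

with GradCAM(model=model, target_layers=target_layers) as cam:
    grayscale_cam = cam(input_tensor=input_tensor, target_category=281)
# leaving the with-block triggers __exit__, which calls release() on the hooks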


def show_cam_on_image(img: np.ndarray,
                      mask: np.ndarray,
                      use_rgb: bool = False,
                      colormap: int = cv2.COLORMAP_JET) -> np.ndarray:
    """ This function overlays the cam mask on the image as an heatmap.
    By default the heatmap is in BGR format.

    :param img: The base image in RGB or BGR format.
    :param mask: The cam mask.
    :param use_rgb: Whether to use an RGB or BGR heatmap, this should be set to True if 'img' is in RGB format.
    :param colormap: The OpenCV colormap to be used.
    :returns: The default image with the cam overlay.
    """

    # 255 * mask rescales grayscale_cam to [0, 255]; uint8 is the usual image dtype; OpenCV's applyColorMap turns it into a color image
    # this line corresponds to the "Color" step in the upper branch of the PPT
    # in the JET colormap, 0 maps to blue and 255 to red, with a gradient in between; OpenCV returns the result in BGR channel order
    heatmap = cv2.applyColorMap(np.uint8(255 * mask), colormap)  # mask is the grayscale_cam passed in

    if use_rgb:
        heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)  # convert to RGB
    heatmap = np.float32(heatmap) / 255  # scale back down to [0, 1]

    if np.max(img) > 1:
        raise Exception(
            "The input image should be np.float32 in the range [0, 1]")

    # produce the final heatmap overlay
    cam = heatmap + img
    cam = cam / np.max(cam)
    return np.uint8(255 * cam)


def center_crop_img(img: np.ndarray, size: int):
    # resize so the shorter side equals `size`, then center-crop to size x size
    h, w, c = img.shape

    if w == h == size:
        return img

    if w < h:
        ratio = size / w
        new_w = size
        new_h = int(h * ratio)
    else:
        ratio = size / h
        new_h = size
        new_w = int(w * ratio)

    img = cv2.resize(img, dsize=(new_w, new_h))

    if new_w == size:
        h = (new_h - size) // 2  # top offset of the crop window
        img = img[h: h+size]
    else:
        w = (new_w - size) // 2  # left offset of the crop window
        img = img[:, w: w+size]

    return img
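
        Putting the pieces together, here is a minimal end-to-end sketch. The resnet18 model, the file name cat.jpg, the ImageNet normalization constants, and the class index 281 (ImageNet's tabby cat) are illustrative assumptions, not something utils.py prescribes:

import torch
from torchvision.models import resnet18

model = resnet18()  # in practice, load your trained weights here
target_layers = [model.layer4[-1]]  # the last conv block, the usual choice

img = cv2.imread("cat.jpg")  # hypothetical input image
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = center_crop_img(img, 224)
img_float = np.float32(img) / 255.0  # show_cam_on_image expects values in [0, 1]

# standard ImageNet normalization, then HWC -> NCHW tensor
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
input_tensor = torch.from_numpy((img_float - mean) / std).permute(2, 0, 1).unsqueeze(0)

cam = GradCAM(model=model, target_layers=target_layers)
grayscale_cam = cam(input_tensor=input_tensor, target_category=281)  # shape (1, 224, 224)
visualization = show_cam_on_image(img_float, grayscale_cam[0], use_rgb=True)
cv2.imwrite("cam.jpg", cv2.cvtColor(visualization, cv2.COLOR_RGB2BGR))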


If this post helped you, give it a like!
