https://www.jianshu.com/p/2fe73baa09b8?utm_source=oschina-app
Implementation idea:
1 Process a single image and feed it to the network as input.
2 For a given layer, take that layer's output, features.
3 Since features has shape [batch_size, filter_nums, H, W], extract the result of the first filter, feature.
4 With a single image as input, feature is then a tensor of size [H, W].
5 Convert the tensor to numpy, normalize it to [0, 1], then multiply by 255 so the values lie in [0, 255] (a minimal sketch of this step follows the list).
6 Obtain the grayscale image and save it.
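A minimal sketch of step 5 using plain min-max scaling instead of sigmoid (the full implementation later in this post uses sigmoid; to_grayscale is just a hypothetical helper name):
import numpy as np

def to_grayscale(feature):
    # feature: 2-D numpy array of shape [H, W]
    feature = feature - feature.min()
    feature = feature / (feature.max() + 1e-8)   # normalize to [0, 1]
    return np.round(feature * 255).astype(np.uint8)   # scale to [0, 255]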
Concrete implementation:
Get the network outputs:
up_edge, up_sal, up_sal_f = self.net_bone(images)
Once the workflow above is clear, the code itself is straightforward; a few key points are worth a brief explanation:
1 As the example model I use VGG16 pre-trained on ImageNet.
2 Printing the model structure shows the id of each layer (see the sketch just below this list).
3 Usually the features right after a conv layer are chosen for visualization.
4 The whole procedure is implemented in the class FeatureVisualization.
5 For the normalization to [0, 1] I use the sigmoid function.
The sigmoid step:
pred = np.squeeze(torch.sigmoid(up_sal_f[-1]).cpu().data.numpy())
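A quick way to list the layer ids mentioned in point 2 (a sketch; the indices come from enumerating torchvision's VGG16 feature extractor):
from torchvision import models

vgg_features = models.vgg16(pretrained=True).features
for index, layer in enumerate(vgg_features):
    print(index, layer)   # e.g. index 0 is a Conv2d layer, index 1 a ReLU, ...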
Code flow:
# =>1 feature_map: get the feature map
feature_map = self.net_bone(images)
# =>2 feature_map: squeeze the singleton dimensions, normalize, multiply by 255
# delete the dimension whose shape is 1
pred = np.squeeze(torch.sigmoid(feature_map[-1]).cpu().data.numpy())
feature_map = 255 * pred
# =>3 feature_map: write
cv2.imwrite('.../desk', feature_map)
import cv2
import numpy as np
import torch
from torch.autograd import Variable
from torchvision import models
def preprocess_image(cv2im, resize_im=True):
"""
Processes image for CNNs
Args:
        cv2im (numpy.ndarray): BGR image (as read by cv2.imread) to process
resize_im (bool): Resize to 224 or not
returns:
im_as_var (Pytorch variable): Variable that contains processed float tensor
"""
# mean and std list for channels (Imagenet)
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
# Resize image
if resize_im:
cv2im = cv2.resize(cv2im, (224, 224))
im_as_arr = np.float32(cv2im)
im_as_arr = np.ascontiguousarray(im_as_arr[..., ::-1])
    im_as_arr = im_as_arr.transpose(2, 0, 1)  # Convert array from H,W,C to C,H,W
# Normalize the channels
for channel, _ in enumerate(im_as_arr):
im_as_arr[channel] /= 255
im_as_arr[channel] -= mean[channel]
im_as_arr[channel] /= std[channel]
# Convert to float tensor
im_as_ten = torch.from_numpy(im_as_arr).float()
    # Add a batch dimension at the front. Tensor shape = 1,3,224,224
im_as_ten.unsqueeze_(0)
# Convert to Pytorch variable
im_as_var = Variable(im_as_ten, requires_grad=True)
return im_as_var
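# For reference only (not used below): roughly the same ImageNet preprocessing can be
# expressed with torchvision.transforms. This is a sketch under the assumption that the
# input comes from cv2.imread, so the BGR channel order has to be flipped first.
from torchvision import transforms

imagenet_transform = transforms.Compose([
    transforms.ToPILImage(),                      # expects an RGB H,W,C uint8 array
    transforms.Resize((224, 224)),
    transforms.ToTensor(),                        # to C,H,W float in [0, 1]
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

def preprocess_with_transforms(cv2im):
    # cv2 reads images as BGR; convert to RGB before applying the transform
    rgb = cv2.cvtColor(cv2im, cv2.COLOR_BGR2RGB)
    return imagenet_transform(rgb).unsqueeze(0)   # add the batch dimension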
class FeatureVisualization():
def __init__(self,img_path,selected_layer):
self.img_path=img_path
self.selected_layer=selected_layer
self.pretrained_model = models.vgg16(pretrained=True).features
def process_image(self):
img=cv2.imread(self.img_path)
img=preprocess_image(img)
return img
def get_feature(self):
# input = Variable(torch.randn(1, 3, 224, 224))
input=self.process_image()
print(input.shape)
x=input
for index,layer in enumerate(self.pretrained_model):
x=layer(x)
if (index == self.selected_layer):
return x
def get_single_feature(self):
features=self.get_feature()
print(features.shape)
feature=features[:,0,:,:]
print(feature.shape)
feature=feature.view(feature.shape[1],feature.shape[2])
print(feature.shape)
return feature
def save_feature_to_img(self):
#to numpy
feature=self.get_single_feature()
feature=feature.data.numpy()
        # use sigmoid to map the values to [0,1]
feature= 1.0/(1+np.exp(-1*feature))
# to [0,255]
feature=np.round(feature*255)
print(feature[0])
cv2.imwrite('./img.jpg',feature)
if __name__=='__main__':
# get class
myClass=FeatureVisualization('./input_images/home.jpg',5)
print (myClass.pretrained_model)
myClass.save_feature_to_img()
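As a small extension (not part of the original class), every channel of the selected layer can be dumped instead of only the first filter; save_all_features_to_img below is a hypothetical helper built on top of get_feature:
def save_all_features_to_img(vis, out_dir='./features'):
    # vis: a FeatureVisualization instance
    import os
    os.makedirs(out_dir, exist_ok=True)
    features = vis.get_feature()            # shape [1, C, H, W]
    features = features.data.numpy()[0]     # shape [C, H, W]
    for i, feature in enumerate(features):
        feature = 1.0 / (1 + np.exp(-feature))              # sigmoid to [0, 1]
        feature = np.round(feature * 255).astype(np.uint8)  # to [0, 255]
        cv2.imwrite(os.path.join(out_dir, 'channel_%d.jpg' % i), feature)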
Below is the feature-map visualization for the EGNet model:
def test(self, test_mode=0):
EPSILON = 1e-8
img_num = len(self.test_loader)
time_t = 0.0
name_t = 'EGNet_ResNet50/'
if not os.path.exists(os.path.join(self.save_fold, name_t)): # './Result/saliency/ECSSD/' + 'EGNet_ResNet50/'
os.mkdir(os.path.join(self.save_fold, name_t))
for i, data_batch in enumerate(self.test_loader):
self.config.test_fold = self.save_fold
# print(self.config.test_fold) # ./Result/saliency/ECSSD/
images_, name, im_size = data_batch['image'], data_batch['name'][0], np.asarray(data_batch['size'])
'''
for every epoch => as follow
print(data_batch['name']) print(data_batch['name'][0])
['1.jpg'] 1.jpg
['2.jpg'] 2.jpg
... ...
['1000.jpg'] 1000.jpg
'''
with torch.no_grad():
images = Variable(images_)
print('=>2 self.config.cuda', self.config.cuda) # todo =>cuda?//
self.config.cuda = False
if self.config.cuda:
images = images.cuda()
print('=>22 self.config.cuda', self.config.cuda) # todo =>cuda?//
print('=>images.size:', images.size()) # torch.Size([1, 3, 267, 400])
time_start = time.time()
# /=== === === ===> up_sal_f <=== === ===\
# =>1 up_sal_f: get
up_edge, up_sal, up_sal_f = self.net_bone(images)
# self.net_bone(images)
# torch.cuda.synchronize() # todo =>cuda//
time_end = time.time()
print('=>single_img_time:', time_end - time_start)
time_t = time_t + time_end - time_start
# =>2 up_sal_f: to one, x255
# delete the dimension whose shape is 1
pred = np.squeeze(torch.sigmoid(up_sal_f[-1]).cpu().data.numpy())
multi_fuse = 255 * pred
# cv2.imwrite(os.path.join(self.config.test_fold,name_t, name[:-4] + '.png'), multi_fuse)
# =>3 up_sal_f: path
path_join = os.path.join(self.config.test_fold, name_t, name[:-4] + '.png')
# test_fold = './Result/saliency/ECSSD/
# name_t = 'EGNet_ResNet50/'
# name => 1.jpg
# path_join => ./Result/saliency/ECSSD/ + EGNet_ResNet50/ + 1 + .png
print('=>result_path:', path_join, '\n')
# =>4 up_sal_f: write
cv2.imwrite(path_join, multi_fuse)
# todo get_loader.dataset.save_folder() => test_fold + name_t = './Result/saliency/ECSSD/EGNet_ResNet50/'
# \=== === === ===> up_sal_f <=== === ===/
# /=== === === ===> up_edge <=== === ===\
# =>1 up_edge: get
# up_edge, up_sal, up_sal_f = self.net_bone(images)
# =>2 up_edge: to one
pred_edge = np.squeeze(torch.sigmoid(up_edge[-1]).cpu().data.numpy())
multi_fuse_edge = 255 * pred_edge
# =>3 up_edge: path
path_join_edge = os.path.join(self.config.test_fold, name_t, name[:-4] + '-edge.png')
# =>4 up_edge: write
cv2.imwrite(path_join_edge, multi_fuse_edge)
# \=== === === ===> up_edge <=== === ===/
# /=== === === ===> up_sal <=== === ===\
# =>1 up_sal: get
# up_edge, up_sal, up_sal_f = self.net_bone(images)
# =>2 up_sal_d3, up_sal_d4, up_sal_d5, up_sal_d6: to one, x255
pred_sal_d3 = np.squeeze(torch.sigmoid(up_sal[3]).cpu().data.numpy())
pred_sal_d4 = np.squeeze(torch.sigmoid(up_sal[2]).cpu().data.numpy())
pred_sal_d5 = np.squeeze(torch.sigmoid(up_sal[1]).cpu().data.numpy())
pred_sal_d6 = np.squeeze(torch.sigmoid(up_sal[0]).cpu().data.numpy())
multi_fuse_sal_d3 = 255 * pred_sal_d3
multi_fuse_sal_d4 = 255 * pred_sal_d4
multi_fuse_sal_d5 = 255 * pred_sal_d5
multi_fuse_sal_d6 = 255 * pred_sal_d6
# =>3 up_sal_d3, up_sal_d4, up_sal_d5, up_sal_d6: path
path_join_sal_d3 = os.path.join(self.config.test_fold, name_t, name[:-4] + '-sal_D3.png')
path_join_sal_d4 = os.path.join(self.config.test_fold, name_t, name[:-4] + '-sal_D4.png')
path_join_sal_d5 = os.path.join(self.config.test_fold, name_t, name[:-4] + '-sal_D5.png')
path_join_sal_d6 = os.path.join(self.config.test_fold, name_t, name[:-4] + '-sal_D6.png')
# =>4 up_sal_d3, up_sal_d4, up_sal_d5, up_sal_d6: write
cv2.imwrite(path_join_sal_d3, multi_fuse_sal_d3)
cv2.imwrite(path_join_sal_d4, multi_fuse_sal_d4)
cv2.imwrite(path_join_sal_d5, multi_fuse_sal_d5)
cv2.imwrite(path_join_sal_d6, multi_fuse_sal_d6)
# \=== === === ===> up_sal <=== === ===/
print("--- %s seconds ---" % (time_t))
print('Test Done!')
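One optional touch-up (not in the code above): im_size is read from data_batch but never used. If the network output does not already match the original image size, it could be used to resize the prediction before writing; a sketch under the assumption that im_size holds (height, width):
# assumption: im_size = np.asarray(data_batch['size']) holds (height, width)
pred = np.squeeze(torch.sigmoid(up_sal_f[-1]).cpu().data.numpy())
pred = cv2.resize(pred, (int(im_size[1]), int(im_size[0])))  # cv2.resize expects (width, height)
multi_fuse = 255 * pred
cv2.imwrite(path_join, multi_fuse)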