# Visualize feature maps as heatmaps (可视化成为热力图)

# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
#
# from torchvision.utils import make_grid, save_image
#
#
# import argparse
# import os
# import pprint
#
# import torch
# import torch.nn.parallel
# import torch.backends.cudnn as cudnn
#
#
# import _init_paths
# from config import cfg
# from config import update_config
# from core.loss import JointsMSELoss
# # from core.function import validate,validate_six_scale
# from core.function import validate
# from utils.utils import create_logger
#
# import os
# import torch
# import torchvision as tv
# import torchvision.transforms as transforms
# import cv2
# import argparse
# import dataset
# import models
# from PIL import Image
# import numpy as np
#
# def parse_args():
#     parser = argparse.ArgumentParser(description='Train keypoints network')
#     parser.add_argument('--cfg',
#                         help='experiment configure file name',
#                         type=str,
#                         default='/media/zxl/E/zxl/code/experiments/mpii/hgcpef/hg8_256x256_d256x3_adam_lr2.5e-4.yaml')
#     parser.add_argument('opts',
#                         help="Modify config options using the command-line",
#                         default=None,
#                         nargs=argparse.REMAINDER)
#
#     parser.add_argument('--modelDir',
#                         help='model directory',
#                         type=str,
#                         default='')
#     parser.add_argument('--logDir',
#                         help='log directory',
#                         type=str,
#                         default='')
#     parser.add_argument('--dataDir',
#                         help='data directory',
#                         type=str,
#                         default='')
#     parser.add_argument('--prevModelDir',
#                         help='prev Model directory',
#                         type=str,
#                         default='')
#
#     args = parser.parse_args()
#     return args
# # model
# args = parse_args()
# update_config(cfg, args)
#
# logger, final_output_dir, tb_log_dir = create_logger(
#     cfg, args.cfg, 'valid')
#
# logger.info(pprint.pformat(args))
# logger.info(cfg)
#
# # cudnn related setting
# cudnn.benchmark = cfg.CUDNN.BENCHMARK
# torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
# torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
# # get devices
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# print("using {} device.".format(device))
#
# # create model
# model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
#     cfg, is_train=False
# )
# # print('*******************',model)
#
# if cfg.TEST.MODEL_FILE:
#     logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
#     model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
# else:
#     model_state_file = os.path.join(
#         final_output_dir, 'final_state.pth'
#     )
#     logger.info('=> loading model from {}'.format(model_state_file))
#     model.load_state_dict(torch.load(model_state_file))
# net= torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
#
# # net = models.vgg16_bn(pretrained=True).cuda()
# # image pre-process
# transforms_input = transforms.Compose([transforms.Resize((256, 256)),
#                                        transforms.ToTensor(),
#                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
#
# fImg = Image.open("/media/zxl/E/zxl/000003072.jpg").convert('RGB')
# data = transforms_input(fImg).unsqueeze(0).cuda()
# # feature image save path
# FEATURE_FOLDER = "/media/zxl/E/zxl/code/ffffff/"
# if not os.path.exists(FEATURE_FOLDER):
#     os.mkdir(FEATURE_FOLDER)
# # three global vatiable for feature image name
# feature_list = list()
# count = 0
# idx = 0
#
#
# #################
# def get_image_path_for_hook(module):
#     global count
#     image_name = feature_list[count] + ".png"
#     count += 1
#     image_path = os.path.join(FEATURE_FOLDER, image_name)
#     return image_path
#
#
# def hook_func(module, input, output):
#     image_path = get_image_path_for_hook(module)
#     data = output.clone().detach()
#     global idx
#     print(idx, "->", data.shape)
#     idx += 1
#     data = data.data.permute(1, 0, 2, 3)
#     # print('data=',data.shape)
#     save_image(data, image_path, normalize=False)
#
#
#
#
# for name, module in net.named_modules():
#
#     if isinstance(module, torch.nn.Conv2d):
#         print(name)
#         feature_list.append(name)
#         module.register_forward_hook(hook_func)
#
# out = net(data)
#


####################################
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
#
# from torchvision.utils import make_grid, save_image
#
#
# import argparse
# import os
# import pprint
#
# import torch
# import torch.nn.parallel
# import torch.backends.cudnn as cudnn
#
#
# import _init_paths
# from config import cfg
# from config import update_config
# from core.loss import JointsMSELoss
# # from core.function import validate,validate_six_scale
# from core.function import validate
# from utils.utils import create_logger
#
# import os
# import torch
# import torchvision as tv
# import torch.nn.functional as F
# import torchvision.transforms as transforms
# import cv2
# import argparse
# import dataset
# import models
# from PIL import Image
# import numpy as np
#
# def parse_args():
#     parser = argparse.ArgumentParser(description='Train keypoints network')
#     parser.add_argument('--cfg',
#                         help='experiment configure file name',
#                         type=str,
#                         default='/media/zxl/E/zxl/code/experiments/mpii/hgcpef/hg8_256x256_d256x3_adam_lr2.5e-4.yaml')
#     parser.add_argument('opts',
#                         help="Modify config options using the command-line",
#                         default=None,
#                         nargs=argparse.REMAINDER)
#
#     parser.add_argument('--modelDir',
#                         help='model directory',
#                         type=str,
#                         default='')
#     parser.add_argument('--logDir',
#                         help='log directory',
#                         type=str,
#                         default='')
#     parser.add_argument('--dataDir',
#                         help='data directory',
#                         type=str,
#                         default='')
#     parser.add_argument('--prevModelDir',
#                         help='prev Model directory',
#                         type=str,
#                         default='')
#
#     args = parser.parse_args()
#     return args
# # model
# args = parse_args()
# update_config(cfg, args)
#
# logger, final_output_dir, tb_log_dir = create_logger(
#     cfg, args.cfg, 'valid')
#
# logger.info(pprint.pformat(args))
# logger.info(cfg)
#
# # cudnn related setting
# cudnn.benchmark = cfg.CUDNN.BENCHMARK
# torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
# torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
# # get devices
# device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# print("using {} device.".format(device))
#
# # create model
# model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
#     cfg, is_train=False
# )
# # print('*******************',model)
#
# if cfg.TEST.MODEL_FILE:
#     logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
#     model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
# else:
#     model_state_file = os.path.join(
#         final_output_dir, 'final_state.pth'
#     )
#     logger.info('=> loading model from {}'.format(model_state_file))
#     model.load_state_dict(torch.load(model_state_file))
# net= torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
#
# # net = models.vgg16_bn(pretrained=True).cuda()
# # image pre-process
# transforms_input = transforms.Compose([transforms.Resize((256, 256)),
#                                        transforms.ToTensor(),
#                                        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
#
# fImg = Image.open("/media/zxl/E/zxl/000003072.jpg").convert('RGB')
# data = transforms_input(fImg).unsqueeze(0).cuda()
# # feature image save path
# FEATURE_FOLDER = "/media/zxl/E/zxl/code/ffffff/"
# if not os.path.exists(FEATURE_FOLDER):
#     os.mkdir(FEATURE_FOLDER)
# # three global vatiable for feature image name
# feature_list = list()
# count = 0
# idx = 0
#
#
# #################
# def get_image_path_for_hook(module):
#     global count
#     image_name = feature_list[count] + ".png"
#     count += 1
#     image_path = os.path.join(FEATURE_FOLDER, image_name)
#     return image_path
#
# def save_image(tensor, fp, nrow=1, padding=0,
#                normalize=False, range=None, scale_each=False, pad_value=0, format=None):
#     """Save a given Tensor into an image file.
#
#     Args:
#         tensor (Tensor or list): Image to be saved. If given a mini-batch tensor,
#             saves the tensor as a grid of images by calling ``make_grid``.
#         fp (string or file object): A filename or a file object
#         format(Optional):  If omitted, the format to use is determined from the filename extension.
#             If a file object was used instead of a filename, this parameter should always be used.
#         **kwargs: Other arguments are documented in ``make_grid``.
#     """
#     from PIL import Image
#     grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
#                      normalize=normalize, range=range, scale_each=scale_each)
#     # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
#     ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
#     im = Image.fromarray(ndarr)
#     im.save(fp, format=format)
#
# def hook_func(module, input, output):
#     image_path = get_image_path_for_hook(module)
#     data = output.clone().detach()
#     global idx
#     print(idx, "->", data.shape)
#     idx += 1
#     data = data.data.permute(1, 0, 2, 3)
#     # print('data=',data.shape)
#     save_image(data, image_path, normalize=False)
#
# # layer=1
# for name, module in net.named_modules():
#
#     if isinstance(module, torch.nn.Conv2d):
#         print(name)
#         feature_list.append(name)
#         module.register_forward_hook(hook_func)
#
# out = net(data)
#
###########################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from torchvision.utils import make_grid, save_image


import argparse
import os
import pprint

import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn


import _init_paths
from config import cfg
from config import update_config
from core.loss import JointsMSELoss
# from core.function import validate,validate_six_scale
from core.function import validate
from utils.utils import create_logger

import os
import torch
import torchvision as tv
import torch.nn.functional as F
import torchvision.transforms as transforms
import cv2
import argparse
import dataset
import models
from PIL import Image
import numpy as np

def parse_args():
    """Parse command-line arguments for the keypoint-network evaluation script.

    Returns:
        argparse.Namespace with ``cfg``, ``opts``, ``modelDir``, ``logDir``,
        ``dataDir`` and ``prevModelDir`` attributes.
    """
    parser = argparse.ArgumentParser(description='Train keypoints network')

    parser.add_argument(
        '--cfg',
        type=str,
        default='/media/zxl/E/zxl/code/experiments/mpii/hgcpef/hg8_256x256_d256x3_adam_lr2.5e-4.yaml',
        help='experiment configure file name')
    parser.add_argument(
        'opts',
        nargs=argparse.REMAINDER,
        default=None,
        help="Modify config options using the command-line")

    # Optional directory overrides; an empty string means "use the config value".
    for flag, desc in (('--modelDir', 'model directory'),
                       ('--logDir', 'log directory'),
                       ('--dataDir', 'data directory'),
                       ('--prevModelDir', 'prev Model directory')):
        parser.add_argument(flag, type=str, default='', help=desc)

    return parser.parse_args()
# ---- model / data setup (runs at import time; this file is a script) ----
args = parse_args()
update_config(cfg, args)

logger, final_output_dir, tb_log_dir = create_logger(
    cfg, args.cfg, 'valid')

logger.info(pprint.pformat(args))
logger.info(cfg)

# cudnn related setting
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
# get devices
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("using {} device.".format(device))

# create model
# NOTE(review): eval() on a config-derived string is fine for trusted configs
# but resolves the constructor dynamically; getattr(models, cfg.MODEL.NAME)
# would be the safer equivalent.
model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
    cfg, is_train=False
)
# print('*******************',model)

# Load weights: prefer the explicit TEST.MODEL_FILE; otherwise fall back to
# the final checkpoint in the experiment's output directory.
if cfg.TEST.MODEL_FILE:
    logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
    model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
else:
    model_state_file = os.path.join(
        final_output_dir, 'final_state.pth'
    )
    logger.info('=> loading model from {}'.format(model_state_file))
    model.load_state_dict(torch.load(model_state_file))
net= torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()

# net = models.vgg16_bn(pretrained=True).cuda()
# image pre-process: resize to the network input size, then ImageNet
# mean/std normalization.
transforms_input = transforms.Compose([transforms.Resize((256, 256)),
                                       transforms.ToTensor(),
                                       transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

fImg = Image.open("/media/zxl/E/zxl/000003072.jpg").convert('RGB')
data = transforms_input(fImg).unsqueeze(0).cuda()  # batch of one image on GPU
# feature image save path
FEATURE_FOLDER = "/media/zxl/E/zxl/code/ffffff/"
if not os.path.exists(FEATURE_FOLDER):
    os.mkdir(FEATURE_FOLDER)
# three global variables shared with the forward hooks below:
feature_list = list()  # hooked layer names, in registration order
count = 0              # index into feature_list, advanced once per saved image
idx = 0                # running counter used only for progress printing


#################
def get_image_path_for_hook(module):
    """Return the output path for the next feature image.

    Consumes one entry from the module-level ``feature_list`` (layer names
    recorded at hook-registration time) and advances the module-level
    ``count`` so successive hook invocations get successive file names.
    ``module`` is accepted for symmetry with the hook signature but unused.
    """
    global count
    layer_name = feature_list[count]
    count += 1
    return os.path.join(FEATURE_FOLDER, layer_name + ".png")



def save_image(tensor, fp, nrow=1, padding=0,
               normalize=False, range=None, scale_each=False, pad_value=0, format=None):
    """Save the channels of one feature map as a grayscale grid image.

    NOTE(review): this intentionally shadows ``torchvision.utils.save_image``
    imported at the top of the file, and the ``format`` argument is ignored
    (the file extension of ``fp`` decides the format).

    Args:
        tensor (Tensor): feature map batch; only ``tensor[0]`` is used, with
            each channel laid out as a separate grid cell via ``make_grid``.
        fp (string or file object): a filename or a file object.
        Other arguments are documented in ``make_grid``.
    """
    from PIL import Image
    # Treat each channel of the first sample as an independent 1-channel image.
    channels = tensor[0].detach().cpu().unsqueeze(dim=1)
    grid = make_grid(channels, nrow=nrow, padding=padding, pad_value=pad_value,
                     normalize=normalize, range=range, scale_each=scale_each)
    # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer.
    scaled = grid.mul(255).add_(0.5).clamp_(0, 255)
    ndarr = scaled.permute(1, 2, 0).to('cpu', torch.uint8).numpy()
    im = Image.fromarray(ndarr)
    im.convert('L').save(fp)

def hook_func(module, input, output):
    """Forward hook: save a module's output feature map as a JET heatmap.

    Averages the output's channels into a single 2-D map, resizes it to
    256x256, min-max normalizes it to [0, 255], applies the JET colormap
    and writes a PNG named after the hooked layer.

    Args:
        module: the hooked layer (also used to derive the file name).
        input: the layer's input tensors (unused).
        output: the layer's output tensor, assumed (N, C, H, W) with N == 1
            — TODO confirm for multi-GPU DataParallel runs.
    """
    print(module)
    image_path = get_image_path_for_hook(module)
    data = output.clone().detach()
    global idx
    print(idx, "->", data.shape)
    idx += 1
    # (N, C, H, W) -> (C, N, H, W), drop singleton dims -> (C, H, W) on CPU.
    data = data.permute(1, 0, 2, 3).cpu().squeeze()
    # Average over channels while still in float. BUG FIX: the previous
    # (mean * 255).astype(np.uint8) wrapped around (mod 256) for activations
    # outside [0, 1]; the scale/cast was also redundant, since min-max
    # normalization follows below.
    pic = np.mean(data.numpy(), axis=0).astype(np.float32)
    features = cv2.resize(pic, (256, 256))
    # Min-max normalize to [0, 1]; epsilon guards against a constant map.
    features = (features - np.amin(features)) / (np.amax(features) - np.amin(features) + 1e-5)
    features = np.round(features * 255)
    # cv2.COLORMAP_JET == 2 (the previous magic number).
    features = cv2.applyColorMap(np.array(features, np.uint8), cv2.COLORMAP_JET)
    cv2.imwrite(image_path, features)

# layer=1
# name = 'module.conv1'
# Register the saving hook on every Conv2d layer, recording each layer's
# qualified name so the hook can derive its output file name. Registration
# order matches the order the hooks fire during the forward pass, which keeps
# feature_list[count] aligned with the firing hook.
for name, module in net.named_modules():

    if isinstance(module, torch.nn.Conv2d):
        print(name)
        feature_list.append(name)
        module.register_forward_hook(hook_func)
        # module.conv1.register_forward_hook(hook_func)

# One forward pass; the hooks write one heatmap image per Conv2d layer.
out = net(data)

# Tags from the original post: pytorch, 人体姿态估计, 可视化方法, 深度学习, python