PyTorch model invocation and speed test

Hardware:

(Figure 1: hardware configuration of the test machine)

 

1. Model under test: the PyTorch version of NIMA

Source: https://github.com/truskovskiyk/nima.pytorch

Model: pretrain-model.pth

Speed: roughly 300 ms per image on average. Sample output (per-image predicted mean scores, followed by the average inference time):

5.152941832318902
5.508232474792749
4.2969538709148765
5.8256916594691575
5.338452965952456
5.439441880211234
4.638633335009217
avg time:  304.443359375  ms
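Each of the seven values above is the predicted mean aesthetic score for one test image. NIMA predicts a probability distribution over ten score buckets (1 to 10), and the mean score is the expectation over that distribution. A minimal sketch of the computation, assuming prob is the length-10 softmax output of the model (mean_score and std_score here are illustrative helpers, not the repo's exact functions):

import numpy as np

def mean_score(prob):
    # prob: length-10 probability vector over score buckets 1..10
    buckets = np.arange(1, 11)
    return float(np.sum(buckets * prob))

def std_score(prob):
    # standard deviation of the same score distribution
    buckets = np.arange(1, 11)
    mean = np.sum(buckets * prob)
    return float(np.sqrt(np.sum(((buckets - mean) ** 2) * prob)))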

Test invocation code:


from nima.inference.inference_model import InferenceModel
import os
import time

if __name__ == '__main__':
    path_model = '/Users/xuqiong/code/image/NIMA/nima.pytorch/pretrain-model.pth'
    model = InferenceModel(path_to_model=path_model)

    dir = '/Users/xuqiong/code/image/NIMA/nima.pytorch/test2'
    #dir = '/Users/xuqiong/code/image/data/tid2013/distorted_images'
    filelist = []
    filenames = os.listdir(dir)
    for fn in filenames:
        if fn == '.DS_Store':
            continue
        fullfilename = os.path.join(dir, fn)
        filelist.append(fullfilename)

    timeall = 0
    num_scored = 0  # count images that were scored successfully
    for path_image in filelist:
        try:
            time0 = time.time()
            result = model.predict_from_file(path_image)  # mean score on a 1-10 scale
            time1 = time.time()
            timeall += time1 - time0
            num_scored += 1
            print(result["mean_score"])
        except Exception:
            # skip unreadable or non-image files instead of aborting the benchmark
            continue
    print("avg time: ", timeall * 1000 / num_scored, " ms")

 

2. Model under test: ShuffleNetV2

Source: https://github.com/miaow1988/ShuffleNet_V2_pytorch_caffe/releases

Model: shufflenet_v2_x0.25

Speed: about 18 ms per image on average. Sample output (per-image inference time in ms, followed by the average):

18.815040588378906
22.640228271484375
17.702817916870117
17.43292808532715
17.948150634765625
17.450809478759766
17.821073532104492
avg time:  18.544435501098633  ms

Test invocation code:

#!/usr/bin/env python
# encoding: utf-8

import torch
import torch.nn as nn
import os
import time

import slim
from slim import g_name

class BasicBlock(nn.Module):

    def __init__(self, name, in_channels, out_channels, stride, dilation):
        super(BasicBlock, self).__init__()
        self.g_name = name
        self.in_channels = in_channels
        self.stride = stride
        channels = out_channels//2
        if stride == 1:
            assert in_channels == out_channels
            self.conv = nn.Sequential(
                slim.conv_bn_relu(name + '/conv1', channels, channels, 1),
                slim.conv_bn(name + '/conv2',
                    channels, channels, 3, stride=stride,
                    dilation=dilation, padding=dilation, groups=channels),
                slim.conv_bn_relu(name + '/conv3', channels, channels, 1),
            )
        else:
            self.conv = nn.Sequential(
                slim.conv_bn_relu(name + '/conv1', in_channels, channels, 1),
                slim.conv_bn(name + '/conv2',
                    channels, channels, 3, stride=stride,
                    dilation=dilation, padding=dilation, groups=channels),
                slim.conv_bn_relu(name + '/conv3', channels, channels, 1),
            )
            self.conv0 = nn.Sequential(
                slim.conv_bn(name + '/conv4',
                    in_channels, in_channels, 3, stride=stride,
                    dilation=dilation, padding=dilation, groups=in_channels),
                slim.conv_bn_relu(name + '/conv5', in_channels, channels, 1),
            )
        self.shuffle = slim.channel_shuffle(name + '/shuffle', 2)

    def forward(self, x):
        if self.stride == 1:
            x1 = x[:, :(x.shape[1]//2), :, :]
            x2 = x[:, (x.shape[1]//2):, :, :]
            x = torch.cat((x1, self.conv(x2)), 1)
        else:
            x = torch.cat((self.conv0(x), self.conv(x)), 1)
        return self.shuffle(x)


class Network(nn.Module):

    def __init__(self, num_classes, width_multiplier):
        super(Network, self).__init__()
        assert width_multiplier in (0.25, 0.5, 1.0, 1.5, 2.0)
        self.num_classes = num_classes
        in_channels = 24
        width_config = {
            0.25: (24, 48, 96, 512),
            0.5: (48, 96, 192, 1024),
            1.0: (116, 232, 464, 1024),
            1.5: (176, 352, 704, 1024),
            2.0: (244, 488, 976, 2048),
        }

        # outputs, stride, dilation, blocks, type
        self.network_config = [
            g_name('data/bn', nn.BatchNorm2d(3)),
            slim.conv_bn_relu('stage1/conv', 3, in_channels, 3, 2, 1),
            # g_name('stage1/pool', nn.MaxPool2d(3, 2, 1)),
            g_name('stage1/pool', nn.MaxPool2d(3, 2, 0, ceil_mode=True)),
            (width_config[width_multiplier][0], 2, 1, 4, 'b'),
            (width_config[width_multiplier][1], 2, 1, 8, 'b'), # x16
            (width_config[width_multiplier][2], 2, 1, 4, 'b'), # x32
            slim.conv_bn_relu('conv5', width_config[width_multiplier][2], width_config[width_multiplier][3], 1),
            g_name('pool', nn.AvgPool2d(7, 1)),
            g_name('fc', nn.Conv2d(width_config[width_multiplier][3], self.num_classes, 1)),
        ]
        self.network = []
        for i, config in enumerate(self.network_config):
            if isinstance(config, nn.Module):
                self.network.append(config)
                continue
            out_channels, stride, dilation, num_blocks, stage_type = config
            stage_prefix = 'stage_{}'.format(i - 1)
            blocks = [BasicBlock(stage_prefix + '_1', in_channels,
                out_channels, stride, dilation)]
            for i in range(1, num_blocks):
                blocks.append(BasicBlock(stage_prefix + '_{}'.format(i + 1),
                    out_channels, out_channels, 1, dilation))
            self.network += [nn.Sequential(*blocks)]

            in_channels = out_channels
        self.network = nn.Sequential(*self.network)

        for name, m in self.named_modules():
            if any(map(lambda x: isinstance(m, x), [nn.Linear, nn.Conv1d, nn.Conv2d])):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def trainable_parameters(self):
        # Note: cls_head_list and loc_head_list belong to the detection variant in the
        # upstream repo; this method is not called by the classification benchmark below.
        parameters = [
            {'params': self.cls_head_list.parameters(), 'lr_mult': 1.0},
            {'params': self.loc_head_list.parameters(), 'lr_mult': 1.0},
            # {'params': self.network.parameters(), 'lr_mult': 0.1},
        ]
        for i in range(len(self.network)):
            lr_mult = 0.1 if i in (0, 1, 2, 3, 4, 5) else 1
            parameters.append(
                {'params': self.network[i].parameters(), 'lr_mult': lr_mult}
            )
        return parameters

    def forward(self, x):
        x = self.network(x)
        return x.reshape(x.shape[0], -1)

# NOTE: this class is carried over from nima.pytorch's inference code for reference.
# It depends on NIMA, Transform, default_loader, device, get_mean_score, get_std_score
# and format_output from that repo, and it is not used by the ShuffleNetV2 benchmark
# in __main__ below.
class Model:
    def __init__(self, path_to_model):
        self.transform = Transform().val_transform
        self.model = NIMA(pretrained_base_model=False)
        state_dict = torch.load(path_to_model, map_location=lambda storage, loc: storage)
        self.model.load_state_dict(state_dict)
        self.model = self.model.to(device)
        self.model.eval()

    def predict_from_file(self, image_path):
        image = default_loader(image_path)
        return self.predict(image)

    def predict_from_pil_image(self, image):
        image = image.convert('RGB')
        return self.predict(image)

    def predict(self, image):
        image = self.transform(image)
        image = image.unsqueeze_(0)
        image = image.to(device)
        # Variables with volatile=True are deprecated; torch.no_grad() is the modern equivalent
        with torch.no_grad():
            prob = self.model(image).data.cpu().numpy()[0]

        mean_score = get_mean_score(prob)
        std_score = get_std_score(prob)

        return format_output(mean_score, std_score, prob)

if __name__ == '__main__':

    import PIL.Image
    import torchvision
    import numpy as np

    def assert_diff(a, b):
        if isinstance(a, torch.Tensor):
            a = a.detach().cpu().numpy()
        if isinstance(b, torch.Tensor):
            b = b.detach().cpu().numpy()
        print(a.shape, b.shape)
        a = a.reshape(-1)
        b = b.reshape(-1)
        assert a.shape == b.shape
        diff = np.abs(a - b)
        print('mean diff = %f' % diff.mean())
        assert diff.mean() < 0.001
        print('max diff = %f' % diff.max())
        assert diff.max() < 0.001

    # Initialize a PyTorch model.
    num_classes = 1000
    model_width = 0.25
    net = Network(num_classes, model_width).train(False)
    print(net)
    load_pytorch = '/Users/xuqiong/code/image/Pytorch/shufflenet_v2_x0.25.pth'
    net.load_state_dict(torch.load(load_pytorch, map_location=lambda storage, loc: storage))

    ##############test##############
    dir = '/Users/xuqiong/code/image/NIMA/nima.pytorch/test2'
    filelist = []
    filenames = os.listdir(dir)
    timeall = 0
    for fn in filenames:
        if fn == '.DS_Store':
            continue
        fullfilename = os.path.join(dir, fn)
        filelist.append(fullfilename)

    image_hw = 224
    timeall = 0
    for i in range(0, len(filelist)):
        test = filelist[i]

        img = PIL.Image.open(test).convert('RGB')
        img = torchvision.transforms.functional.resize(img, (image_hw, image_hw))
        img = torchvision.transforms.functional.to_tensor(img).unsqueeze(0).numpy()
        x = torch.tensor(img.copy(), dtype=torch.float32)
        with torch.no_grad():
            time0 = time.time()
            cls_results = net(x)
            time1 = time.time()
            timed = time1 - time0
            timeall = timeall + timed
            print(timed*1000)
        #print("result: ", cls_results.shape)

    print("avg time: ", timeall*1000/len(filelist), " ms")

 
