How to compute the similarity of multiple faces at once with insightface

1. Install the environment:

pip install mxnet opencv-python tqdm requests numpy

2. Align/rectify the faces and crop them (a minimal sketch of one way to do this is shown right below)
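
One common approach is to take the five facial landmarks from a detector (e.g. RetinaFace or MTCNN) and warp the face onto the 112x112 template commonly used with ArcFace via a similarity transform. The sketch below is only an illustration under that assumption: the align_face helper and the example landmark coordinates are hypothetical, and the landmarks must come from your own detector.

import cv2
import numpy as np

# 5-point reference template commonly used for ArcFace 112x112 crops
# (left eye, right eye, nose tip, left mouth corner, right mouth corner)
ARCFACE_DST = np.array([
    [38.2946, 51.6963],
    [73.5318, 51.5014],
    [56.0252, 71.7366],
    [41.5493, 92.3655],
    [70.7299, 92.2041]], dtype=np.float32)

def align_face(img, landmarks, size=112):
    """Warp img so that the detected 5 landmarks match the reference template."""
    src = np.asarray(landmarks, dtype=np.float32)         # (5, 2) points from the detector
    M, _ = cv2.estimateAffinePartial2D(src, ARCFACE_DST)  # similarity transform (rotation + scale + translation)
    return cv2.warpAffine(img, M, (size, size))

# usage (landmark coordinates here are made up for illustration):
# img = cv2.imread('face.png')
# landmarks = [[101, 120], [165, 118], [133, 160], [108, 195], [160, 193]]
# aligned = align_face(img, landmarks)  # 112x112 BGR crop ready for the recognition model

The usage script at the end of this post skips landmark alignment and simply crops a fixed box before resizing, which works when the input photos are already roughly centered.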

3. Compute the similarity with insightface's ArcFace model. There is no need to install the insightface package itself; the two scripts below are enough:

'''
face_analysis.py
'''

from __future__ import division
import mxnet as mx
import numpy as np
import os, cv2
import glob
import zipfile
import hashlib
import requests
from tqdm import tqdm

class FaceRecognition:
    def __init__(self, name, download, param_file):
        self.name = name
        self.download = download
        self.param_file = param_file
        self.image_size = (112, 112)
        if download:
            assert param_file

    def prepare(self, ctx_id, batch_size=1):
        if self.param_file:
            pos = self.param_file.rfind('-')
            prefix = self.param_file[0:pos]
            pos2 = self.param_file.rfind('.')
            epoch = int(self.param_file[pos+1:pos2])
            sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
            all_layers = sym.get_internals()
            sym = all_layers['fc1_output']
            if ctx_id>=0:
                ctx = mx.gpu(ctx_id)
            else:
                ctx = mx.cpu()
            model = mx.mod.Module(symbol=sym, context=ctx, label_names = None)
            data_shape = (batch_size,3)+self.image_size
            model.bind(data_shapes=[('data', data_shape)])
            model.set_params(arg_params, aux_params)
            #warmup
            data = mx.nd.zeros(shape=data_shape)
            db = mx.io.DataBatch(data=(data,))
            model.forward(db, is_train=False)
            embedding = model.get_outputs()[0].asnumpy()
            self.model = model
        else:
            pass  # no parameter file was given, so there is nothing to load
    
    def get_embedding(self, data):
        data = mx.nd.array(data)
        db = mx.io.DataBatch(data=(data,))
        self.model.forward(db, is_train=False)
        embedding = self.model.get_outputs()[0].asnumpy()
        return embedding

    def compute_sim(self, img1, img2):
        emb1 = self.get_embedding(img1).flatten()
        emb2 = self.get_embedding(img2).flatten()
        from numpy.linalg import norm
        sim = np.dot(emb1, emb2)/(norm(emb1)*norm(emb2))
        return sim

def get_arcface(name, download=True,
               root='insightface/models', **kwargs):
    if not download:
        return FaceRecognition(name, False, None)
    else:
        _file = get_model_file("arcface_%s"%name, root=root)
        return FaceRecognition(name, True, _file)

def arcface_r100_v1(**kwargs):
    return get_arcface("r100_v1", download=True, **kwargs)

def check_sha1(filename, sha1_hash):
    """Check whether the sha1 hash of the file content matches the expected hash.
    Parameters
    ----------
    filename : str
        Path to the file.
    sha1_hash : str
        Expected sha1 hash in hexadecimal digits.
    Returns
    -------
    bool
        Whether the file content matches the expected hash.
    """
    sha1 = hashlib.sha1()
    with open(filename, 'rb') as f:
        while True:
            data = f.read(1048576)
            if not data:
                break
            sha1.update(data)

    sha1_file = sha1.hexdigest()
    l = min(len(sha1_file), len(sha1_hash))
    return sha1_file[0:l] == sha1_hash[0:l]

def download(url, path=None, overwrite=False, sha1_hash=None):
    """Download an given URL
    Parameters
    ----------
    url : str
        URL to download
    path : str, optional
        Destination path to store downloaded file. By default stores to the
        current directory with same name as in url.
    overwrite : bool, optional
        Whether to overwrite destination file if already exists.
    sha1_hash : str, optional
        Expected sha1 hash in hexadecimal digits. Will ignore existing file when hash is specified
        but doesn't match.
    Returns
    -------
    str
        The file path of the downloaded file.
    """
    if path is None:
        fname = url.split('/')[-1]
    else:
        path = os.path.expanduser(path)
        if os.path.isdir(path):
            fname = os.path.join(path, url.split('/')[-1])
        else:
            fname = path

    if overwrite or not os.path.exists(fname) or (sha1_hash and not check_sha1(fname, sha1_hash)):
        dirname = os.path.dirname(os.path.abspath(os.path.expanduser(fname)))
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        print('Downloading %s from %s...'%(fname, url))
        r = requests.get(url, stream=True)
        if r.status_code != 200:
            raise RuntimeError("Failed downloading url %s"%url)
        total_length = r.headers.get('content-length')
        with open(fname, 'wb') as f:
            if total_length is None: # no content length header
                for chunk in r.iter_content(chunk_size=1024):
                    if chunk: # filter out keep-alive new chunks
                        f.write(chunk)
            else:
                total_length = int(total_length)
                for chunk in tqdm(r.iter_content(chunk_size=1024),
                                  total=int(total_length / 1024. + 0.5),
                                  unit='KB', unit_scale=False, dynamic_ncols=True):
                    f.write(chunk)

        if sha1_hash and not check_sha1(fname, sha1_hash):
            raise UserWarning('File {} is downloaded but the content hash does not match. ' \
                              'The repo may be outdated or download may be incomplete. ' \
                              'If the "repo_url" is overridden, consider switching to ' \
                              'the default repo.'.format(fname))

    return fname

def find_params_file(dir_path):
    if not os.path.exists(dir_path):
        return None
    paths = glob.glob("%s/*.params"%dir_path)
    if len(paths)==0:
        return None
    paths = sorted(paths)
    return paths[-1]

def get_model_file(name, root=os.path.join('insightface', 'models')):
    r"""Return location for the pretrained on local file system.

    This function downloads the model from the online model zoo when it cannot be found locally or its hash does not match.
    The root directory will be created if it doesn't exist.

    Parameters
    ----------
    name : str
        Name of the model.
    root : str, default 'insightface/models'
        Location for keeping the model parameters.

    Returns
    -------
    file_path
        Path to the requested pretrained model file.
    """
    _model_sha1 = {name: checksum for checksum, name in [
        ('95be21b58e29e9c1237f229dae534bd854009ce0', 'arcface_r100_v1'),
        ('', 'arcface_mfn_v1'),
        ('39fd1e087a2a2ed70a154ac01fecaa86c315d01b', 'retinaface_r50_v1'),
        ('2c9de8116d1f448fd1d4661f90308faae34c990a', 'retinaface_mnet025_v1'),
        ('0db1d07921d005e6c9a5b38e059452fc5645e5a4', 'retinaface_mnet025_v2'),
        ('7dd8111652b7aac2490c5dcddeb268e53ac643e6', 'genderage_v1'),
    ]}

    base_repo_url = 'http://insightface.ai/files/'
    _url_format = '{repo_url}models/{file_name}.zip'

    file_name = name
    root = os.path.expanduser(root)
    dir_path = os.path.join(root, name)
    file_path = find_params_file(dir_path)
    #file_path = os.path.join(root, file_name + '.params')
    sha1_hash = _model_sha1[name]
    if file_path is not None:
        if check_sha1(file_path, sha1_hash):
            return file_path
        else:
            print('Mismatch in the content of model file detected. Downloading again.')
    else:
        print('Model file is not found. Downloading.')

    if not os.path.exists(root):
        os.makedirs(root)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)

    zip_file_path = os.path.join(root, file_name + '.zip')
    repo_url = base_repo_url
    if repo_url[-1] != '/':
        repo_url = repo_url + '/'
    download(_url_format.format(repo_url=repo_url, file_name=file_name),
             path=zip_file_path,
             overwrite=True)
    with zipfile.ZipFile(zip_file_path) as zf:
        zf.extractall(dir_path)
    os.remove(zip_file_path)
    file_path = find_params_file(dir_path)

    if check_sha1(file_path, sha1_hash):
        return file_path
    else:
        raise ValueError('Downloaded file has different hash. Please try again.')

With face_analysis.py in place, inference can be run on several images in one batch to compute their similarity, and the resulting embeddings can also be handed over to PyTorch:

from face_analysis import *
import cv2
import numpy as np
import torch

class FaceAnalysis:
    def __init__(self, ctx_id=-1, batch_size=1, rec_name='arcface_r100_v1'):
        # only the arcface_r100_v1 recognition model is wired up here;
        # pass rec_name=None to skip loading a recognition model
        if rec_name is not None:
            self.rec_model = arcface_r100_v1()
        else:
            self.rec_model = None
        if self.rec_model is not None:
            self.rec_model.prepare(ctx_id=ctx_id, batch_size=batch_size)

    def get(self, data):
        # if self.rec_model is not None:
        #     self.rec_model.prepare(ctx_id, batch_size=1)

        embedding = None
        if self.rec_model is not None:
            embedding = self.rec_model.get_embedding(data)
        return embedding

# ctx_id: -1 runs on CPU, any id >= 0 selects that GPU; bind with batch_size=2 since two faces are fed in one batch below
insface_model = FaceAnalysis(ctx_id=-1, batch_size=2)

# preprocess the first face: crop the face region (coordinates are specific to this
# example image), resize to the 112x112 input size, convert BGR->RGB and reorder to NCHW
img = cv2.imread('face.png')
img = cv2.resize(img[19:237, 19:237], (112, 112))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
data = np.transpose(img, (2, 0, 1))
data = np.expand_dims(data, axis=0)

# preprocess the second face in the same way
img2 = cv2.imread('girl.png')
img2 = cv2.resize(img2[19:237, 19:237], (112, 112))
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
data2 = np.transpose(img2, (2, 0, 1))
data2 = np.expand_dims(data2, axis=0)

# stack both preprocessed faces into a single (2, 3, 112, 112) batch and run one forward pass
datas = np.vstack([data, data2])
emb = insface_model.get(datas)
print(emb.shape)
# cosine similarity between the two embeddings
emb1 = emb[0].flatten()
emb2 = emb[1].flatten()
from numpy.linalg import norm
sim = np.dot(emb1, emb2)/(norm(emb1)*norm(emb2))
print(sim)
# embedding_norm = norm(embedding)
# normed_embedding = embedding / embedding_norm
emb = torch.from_numpy(emb)  # hand the embeddings over to PyTorch
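
Once the embeddings live in a torch tensor, the pairwise cosine similarities of the whole batch can be computed in one shot. A minimal sketch continuing from the emb tensor above (F.normalize and the matrix product are standard PyTorch operations):

import torch.nn.functional as F

# emb has shape (batch_size, 512) for arcface_r100_v1; L2-normalize the rows so that
# a matrix product directly gives cosine similarities
normed = F.normalize(emb, dim=1)
sim_matrix = normed @ normed.t()  # (batch_size, batch_size) pairwise similarities
print(sim_matrix[0, 1])           # similarity between the first and second face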

 
