Python and C++
https://github.com/deepinsight/insightface/issues/481
Code:
https://github.com/deepinsight/insightface/blob/master/cpp-align/FacePreprocess.h
Excerpted from:
https://blog.csdn.net/xiexiecn/article/details/96138691
https://github.com/deepinsight/insightface — the face detection and alignment part of the C++ interface.
from skimage import transform as trans
import numpy as np

# detected landmarks (dst)
dst = np.array([[35.916126, 43.42934 ],
                [86.2129  , 49.016266],
                [55.2765  , 71.023384],
                [32.937046, 87.26657 ],
                [72.59505 , 91.47491 ]], dtype=np.float32)

# reference template for a 112x96 crop; for 112x112, add 8 to each x
src = np.array([[30.2946, 51.6963],
                [65.5318, 51.5014],
                [48.0252, 71.7366],
                [33.5493, 92.3655],
                [62.7299, 92.2041]], dtype=np.float32)

tform = trans.SimilarityTransform()
tform.estimate(dst, src)  # maps the detected landmarks onto the template
M = tform.params
print(M)
The output, which matches the C++ result:

[[ 0.80088554  0.12822716 -6.07010382]
 [-0.12822716  0.80088554 24.34255465]
 [ 0.          0.          1.        ]]
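The src template above is for a 112x96 crop; for 112x112 each x coordinate is shifted by +8 while y stays the same, presumably because the 16 extra pixels of width are split evenly between the two sides. A minimal sketch of the conversion (array names src96/src112 are illustrative, not from the original):

#include <cstdio>

int main() {
    // 112x96 reference landmarks (same values as src above)
    float src96[5][2] = {
        {30.2946f, 51.6963f}, {65.5318f, 51.5014f}, {48.0252f, 71.7366f},
        {33.5493f, 92.3655f}, {62.7299f, 92.2041f}};
    float src112[5][2];
    for (int i = 0; i < 5; ++i) {
        src112[i][0] = src96[i][0] + 8.0f; // x + 8: widen 96 -> 112, centered
        src112[i][1] = src96[i][1];        // y unchanged
    }
    for (int i = 0; i < 5; ++i)
        std::printf("{%.4ff, %.4ff},\n", src112[i][0], src112[i][1]);
    return 0;
}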
// Alignment needs the original image, the detected landmarks (dst), and the reference template (src).
// float v1[5][2] = { // 112x96 template
//     {30.2946f, 51.6963f},
//     {65.5318f, 51.5014f},
//     {48.0252f, 71.7366f},
//     {33.5493f, 92.3655f},
//     {62.7299f, 92.2041f}};
float v1[5][2] = { // 112x112 template: x + 8.0, y unchanged; this is the fixed reference (src)
    {38.2946f, 51.6963f},
    {73.5318f, 51.5014f},
    {56.0252f, 71.7366f},
    {41.5493f, 92.3655f},
    {70.7299f, 92.2041f}};
cv::Mat src(5, 2, CV_32FC1, v1); // wraps v1 directly; no extra copy needed

float v2[5][2] = { // detected landmarks (dst); replace with your detector's output
    {35.916126f, 43.42934f },
    {86.2129f  , 49.016266f},
    {55.2765f  , 71.023384f},
    {32.937046f, 87.26657f },
    {72.59505f , 91.47491f }};
cv::Mat dst(5, 2, CV_32FC1, v2);

cv::Mat m = FacePreprocess::similarTransform(dst, src); // first argument: detected landmarks; second: template
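The returned matrix is 3x3, matching skimage's tform.params. To actually crop, take the top two rows and pass them to cv::warpAffine; a sketch, assuming the original frame is already loaded in a cv::Mat named image (not part of the original snippet):

cv::Mat aligned;
cv::warpAffine(image, aligned, m.rowRange(0, 2), cv::Size(112, 112)); // 2x3 affine part of m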
The C++ code corresponding to the Python package:
namespace FacePreprocess {

    // Column-wise mean, like numpy's mean(axis=0).
    cv::Mat meanAxis0(const cv::Mat &src)
    {
        int num = src.rows;
        int dim = src.cols;

        // x1 y1
        // x2 y2

        cv::Mat output(1, dim, CV_32F);
        for (int i = 0; i < dim; i++)
        {
            float sum = 0;
            for (int j = 0; j < num; j++)
            {
                sum += src.at<float>(j, i);
            }
            output.at<float>(0, i) = sum / num;
        }

        return output;
    }

    // Subtract the row vector B from every row of A.
    cv::Mat elementwiseMinus(const cv::Mat &A, const cv::Mat &B)
    {
        cv::Mat output(A.rows, A.cols, A.type());

        assert(B.cols == A.cols);
        if (B.cols == A.cols)
        {
            for (int i = 0; i < A.rows; i++)
            {
                for (int j = 0; j < B.cols; j++)
                {
                    output.at<float>(i, j) = A.at<float>(i, j) - B.at<float>(0, j);
                }
            }
        }
        return output;
    }

    // Column-wise (population) variance, like numpy's var(axis=0).
    cv::Mat varAxis0(const cv::Mat &src)
    {
        cv::Mat temp_ = elementwiseMinus(src, meanAxis0(src));
        cv::multiply(temp_, temp_, temp_);
        return meanAxis0(temp_);
    }

    // Rank = number of singular values above a small threshold.
    int MatrixRank(cv::Mat M)
    {
        cv::Mat w, u, vt;
        cv::SVD::compute(M, w, u, vt);
        cv::Mat1b nonZeroSingularValues = w > 0.0001;
        int rank = cv::countNonZero(nonZeroSingularValues);
        return rank;
    }
    // References
    // ----------
    // .. [1] "Least-squares estimation of transformation parameters between two
    //        point patterns", Shinji Umeyama, PAMI 1991, DOI: 10.1109/34.88573
    //
    // Author: Jack Yu
    cv::Mat similarTransform(cv::Mat src, cv::Mat dst) {
        int num = src.rows;
        int dim = src.cols;
        cv::Mat src_mean = meanAxis0(src);
        cv::Mat dst_mean = meanAxis0(dst);
        cv::Mat src_demean = elementwiseMinus(src, src_mean);
        cv::Mat dst_demean = elementwiseMinus(dst, dst_mean);
        cv::Mat A = (dst_demean.t() * src_demean) / static_cast<float>(num);
        cv::Mat d(dim, 1, CV_32F);
        d.setTo(1.0f);
        if (cv::determinant(A) < 0) {
            d.at<float>(dim - 1, 0) = -1;
        }
        cv::Mat T = cv::Mat::eye(dim + 1, dim + 1, CV_32F);
        cv::Mat U, S, V;
        cv::SVD::compute(A, S, U, V);
        // Note: cv::SVD::compute(A, w, u, vt) puts V transposed into its last
        // argument, unlike scipy's svd; the transposes below account for this.
        int rank = MatrixRank(A);
        if (rank == 0) {
            // Degenerate input (all points coincide); skimage returns nan here.
            assert(rank != 0);
        } else if (rank == dim - 1) {
            if (cv::determinant(U) * cv::determinant(V) > 0) {
                T.rowRange(0, dim).colRange(0, dim) = U * V;
            } else {
                // s = d[dim - 1]
                // d[dim - 1] = -1
                // T[:dim, :dim] = np.dot(U, np.dot(np.diag(d), V))
                // d[dim - 1] = s
                float s = d.at<float>(dim - 1, 0);
                d.at<float>(dim - 1, 0) = -1;
                cv::Mat diag_ = cv::Mat::diag(d);
                cv::Mat twp = diag_ * V; // np.dot(np.diag(d), V)
                T.rowRange(0, dim).colRange(0, dim) = U * twp;
                d.at<float>(dim - 1, 0) = s;
            }
        } else {
            cv::Mat diag_ = cv::Mat::diag(d);
            cv::Mat twp = diag_ * V.t(); // np.dot(np.diag(d), V.T)
            T.rowRange(0, dim).colRange(0, dim) = -U.t() * twp;
        }
        cv::Mat var_ = varAxis0(src_demean);
        float val = cv::sum(var_).val[0];
        cv::Mat res;
        cv::multiply(d, S, res);
        // Umeyama's closed-form scale: sum(S .* d) divided by the total variance of src.
        float scale = 1.0f / val * cv::sum(res).val[0];
        T.rowRange(0, dim).colRange(0, dim) = -T.rowRange(0, dim).colRange(0, dim).t();
        cv::Mat temp1 = T.rowRange(0, dim).colRange(0, dim); // T[:dim, :dim]
        cv::Mat temp2 = src_mean.t();                        // src_mean.T
        cv::Mat temp3 = temp1 * temp2;                       // np.dot(T[:dim, :dim], src_mean.T)
        cv::Mat temp4 = scale * temp3;
        T.rowRange(0, dim).colRange(dim, dim + 1) = -(temp4 - dst_mean.t());
        T.rowRange(0, dim).colRange(0, dim) *= scale;
        return T;
    }
}
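To check the port against the Python output above, here is a minimal test harness using the same point sets; it assumes the namespace above is saved as FacePreprocess.h (hypothetical path) and OpenCV is linked:

#include <iostream>
#include <opencv2/opencv.hpp>
#include "FacePreprocess.h" // the namespace above, saved locally (assumed path)

int main() {
    float v1[5][2] = { // 112x96 reference template (src)
        {30.2946f, 51.6963f}, {65.5318f, 51.5014f}, {48.0252f, 71.7366f},
        {33.5493f, 92.3655f}, {62.7299f, 92.2041f}};
    float v2[5][2] = { // detected landmarks (dst), same values as the Python example
        {35.916126f, 43.42934f}, {86.2129f, 49.016266f}, {55.2765f, 71.023384f},
        {32.937046f, 87.26657f}, {72.59505f, 91.47491f}};
    cv::Mat src(5, 2, CV_32FC1, v1);
    cv::Mat dst(5, 2, CV_32FC1, v2);
    cv::Mat M = FacePreprocess::similarTransform(dst, src);
    std::cout << M << std::endl; // should print the same matrix as skimage above
    return 0;
}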
The Python equivalent:

from skimage import transform as trans
import cv2

tform = trans.SimilarityTransform()
tform.estimate(dst, src)
M = tform.params[0:2, :]  # warpAffine expects a 2x3 matrix
warped = cv2.warpAffine(img, M, (w, h))  # img is the original frame
Usage
float template_pts[5][2] = { // reference template ("default" is a C++ keyword and cannot be used as a name)
    {30.2946f + 8.0f, 51.6963f},
    {65.5318f + 8.0f, 51.5014f},
    {48.0252f + 8.0f, 71.7366f},
    {33.5493f + 8.0f, 92.3655f},
    {62.7299f + 8.0f, 92.2041f}
}; // +8.0f on x for a 112x112 crop
float detect[5][2] = {
    {30.2946f, 51.6963f},
    {65.5318f, 51.5014f},
    {48.0252f, 71.7366f},
    {33.5493f, 92.3655f},
    {62.7299f, 92.2041f}
}; // placeholder values: fill with the landmarks from your detector
cv::Mat src(5, 2, CV_32FC1, template_pts);
cv::Mat dst(5, 2, CV_32FC1, detect);
cv::Mat M = FacePreprocess::similarTransform(dst, src); // same as skimage.transform.SimilarityTransform
cv::Mat warpImg;
cv::warpAffine(input, warpImg, M.rowRange(0, 2), cv::Size(112, 112)); // input is the original image; warpAffine needs the top 2x3 of M
Reposted from: https://blog.csdn.net/u011956147/article/details/90294249