Principle: use the face detection built into the Android system (the Camera face-detection API) to capture the corresponding face image and save it locally, then compare it against a previously stored photo; a similarity score greater than 0.8 is treated as the same person.
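As a sketch of that decision step (a minimal example, not part of the original project; "userface" is the newly captured face and "face1" is assumed to be a previously stored reference photo, as used by the demo's result button):

FaceCompare faceCompare = new FaceCompare(context); // context: any Activity or Application Context
String dir = context.getExternalFilesDir("file").getPath(); // the directory the demo saves into
double similarity = faceCompare.getCompareResult(dir + "/userface", dir + "/face1");
boolean samePerson = similarity > 0.8; // threshold from the description above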
Java code (the Activity):
public class FaceRecActivity extends ListActivity implements SurfaceHolder.Callback {
private SurfaceView surfaceView;
private SurfaceHolder mHolder;
private FaceCompare mFaceCompareMain;
private Button button;
private Button resultBtn;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_face);
initView();
}
private int screenWidth, screenHeight;
private void initView() {
surfaceView = findViewById(R.id.surface);
mHolder = surfaceView.getHolder();
mHolder.addCallback(this);
Display display = getWindowManager().getDefaultDisplay();
screenWidth = display.getWidth();
screenHeight = display.getHeight();
button = findViewById(R.id.recapture_btn);
resultBtn = findViewById(R.id.result_btn);
button.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
if (mCamera != null) {
try {
mCamera.startPreview();
mCamera.startFaceDetection();
} catch (Exception e) {
e.printStackTrace();
}
}
}
});
resultBtn.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
String path = getExternalFilesDir("file").getPath();
Log.e("-->m", "--->" + mFaceCompareMain.getCompareResult(path + "/userface", path + "/face1"));
}
});
mFaceCompareMain = new FaceCompare(this);
}
private Camera mCamera;
private boolean isLoadFinish = false;
@Override
public void surfaceCreated(SurfaceHolder surfaceHolder) {
isLoadFinish = true;
startCamera();
}
private void startCamera() {
if (isLoadFinish) {
int frontId = findCamera(true);
if (frontId == -1) {
frontId = findCamera(false);
}
if (frontId != -1) {
mCamera = Camera.open(frontId);
}
//no camera available on this device
if (mCamera == null) {
return;
}
try {
mCamera.setFaceDetectionListener(new MyFaceDetectorListener());
mCamera.setPreviewDisplay(mHolder);
} catch (Exception e) {
e.printStackTrace();
}
}
}
@Override
public void surfaceChanged(SurfaceHolder surfaceHolder, int i, int i1, int i2) {
if (mHolder.getSurface() == null) {
return;
}
try {
mCamera.stopPreview();
} catch (Exception e) {
e.printStackTrace();
}
try {
mCamera.setPreviewDisplay(mHolder);
//get the width and height of the current preview surface
int measuredWidth = surfaceView.getMeasuredWidth();
int measuredHeight = surfaceView.getMeasuredHeight();
//set the camera preview parameters accordingly
setCameraParms(mCamera, measuredWidth, measuredHeight);
mCamera.startPreview();
//(re)start face detection; it must be started after the preview is running
startFaceDetection();
} catch (Exception e) {
e.printStackTrace();
}
}
@Override
public void surfaceDestroyed(SurfaceHolder surfaceHolder) {
releaseCamera();
}
private void releaseCamera() {
if (mCamera != null) {
mCamera.stopFaceDetection();
mCamera.stopPreview();
mCamera.release();
mCamera = null;
}
}
/**
 * Start face detection. If getMaxNumDetectedFaces() returns 0, the camera does not support it.
 */
public void startFaceDetection() {
// Try starting Face Detection
Camera.Parameters params = mCamera.getParameters();
// start face detection only *after* preview has started
if (params.getMaxNumDetectedFaces() > 0) {
// mCamera supports face detection, so can start it:
mCamera.startFaceDetection();
} else {
Log.e("tag", "startFaceDetection: " + "不支持");
}
}
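//return the index of the requested camera (front or back), or -1 if none is found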
private int findCamera(boolean isFront) {
Camera.CameraInfo cameraInfo = new Camera.CameraInfo();
int count = Camera.getNumberOfCameras();
for (int i = 0; i < count; i++) {
Camera.getCameraInfo(i, cameraInfo);
if (isFront) {
if (cameraInfo.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
return i;
}
} else {
if (cameraInfo.facing == Camera.CameraInfo.CAMERA_FACING_BACK) {
return i;
}
}
}
return -1;
}
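//takes a picture once a detected face is roughly centered in the preview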
private class MyFaceDetectorListener implements Camera.FaceDetectionListener {
@Override
public void onFaceDetection(Camera.Face[] faces, Camera camera) {
if (faces.length > 0) {
Camera.Face face = faces[0];
RectF rectF = mFaceCompareMain.transForm(face, surfaceView);
//only accept the face when it lies within the middle of the screen (20%-80% in both directions)
if (rectF.left >= screenWidth * 0.2 && rectF.right <= screenWidth * 0.8 && rectF.top >= screenHeight * 0.2 && rectF.bottom <= screenHeight * 0.8) {
camera.takePicture(null, null, fjpgCallback);
camera.stopFaceDetection();
}
}
}
}
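//rotate, scale and compress the captured JPEG, then save it as "userface"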
private Camera.PictureCallback fjpgCallback = new Camera.PictureCallback() {
public void onPictureTaken(byte[] data, Camera camera) {
Bitmap bitmap = mFaceCompareMain.rotateMyBitmap(BitmapFactory.decodeByteArray(data, 0, data.length));
//scale the captured image down and compress it
Bitmap bitmap2 = mFaceCompareMain.compressImage(mFaceCompareMain.transImage(bitmap, 480, 720, 100));
mFaceCompareMain.saveBitmap(bitmap2, getExternalFilesDir("file").getPath());
}
};
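//configure JPEG quality, focus mode and display orientation (the width/height parameters are currently unused)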
private void setCameraParms(Camera camera, int width, int height) {
// configure the camera parameters used for capture
Camera.Parameters parameters = camera.getParameters();
parameters.setJpegQuality(100);
// use continuous autofocus when the camera supports it
if (parameters.getSupportedFocusModes().contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE)) {
parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_PICTURE);
}
//rotate the preview 90 degrees so it displays upright in portrait
camera.setDisplayOrientation(90);
camera.cancelAutoFocus();
camera.setParameters(parameters);
}
@Override
protected void onDestroy() {
super.onDestroy();
releaseCamera();
}
}
Face comparison code:
package com.smartTransferBox.assembly.util;
import android.content.Context;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.Matrix;
import android.graphics.RectF;
import android.hardware.Camera;
import android.view.SurfaceView;
import org.opencv.core.Mat;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfInt;
import org.opencv.core.MatOfRect;
import org.opencv.core.Rect;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import org.opencv.objdetect.CascadeClassifier;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
public class FaceCompare {
//OpenCV cascade classifier used as the face detector
private CascadeClassifier faceDetector;
private Context mContext;
static {
System.loadLibrary("opencv_java3");
}
public FaceCompare(Context context) {
mContext = context;
copyToSD();
}
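//copy the Haar cascade model from assets to external storage so CascadeClassifier can load it from a file path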
private void copyToSD() {
InputStream inputStream;
try {
File filePath = mContext.getExternalFilesDir("file");
String fileName = "haarcascade_frontalface_alt.xml";
if (filePath == null) {
return;
}
if (!filePath.exists()) {
filePath.mkdirs();
}
File faceFile = new File(filePath, fileName);
//the cascade file only needs to be copied out of assets once
if (faceFile.exists()) {
return;
}
inputStream = mContext.getResources().getAssets().open(fileName);
FileOutputStream fileOutputStream = new FileOutputStream(faceFile);
byte[] buffer = new byte[1024];
int count = 0;
while ((count = inputStream.read(buffer)) > 0) {
fileOutputStream.write(buffer, 0, count);
}
fileOutputStream.flush();
fileOutputStream.close();
inputStream.close();
} catch (IOException e) {
e.printStackTrace();
}
}
public double getCompareResult(String path1, String path2) {
return compare_image(path1, path2);
}
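//crop the face from each image, build a grayscale histogram for each, and return the correlation between the two histograms (closer to 1.0 means more similar)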
private double compare_image(String img_1, String img_2) {
try {
Mat mat_1 = conv_Mat(mContext, img_1);
Mat mat_2 = conv_Mat(mContext, img_2);
Mat hist_1 = new Mat();
Mat hist_2 = new Mat();
//intensity (pixel value) range of the histogram
MatOfFloat ranges = new MatOfFloat(0f, 256f);
//number of histogram bins; more bins make the comparison finer (and slower)
MatOfInt histSize = new MatOfInt(1000);
Imgproc.calcHist(Arrays.asList(mat_1), new MatOfInt(0), new Mat(), hist_1, histSize, ranges);
Imgproc.calcHist(Arrays.asList(mat_2), new MatOfInt(0), new Mat(), hist_2, histSize, ranges);
// CORREL: correlation coefficient
return Imgproc.compareHist(hist_1, hist_2, Imgproc.CV_COMP_CORREL);
} catch (Exception e) {
e.printStackTrace();
}
return 0;
}
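//read an image from disk, convert it to grayscale, run the Haar cascade face detector and return the cropped face region (null if no face is found)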
private Mat conv_Mat(Context context, String img_1) {
if (faceDetector == null) {
faceDetector = new CascadeClassifier(context.getExternalFilesDir("file").getPath() + "/haarcascade_frontalface_alt.xml");
}
Mat image0 = Imgcodecs.imread(img_1);
Mat image = new Mat();
//convert to grayscale
Imgproc.cvtColor(image0, image, Imgproc.COLOR_BGR2GRAY);
MatOfRect faceDetections = new MatOfRect();
//detect faces
faceDetector.detectMultiScale(image, faceDetections);
// each rect is the bounding box of a detected face
for (Rect rect : faceDetections.toArray()) {
//crop the first detected face and return it
Mat mat = new Mat(image, rect);
return mat;
}
return null;
}
public Bitmap transImage(Bitmap bitmap, int width, int height, int quality) {
int bitmapWidth = bitmap.getWidth();
int bitmapHeight = bitmap.getHeight();
// scale factors for the target size
float scaleWidth = (float) width / bitmapWidth;
float scaleHeight = (float) height / bitmapHeight;
Matrix matrix = new Matrix();
matrix.postScale(scaleWidth, scaleHeight);
// create the scaled bitmap
Bitmap resizeBitmap = Bitmap.createBitmap(bitmap, 0, 0, bitmapWidth, bitmapHeight, matrix, false);
if (!bitmap.isRecycled()) {
bitmap.recycle();//release the original bitmap, otherwise memory may be exhausted
}
return resizeBitmap;
}
//compress the image until the JPEG data is under roughly 100KB
public Bitmap compressImage(Bitmap image) {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
image.compress(Bitmap.CompressFormat.JPEG, 100, baos);//first pass at quality 100, written into baos
int options = 100;
while (baos.toByteArray().length / 1024 > 100 && options > 10) { //keep compressing while the result is larger than 100KB
baos.reset();//clear baos before the next attempt
options -= 10;//lower the JPEG quality by 10 each round
image.compress(Bitmap.CompressFormat.JPEG, options, baos);
}
ByteArrayInputStream isBm = new ByteArrayInputStream(baos.toByteArray());//wrap the compressed bytes in an input stream
Bitmap bitmap = BitmapFactory.decodeStream(isBm, null, null);//decode the compressed bytes back into a Bitmap
return bitmap;
}
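//write the bitmap as a JPEG to <path>/userface (the file the comparison later reads back)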
public void saveBitmap(Bitmap bitmap, String path) {
File file = new File(path + "/userface");
try {
FileOutputStream fileOutputStream = new FileOutputStream(file);
bitmap.compress(Bitmap.CompressFormat.JPEG, 100, fileOutputStream);
fileOutputStream.flush();
fileOutputStream.close();
} catch (Exception e) {
e.printStackTrace();
}
}
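//map a detected face rectangle from camera driver coordinates (-1000..1000) to view coordinates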
public RectF transForm(Camera.Face face, SurfaceView surfaceView) {
Matrix matrix = new Matrix();
// Need mirror for front camera.
matrix.setScale(-1f, 1f);
// This is the value for android.hardware.Camera.setDisplayOrientation.
matrix.postRotate(90);
// Camera driver coordinates range from (-1000, -1000) to (1000, 1000).
// UI coordinates range from (0, 0) to (width, height).
matrix.postScale(surfaceView.getWidth() / 2000f, surfaceView.getHeight() / 2000f);
matrix.postTranslate(surfaceView.getWidth() / 2f, surfaceView.getHeight() / 2f);
RectF srcRect = new RectF(face.rect);
RectF dstRect = new RectF(0f, 0f, 0f, 0f);
matrix.mapRect(dstRect, srcRect);
return dstRect;
}
//rotate the captured picture so it is stored upright
public Bitmap rotateMyBitmap(Bitmap bmp) {
Matrix matrix = new Matrix();
matrix.postRotate(-90);
Bitmap nbmp2 = Bitmap.createBitmap(bmp, 0, 0, bmp.getWidth(), bmp.getHeight(), matrix, true);
return nbmp2;
}
}
Layout: activity_face contains a SurfaceView (R.id.surface) for the camera preview and two Buttons (R.id.recapture_btn and R.id.result_btn), matching the findViewById calls above.
The .so library (opencv_java3) ships with the OpenCV Android SDK.
In summary: to get high accuracy, the face is constrained to a fixed region of the preview so that the captured images are framed roughly the same way before OpenCV computes the similarity. Without this constraint it is easy to end up with a detected face from which no useful features can be extracted for comparison. As an optimization, the comparison module could be moved to JNI (native code) to reduce processing time.
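A rough sketch of that JNI direction (all names below are hypothetical; the comparison itself would be reimplemented in C++ against OpenCV's native API):

public class NativeFaceCompare {
    static {
        // hypothetical native library containing the C++ comparison code
        System.loadLibrary("native_face_compare");
    }
    // declared in Java, implemented in C++; returns the same correlation-style similarity score
    public static native double compareFaces(String facePath1, String facePath2);
}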