In this article we will implement the same features we previously built with the Camera API, get familiar with how the Camera2 API is used, and take a closer look at the video-recording topics we have not covered in depth before.
The implementation follows exactly the same steps as with the old Camera API; only the API changes. Camera enumerates devices through Camera.CameraInfo, while Camera2 obtains them through CameraManager. The key code is as follows:
```java
private void getDefaultCameraId() {
mCameraManager = (CameraManager) mContext.getSystemService(Context.CAMERA_SERVICE);
try {
String[] cameraList = mCameraManager.getCameraIdList();
for (int i = 0; i < cameraList.length; i++) {
String cameraId = cameraList[i];
if (TextUtils.equals(cameraId, CAMERA_FONT)) {
mCameraId = cameraId;
break;
} else if (TextUtils.equals(cameraId, CAMERA_BACK)) {
mCameraId = cameraId;
break;
}
}
} catch (CameraAccessException e) {
e.printStackTrace();
}
}
/**
* Switch between front and back camera
*/
public void switchCamera() {
if (TextUtils.equals(mCameraId, CAMERA_FONT)) {
mCameraId = CAMERA_BACK;
} else {
mCameraId = CAMERA_FONT;
}
closeCamera();
openCamera(getWidth(), getHeight());
}
```
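The code above assumes the front and back cameras map to the fixed ID strings CAMERA_FONT and CAMERA_BACK. That works on most phones, but the IDs are not guaranteed by the API; a more robust sketch (the method name getCameraIdByFacing is my own, not from the demo) queries CameraCharacteristics.LENS_FACING instead:

```java
//Pick a camera ID by lens facing instead of relying on hard-coded ID strings.
//Usage: getCameraIdByFacing(mCameraManager, CameraCharacteristics.LENS_FACING_BACK)
private String getCameraIdByFacing(CameraManager manager, int facing)
        throws CameraAccessException {
    for (String id : manager.getCameraIdList()) {
        CameraCharacteristics characteristics = manager.getCameraCharacteristics(id);
        Integer lensFacing = characteristics.get(CameraCharacteristics.LENS_FACING);
        if (lensFacing != null && lensFacing == facing) {
            return id;
        }
    }
    return null; //no camera with the requested facing
}
```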
Taking a picture with the Camera2 API is done through an ImageReader. The steps are as follows:
```java
private void setupImageReader() {
//The 2 means the ImageReader can hold at most two images at a time
mImageReader = ImageReader.newInstance(mPreviewSize.getWidth(), mPreviewSize.getHeight(),
ImageFormat.JPEG, 2);
mImageReader.setOnImageAvailableListener(new ImageReader.OnImageAvailableListener() {
@Override
public void onImageAvailable(ImageReader reader) {
mBackgroundHandler.post(new ImageSaver(reader.acquireNextImage()));
}
}, mBackgroundHandler);
}
private static File mImageFile;
private ImageReader mImageReader;
private static class ImageSaver implements Runnable {
private Image mImage;
private ImageSaver(Image image) {
mImage = image;
}
@Override
public void run() {
ByteBuffer byteBuffer = mImage.getPlanes()[0].getBuffer();
byte[] bytes = new byte[byteBuffer.remaining()];
byteBuffer.get(bytes);
FileOutputStream fileOutputStream = null;
try {
fileOutputStream = new FileOutputStream(mImageFile);
fileOutputStream.write(bytes);
} catch (IOException e) {
e.printStackTrace();
} finally {
mImage.close();
if (fileOutputStream != null) {
try {
fileOutputStream.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
}
```
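mBackgroundHandler appears throughout these snippets but is never defined in the article. The usual setup, as in Google's official samples, runs a dedicated HandlerThread so camera callbacks and image saving stay off the UI thread; a sketch:

```java
private HandlerThread mBackgroundThread;
private Handler mBackgroundHandler;

//Call before opening the camera (e.g. from onResume()).
private void startBackgroundThread() {
    mBackgroundThread = new HandlerThread("CameraBackground");
    mBackgroundThread.start();
    mBackgroundHandler = new Handler(mBackgroundThread.getLooper());
}

//Call after closing the camera (e.g. from onPause()).
private void stopBackgroundThread() {
    mBackgroundThread.quitSafely();
    try {
        mBackgroundThread.join();
        mBackgroundThread = null;
        mBackgroundHandler = null;
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}
```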
```java
//Create the CaptureSession
mCameraDevice.createCaptureSession(Arrays.asList(surface, mImageReader.getSurface()), new CameraCaptureSession.StateCallback() {
@Override
public void onConfigured(@NonNull CameraCaptureSession cameraCaptureSession) {
//The camera is already closed
if (null == mCameraDevice) {
return;
}
Log.e(TAG, "onConfigured: ");
// When the session is ready, we start displaying the preview.
mCameraCaptureSessions = cameraCaptureSession;
//Update the preview
updatePreview();
}
@Override
public void onConfigureFailed(@NonNull CameraCaptureSession cameraCaptureSession) {
Toast.makeText(mContext, "Configuration change", Toast.LENGTH_SHORT).show();
}
}, null);
```
This code lives in the method that creates the preview view.
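The onConfigured callback above calls updatePreview(), which is not listed. A minimal sketch, assuming mPreviewRequestBuilder already has the preview Surface as a target, enables continuous auto-focus and starts the repeating request:

```java
private void updatePreview() {
    if (null == mCameraDevice) {
        return;
    }
    try {
        //Continuous auto-focus for preview, then start the repeating preview request.
        mPreviewRequestBuilder.set(CaptureRequest.CONTROL_AF_MODE,
                CaptureRequest.CONTROL_AF_MODE_CONTINUOUS_PICTURE);
        mCameraCaptureSessions.setRepeatingRequest(mPreviewRequestBuilder.build(),
                mCaptureCallback, mBackgroundHandler);
    } catch (CameraAccessException e) {
        e.printStackTrace();
    }
}
```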
Lock the focus
```java
private void lockFocus() {
try {
mPreviewRequestBuilder.set(CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_START);
mCameraCaptureSessions.capture(mPreviewRequestBuilder.build(), mCaptureCallback, mBackgroundHandler);
} catch (CameraAccessException e) {
e.printStackTrace();
}
}
```
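lockFocus() passes an mCaptureCallback that is not shown here. In Google's Camera2Basic sample this callback waits until auto-focus reports a locked state and then fires the still capture; a trimmed sketch of that idea (the mAwaitingFocusLock flag is my own assumption, and lockFocus() would set it to true before sending the trigger):

```java
private boolean mAwaitingFocusLock = false;

private final CameraCaptureSession.CaptureCallback mCaptureCallback =
        new CameraCaptureSession.CaptureCallback() {
    @Override
    public void onCaptureCompleted(@NonNull CameraCaptureSession session,
                                   @NonNull CaptureRequest request,
                                   @NonNull TotalCaptureResult result) {
        if (!mAwaitingFocusLock) {
            return; //ordinary preview frame, nothing to do
        }
        Integer afState = result.get(CaptureResult.CONTROL_AF_STATE);
        //afState == null means a fixed-focus lens; otherwise wait for a locked state.
        if (afState == null
                || afState == CaptureResult.CONTROL_AF_STATE_FOCUSED_LOCKED
                || afState == CaptureResult.CONTROL_AF_STATE_NOT_FOCUSED_LOCKED) {
            mAwaitingFocusLock = false;
            capture();
        }
    }
};
```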
Take the picture
```java
private void capture() {
try {
final CaptureRequest.Builder mCaptureBuilder = mCameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_STILL_CAPTURE);
int rotation = ((Activity) mContext).getWindowManager().getDefaultDisplay().getRotation();
mCaptureBuilder.addTarget(mImageReader.getSurface());
mCaptureBuilder.set(CaptureRequest.JPEG_ORIENTATION, ORIENTATIONS.get(rotation));
CameraCaptureSession.CaptureCallback CaptureCallback = new CameraCaptureSession.CaptureCallback() {
@Override
public void onCaptureCompleted(CameraCaptureSession session, CaptureRequest request, TotalCaptureResult result) {
Toast.makeText(mContext, "Image Saved!", Toast.LENGTH_SHORT).show();
unLockFocus();
updatePreview();
}
};
mCameraCaptureSessions.stopRepeating();
mCameraCaptureSessions.capture(mCaptureBuilder.build(), CaptureCallback, null);
} catch (CameraAccessException e) {
e.printStackTrace();
}
}
```
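capture() looks up the JPEG orientation in an ORIENTATIONS table that is not listed in the article. Assuming a sensor mounted at 90 degrees (the mapping Google's Camera2Basic sample uses), it looks like this:

```java
private static final SparseIntArray ORIENTATIONS = new SparseIntArray();

static {
    //Maps the display rotation to the JPEG orientation for a sensor mounted at 90°.
    ORIENTATIONS.append(Surface.ROTATION_0, 90);
    ORIENTATIONS.append(Surface.ROTATION_90, 0);
    ORIENTATIONS.append(Surface.ROTATION_180, 270);
    ORIENTATIONS.append(Surface.ROTATION_270, 180);
}
```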
Unlock the focus
```java
private void unLockFocus() {
try {
mPreviewRequestBuilder.set(CaptureRequest.CONTROL_AF_TRIGGER, CameraMetadata.CONTROL_AF_TRIGGER_CANCEL);
//mCameraCaptureSession.capture(mCaptureRequestBuilder.build(), null, mCameraHandler);
mCameraCaptureSessions.setRepeatingRequest(mPreviewRequestBuilder.build(), null, mBackgroundHandler);
} catch (CameraAccessException e) {
e.printStackTrace();
}
}
```
Video recording again relies on MediaRecorder. The steps are as follows:
```java
private void closePreviewSession() {
if (null != mCameraCaptureSessions) {
mCameraCaptureSessions.close();
mCameraCaptureSessions = null;
}
}
private void setUpMediaRecorder() throws IOException {
mMediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
mMediaRecorder.setVideoSource(MediaRecorder.VideoSource.SURFACE);
mMediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);
mVideoPath = getOutputMediaFile(MEDIA_TYPE_VIDEO);
mMediaRecorder.setOutputFile(mVideoPath.getAbsolutePath());
mMediaRecorder.setVideoEncodingBitRate(10000000);
mMediaRecorder.setVideoFrameRate(30);
mMediaRecorder.setVideoSize(mVideoSize.getWidth(), mVideoSize.getHeight());
mMediaRecorder.setVideoEncoder(MediaRecorder.VideoEncoder.H264);
mMediaRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC);
int rotation = ((Activity) mContext).getWindowManager().getDefaultDisplay().getRotation();
switch (mSensorOrientation) {
case SENSOR_ORIENTATION_DEFAULT_DEGREES:
mMediaRecorder.setOrientationHint(DEFAULT_ORIENTATIONS.get(rotation));
break;
case SENSOR_ORIENTATION_INVERSE_DEGREES:
mMediaRecorder.setOrientationHint(INVERSE_ORIENTATIONS.get(rotation));
break;
}
mMediaRecorder.prepare();
}
```
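setUpMediaRecorder() refers to SENSOR_ORIENTATION_DEFAULT_DEGREES / SENSOR_ORIENTATION_INVERSE_DEGREES and two orientation tables that are not listed here; getOutputMediaFile() is simply the demo's helper for building the output path. The constants below follow Google's Camera2Video sample:

```java
private static final int SENSOR_ORIENTATION_DEFAULT_DEGREES = 90;
private static final int SENSOR_ORIENTATION_INVERSE_DEGREES = 270;
private static final SparseIntArray DEFAULT_ORIENTATIONS = new SparseIntArray();
private static final SparseIntArray INVERSE_ORIENTATIONS = new SparseIntArray();

static {
    //Sensor mounted at 90° (most back cameras).
    DEFAULT_ORIENTATIONS.append(Surface.ROTATION_0, 90);
    DEFAULT_ORIENTATIONS.append(Surface.ROTATION_90, 0);
    DEFAULT_ORIENTATIONS.append(Surface.ROTATION_180, 270);
    DEFAULT_ORIENTATIONS.append(Surface.ROTATION_270, 180);
}

static {
    //Sensor mounted at 270° (typically front cameras).
    INVERSE_ORIENTATIONS.append(Surface.ROTATION_0, 270);
    INVERSE_ORIENTATIONS.append(Surface.ROTATION_90, 180);
    INVERSE_ORIENTATIONS.append(Surface.ROTATION_180, 90);
    INVERSE_ORIENTATIONS.append(Surface.ROTATION_270, 0);
}
```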
```java
private void startRecordingVideo() {
if (null == mCameraDevice || !isAvailable() || null == mPreviewSize) {
return;
}
try {
closePreviewSession();
setUpMediaRecorder();
SurfaceTexture texture = getSurfaceTexture();
assert texture != null;
texture.setDefaultBufferSize(mPreviewSize.getWidth(), mPreviewSize.getHeight());
mPreviewRequestBuilder = mCameraDevice.createCaptureRequest(CameraDevice.TEMPLATE_RECORD);
List<Surface> surfaces = new ArrayList<>();
// Set up Surface for the camera preview
Surface previewSurface = new Surface(texture);
surfaces.add(previewSurface);
mPreviewRequestBuilder.addTarget(previewSurface);
// Set up Surface for the MediaRecorder
Surface recorderSurface = mMediaRecorder.getSurface();
surfaces.add(recorderSurface);
mPreviewRequestBuilder.addTarget(recorderSurface);
// Start a capture session
// Once the session starts, we can update the UI and start recording
mCameraDevice.createCaptureSession(surfaces, new CameraCaptureSession.StateCallback() {
@Override
public void onConfigured(@NonNull CameraCaptureSession cameraCaptureSession) {
mCameraCaptureSessions = cameraCaptureSession;
updatePreview();
Toast.makeText(mContext, "start record video success", Toast.LENGTH_SHORT).show();
Log.e(TAG, "onConfigured: "+Thread.currentThread().getName());
// Start recording
mMediaRecorder.start();
}
@Override
public void onConfigureFailed(@NonNull CameraCaptureSession cameraCaptureSession) {
Toast.makeText(mContext, "Failed", Toast.LENGTH_SHORT).show();
}
}, mBackgroundHandler);
} catch (CameraAccessException | IOException e) {
e.printStackTrace();
}
}
private void stopRecordingVideo() {
// Stop recording
mMediaRecorder.stop();
mMediaRecorder.reset();
Toast.makeText(mContext, "Video saved: " + mVideoPath.getAbsolutePath(),
Toast.LENGTH_SHORT).show();
createCameraPreview();
}
```
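One hedged refinement: MediaRecorder.stop() can throw on some devices if capture requests are still in flight, so a commonly suggested workaround (an assumption to verify, not something from the article's demo) is to stop the repeating request first:

```java
//Defensive variant of stopRecordingVideo(); stopRecordingVideoSafely is a hypothetical name.
private void stopRecordingVideoSafely() {
    try {
        //Stop the repeating request and flush in-flight captures before stopping the recorder.
        mCameraCaptureSessions.stopRepeating();
        mCameraCaptureSessions.abortCaptures();
    } catch (CameraAccessException e) {
        e.printStackTrace();
    }
    try {
        mMediaRecorder.stop();
    } catch (RuntimeException e) {
        //stop() also throws if recording was stopped before any valid data was written.
        e.printStackTrace();
    }
    mMediaRecorder.reset();
    Toast.makeText(mContext, "Video saved: " + mVideoPath.getAbsolutePath(),
            Toast.LENGTH_SHORT).show();
    createCameraPreview();
}
```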
We also implement the same simple watermark feature we had with the old Camera API: take every frame delivered by the camera callback, process it, and draw it onto another SurfaceView.
We already used the ImageReader class when taking pictures; handling the per-frame callback relies on the same class. First add its Surface as a target of the preview request:
```java
mPreviewRequestBuilder.addTarget(mImageReader.getSurface());
```
With this target added we receive every frame from the camera:
```java
private void setupImageReader() {
//maxImages = 1: the ImageReader holds at most one image at a time
mImageReader = ImageReader.newInstance(mPreviewSize.getWidth(), mPreviewSize.getHeight(),
ImageFormat.JPEG, 1);
mImageReader.setOnImageAvailableListener(new ImageReader.OnImageAvailableListener() {
@Override
public void onImageAvailable(ImageReader reader) {
//reader.acquireNextImage() and img.close() must both be called here, otherwise the callback stops firing
Image img = reader.acquireNextImage();
img.close();
}
}, mBackgroundHandler);
}
```
Here the preview callback and the capture callback are handled as two separate cases. One thing to pay attention to:
Image img = reader.acquireNextImage(); img.close();
You must take the Image from every frame and close it, otherwise the callback fires only once. Another thing to note is the format of each frame, which is currently set to JPEG; this format turns out to be a bit of a detour, as explained below.
```java
private void setupImageReader() {
//maxImages = 1: the ImageReader holds at most one image at a time
mImageReader = ImageReader.newInstance(mPreviewSize.getWidth(), mPreviewSize.getHeight(),
ImageFormat.JPEG, 1);
mImageReader.setOnImageAvailableListener(new ImageReader.OnImageAvailableListener() {
@Override
public void onImageAvailable(ImageReader reader) {
switch (mState) {
case STATE_PREVIEW:
//reader.acquireNextImage() and img.close() must be called here, otherwise the callback stops firing
Image img = reader.acquireNextImage();
if (mIsAddWaterMark) {
try {
//Get the image's byte array
Image.Plane[] planes = img.getPlanes();
ByteBuffer buffer = planes[0].getBuffer();
buffer.rewind();
byte[] data = new byte[buffer.capacity()];
buffer.get(data);
//Decode a Bitmap from the byte array
Bitmap bitmap = BitmapFactory.decodeByteArray(data, 0, data.length);
//The decoded image has the preview size; scale it so it fits completely inside the watermark preview
bitmap = ImageUtil.zoomBitmap(bitmap, mWaterMarkPreview.getWidth(),
mWaterMarkPreview.getHeight());
//Rotate the image: 90 degrees for the back camera, 270 degrees for the front camera
bitmap = BitmapUtils.rotateBitmap(bitmap, mCameraId.equals(CAMERA_BACK) ? 90 : 270);
//Text watermark
bitmap = BitmapUtils.drawTextToCenter(mContext, bitmap,
System.currentTimeMillis() + "", 16, Color.RED);
// Lock the watermark SurfaceView's canvas
Canvas canvas = mWaterMarkPreview.getHolder().lockCanvas();
if (canvas == null) {
    //Close the Image even on the early return, otherwise the ImageReader stalls
    img.close();
    return;
}
canvas.drawBitmap(bitmap, 0, 0, new Paint());
mWaterMarkPreview.getHolder().unlockCanvasAndPost(canvas);
} catch (Exception e) {
e.printStackTrace();
}
}
img.close();
break;
case STATE_CAPTURE:
mBackgroundHandler.post(new ImageSaver(reader.acquireNextImage()));
break;
}
}
}, mBackgroundHandler);
}
```
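The watermark code above calls ImageUtil.zoomBitmap, BitmapUtils.rotateBitmap and BitmapUtils.drawTextToCenter from the demo project, none of which are listed in the article. Plausible stand-in implementations (assumptions, not the author's actual helpers) could look like this:

```java
//Hypothetical stand-ins for the demo's ImageUtil / BitmapUtils helpers.
public final class BitmapHelpers {

    //Scale a bitmap to the requested width and height.
    public static Bitmap zoomBitmap(Bitmap src, int dstWidth, int dstHeight) {
        return Bitmap.createScaledBitmap(src, dstWidth, dstHeight, true);
    }

    //Rotate a bitmap by the given number of degrees.
    public static Bitmap rotateBitmap(Bitmap src, float degrees) {
        Matrix matrix = new Matrix();
        matrix.postRotate(degrees);
        return Bitmap.createBitmap(src, 0, 0, src.getWidth(), src.getHeight(), matrix, true);
    }

    //Draw a text watermark roughly in the center of a mutable copy of the bitmap.
    public static Bitmap drawTextToCenter(Context context, Bitmap src, String text,
                                          int textSizeSp, int color) {
        Bitmap result = src.copy(Bitmap.Config.ARGB_8888, true);
        Canvas canvas = new Canvas(result);
        Paint paint = new Paint(Paint.ANTI_ALIAS_FLAG);
        paint.setColor(color);
        paint.setTextSize(textSizeSp * context.getResources().getDisplayMetrics().scaledDensity);
        Rect bounds = new Rect();
        paint.getTextBounds(text, 0, text.length(), bounds);
        canvas.drawText(text,
                (result.getWidth() - bounds.width()) / 2f,
                (result.getHeight() + bounds.height()) / 2f,
                paint);
        return result;
    }
}
```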
The same function above distinguishes between returning frame data and taking a picture. The code inside is roughly the same as with the old Camera API, but there are a few pitfalls.
When the ImageReader is set to ImageFormat.YUV_420_888, the frames come back noticeably more smoothly. However, to get the captured photo you then have to convert YUV -> NV21 -> JPEG, and the image that comes out of this conversion has a green overlay on it, which looks bad.
Symptom of the problem
I could not find a good fix for this format-conversion problem, so I fell back to initializing the ImageReader with JPEG output. The remaining problem is that with JPEG output the camera feed visibly stutters on lower-end phones, so the conversion issue above still needs solving, but for now I have no solution.
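For reference, the green overlay after a YUV_420_888 -> NV21 -> JPEG conversion is usually caused by copying the chroma planes while ignoring their rowStride / pixelStride, so the U/V bytes end up misaligned. A stride-aware conversion sketch (an assumption I have not verified on the low-end devices mentioned above); the resulting NV21 buffer can then be passed to YuvImage.compressToJpeg():

```java
//Stride-aware YUV_420_888 -> NV21 conversion. Ignoring rowStride/pixelStride on the
//chroma planes is the usual cause of a green or pink cast in the converted image.
private static byte[] yuv420888ToNv21(Image image) {
    int width = image.getWidth();
    int height = image.getHeight();
    byte[] nv21 = new byte[width * height * 3 / 2];

    //Copy the Y plane row by row, honoring its row stride.
    Image.Plane yPlane = image.getPlanes()[0];
    ByteBuffer yBuffer = yPlane.getBuffer();
    int yRowStride = yPlane.getRowStride();
    int pos = 0;
    for (int row = 0; row < height; row++) {
        yBuffer.position(row * yRowStride);
        yBuffer.get(nv21, pos, width);
        pos += width;
    }

    //Interleave V then U (NV21 order), honoring row and pixel strides.
    Image.Plane uPlane = image.getPlanes()[1];
    Image.Plane vPlane = image.getPlanes()[2];
    ByteBuffer uBuffer = uPlane.getBuffer();
    ByteBuffer vBuffer = vPlane.getBuffer();
    int chromaRowStride = uPlane.getRowStride();
    int chromaPixelStride = uPlane.getPixelStride();
    for (int row = 0; row < height / 2; row++) {
        for (int col = 0; col < width / 2; col++) {
            int index = row * chromaRowStride + col * chromaPixelStride;
            nv21[pos++] = vBuffer.get(index);
            nv21[pos++] = uBuffer.get(index);
        }
    }
    return nv21;
}
```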
Demo code for this article
The demos we built for Camera and Camera2 are really about getting a basic understanding of the APIs. For use in a real project they are certainly not enough on their own; API compatibility alone raises serious problems. After studying how the APIs are used, here is some reference code worth learning from:
Google's official sample that combines Camera and Camera2 into a preview-and-capture app
The official sample project demonstrating the Camera2 API
An app that records video with Camera2