Android 的 Camera 现在有新的 Camera2 API,但要到 Android 5.0(API 21)之后才能使用,书上的源码用的也是老版本 Camera API,这里简单分析一下预览流程。为了控制篇幅,部分中间的调用流程将会省略,只贴出核心代码。
首先我们肯定是要初始化 EGL,Native 层使用 Handler 的形式来循环处理消息。
// Entry point called from the Java layer: caches the preview window, JVM handle and
// screen/camera parameters, then spins up the dedicated EGL message-loop thread.
// NOTE(review): MSG_EGL_THREAD_CREATE is posted BEFORE pthread_create, so it is the
// first message the new thread's loop will process.
void MVRecordingPreviewController::prepareEGLContext(ANativeWindow* window, JavaVM *g_jvm, jobject obj, int screenWidth, int screenHeight, int cameraFacingId){
LOGI("Creating MVRecordingPreviewController thread");
this->g_jvm = g_jvm;
this->obj = obj;
this->_window = window;
this->screenWidth = screenWidth;
this->screenHeight = screenHeight;
this->facingId = cameraFacingId;
handler->postMessage(new Message(MSG_EGL_THREAD_CREATE));
// Start the thread that loops over the message queue; when the queue is empty
// the loop blocks in wait().
pthread_create(&_threadId, 0, threadStartCallback, this);
}
class MVRecordingPreviewHandler: public Handler {
private:
MVRecordingPreviewController* previewController;
public:
MVRecordingPreviewHandler(MVRecordingPreviewController* previewController, MessageQueue* queue) :
Handler(queue) {
this->previewController = previewController;
}
void handleMessage(Message* msg) {
int what = msg->getWhat();
switch (what) {
case MSG_EGL_THREAD_CREATE:
previewController->initialize();
break;
case MSG_EGL_CREATE_PREVIEW_SURFACE:
previewController->createPreviewSurface();
break;
case MSG_SWITCH_CAMERA_FACING:
previewController->switchCamera();
break;
case MSG_EGL_DESTROY_PREVIEW_SURFACE:
previewController->destroyPreviewSurface();
break;
case MSG_EGL_THREAD_EXIT:
previewController->destroy();
break;
case MSG_RENDER_FRAME:
previewController->renderFrame();
break;
}
}
};
真正的初始化逻辑
// Runs on the EGL thread in response to MSG_EGL_THREAD_CREATE: builds the EGL
// core/context and creates the preview window surface.
bool MVRecordingPreviewController::initialize() {
// NOTE(review): 'attribs' is declared but never passed to eglCore->init() in the
// visible code — presumably EGLCore chooses its own config internally; confirm
// against the EGLCore implementation.
const EGLint attribs[] = { EGL_BUFFER_SIZE, 32, EGL_ALPHA_SIZE, 8, EGL_BLUE_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_RED_SIZE, 8, EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
EGL_SURFACE_TYPE, EGL_WINDOW_BIT, EGL_NONE };
LOGI("Initializing context");
eglCore = new EGLCore();
eglCore->init();
this->createPreviewSurface();
// EGL environment setup is complete at this point.
...
return true;
}
这一步的配置参数需要从 Native 层调用 Java 层 Camera 的 API 来获取。
@TargetApi(Build.VERSION_CODES.ICE_CREAM_SANDWICH)
private CameraConfigInfo setUpCamera(final int id) throws CameraParamSettingException {
forcePreviewSize_640_480();
// forcePreviewSize_1280_720();
// printStackTrace(CameraLoader.class);
try {
// 1、开启Camera
try {
mCamera = getCameraInstance(id);
} catch (CameraParamSettingException e) {
throw e;
}
boolean mHasPermission = hasPermission();
if (!mHasPermission) {
throw new CameraParamSettingException("拍摄权限被禁用或被其他程序占用, 请确认后再录制");
}
Parameters parameters = mCamera.getParameters();
// 2、设置预览照片的图像格式
List supportedPreviewFormats = parameters.getSupportedPreviewFormats();
if (supportedPreviewFormats.contains(ImageFormat.NV21)) {
parameters.setPreviewFormat(ImageFormat.NV21);
} else {
throw new CameraParamSettingException("视频参数设置错误:设置预览图像格式异常");
}
// 3、设置预览照片的尺寸
List supportedPreviewSizes = parameters.getSupportedPreviewSizes();
int previewWidth = VIDEO_WIDTH;
int previewHeight = VIDEO_HEIGHT;
boolean isSupportPreviewSize = isSupportPreviewSize(supportedPreviewSizes, previewWidth, previewHeight);
if (isSupportPreviewSize) {
parameters.setPreviewSize(previewWidth, previewHeight);
} else {
previewWidth = DEFAULT_VIDEO_WIDTH;
previewHeight = DEFAULT_VIDEO_HEIGHT;
isSupportPreviewSize = isSupportPreviewSize(
supportedPreviewSizes, previewWidth, previewHeight);
if (isSupportPreviewSize) {
VIDEO_WIDTH = DEFAULT_VIDEO_WIDTH;
VIDEO_HEIGHT = DEFAULT_VIDEO_HEIGHT;
parameters.setPreviewSize(previewWidth, previewHeight);
} else {
throw new CameraParamSettingException("视频参数设置错误:设置预览的尺寸异常");
}
}
//下面这行设置 有可能导致 返回的图像尺寸和预期不一致
// parameters.setRecordingHint(true);
// 4、设置视频记录的连续自动对焦模式
if (parameters.getSupportedFocusModes().contains(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO)) {
parameters.setFocusMode(Camera.Parameters.FOCUS_MODE_CONTINUOUS_VIDEO);
}
try {
mCamera.setParameters(parameters);
} catch (Exception e) {
throw new CameraParamSettingException("视频参数设置错误");
}
int degress = getCameraDisplayOrientation((Activity) mContext, id);
int cameraFacing = getCameraFacing(id);
return new CameraConfigInfo(degress, previewWidth, previewHeight, cameraFacing);
} catch (Exception e) {
throw new CameraParamSettingException(e.getMessage());
}
}
配置好 Camera 之后,Native 层渲染器的初始化流程如下。
void RecordingPreviewRenderer::init(int degress, bool isVFlip, int textureWidth, int textureHeight, int cameraWidth, int cameraHeight) {
LOGI("enter RecordingPreviewRenderer::init() textureWidth is %d, textureHeight is %d", textureWidth, textureHeight);
this->degress = degress;
this->isVFlip = isVFlip;
textureCoords = new GLfloat[textureCoordsSize];
//下面这个方法通过旋转的角度设置对应的坐标
//如果是前置摄像头再进行镜像左右翻转
//看一个没有旋转的矩阵坐标吧
/*
static GLfloat CAMERA_TEXTURE_NO_ROTATION[8] = {
0.0f, 1.0f,
1.0f, 1.0f,
0.0f, 0.0f,
1.0f, 0.0f
};
*/
this->fillTextureCoords();
this->textureWidth = textureWidth;
this->textureHeight = textureHeight;
this->cameraWidth = cameraWidth;
this->cameraHeight = cameraHeight;
//1、初始化fbo绘制的program
mCopier = new GPUTextureFrameCopier();
mCopier->init();
//初始化屏幕绘制的program
mRenderer = new VideoGLSurfaceRender();
mRenderer->init(textureWidth, textureHeight);
//初始化扩展纹理
cameraTexFrame = new GPUTextureFrame();
cameraTexFrame->createTexture();
//初始化fbo的输入纹理
glGenTextures(1, &inputTexId);
checkGlError("glGenTextures inputTexId");
glBindTexture(GL_TEXTURE_2D, inputTexId);
checkGlError("glBindTexture inputTexId");
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
GLint internalFormat = GL_RGBA;
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, (GLsizei) textureWidth, (GLsizei) textureHeight, 0, internalFormat, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, 0);
//初始化fbo的输出纹理
glGenTextures(1, &outputTexId);
checkGlError("glGenTextures outputTexId");
glBindTexture(GL_TEXTURE_2D, outputTexId);
checkGlError("glBindTexture outputTexId");
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, (GLsizei) textureWidth, (GLsizei) textureHeight, 0, internalFormat, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, 0);
//初始化fbo
glGenFramebuffers(1, &FBO);
checkGlError("glGenFramebuffers");
//初始化旋转纹理
glGenTextures(1, &rotateTexId);
checkGlError("glGenTextures rotateTexId");
glBindTexture(GL_TEXTURE_2D, rotateTexId);
checkGlError("glBindTexture rotateTexId");
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
if (degress == 90 || degress == 270)
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, cameraHeight, cameraWidth, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
else
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, cameraWidth, cameraHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, 0);
//暂停时候的纹理
mixFilterId = -1;
glGenTextures(1, &pausedTexId);
checkGlError("glGenTextures pausedTexId");
glBindTexture(GL_TEXTURE_2D, pausedTexId);
checkGlError("glBindTexture pausedTexId");
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, (GLsizei) textureWidth, (GLsizei) textureHeight, 0, internalFormat, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, 0);
LOGI("leave RecordingPreviewRenderer::init()");
}
关于上面的初始化主要需要注意:
片段着色器的纹理需要使用OpenGL ES扩展纹理类型,最终的着色器代码
// Fragment shader for sampling the camera's SurfaceTexture: it must declare the
// GL_OES_EGL_image_external extension and use samplerExternalOES instead of a
// plain sampler2D.
// Fixed: string literals are const; binding one to 'char*' is ill-formed in
// C++11 and later (and writing through it would be UB).
static const char* GPU_FRAME_FRAGMENT_SHADER =
		"#extension GL_OES_EGL_image_external : require\n"
		"precision mediump float;\n"
		"uniform samplerExternalOES yuvTexSampler;\n"
		"varying vec2 yuvTexCoords;\n"
		"void main() {\n"
		"  gl_FragColor = texture2D(yuvTexSampler, yuvTexCoords);\n"
		"}\n";
对应的生成的纹理也需要改变类型
// Creates the external OES texture the Camera preview is streamed into.
// Returns 1 on success, -1 as soon as any GL call reports an error.
int GPUTextureFrame::initTexture() {
glGenTextures(1, &decodeTexId);
glBindTexture(GL_TEXTURE_EXTERNAL_OES, decodeTexId);
if (checkGlError("glBindTexture")) {
return -1;
}
// Same filtering/wrapping setup as before, expressed as a table; bail out on
// the first failing glTexParameteri.
const unsigned int pnames[] = { GL_TEXTURE_MAG_FILTER, GL_TEXTURE_MIN_FILTER, GL_TEXTURE_WRAP_S, GL_TEXTURE_WRAP_T };
const int values[] = { GL_LINEAR, GL_LINEAR, GL_CLAMP_TO_EDGE, GL_CLAMP_TO_EDGE };
for (int i = 0; i < 4; ++i) {
glTexParameteri(GL_TEXTURE_EXTERNAL_OES, pnames[i], values[i]);
if (checkGlError("glTexParameteri")) {
return -1;
}
}
return 1;
}
准备工作做完之后就可以启动摄像头的绘制了,这里通过从 Native 调用 Java 层开启预览,并将扩展纹理的 id 传过来。
/** Called from native code once the EGL thread has created the OES texture; hands the texture id to the Camera. **/
public void startPreviewFromNative(int textureId) {
mCamera.setCameraPreviewTexture(textureId);
}
/**
 * Wraps the native-created OES texture in a SurfaceTexture, installs a
 * frame-available listener that pings native for rendering, and starts preview.
 */
@TargetApi(Build.VERSION_CODES.JELLY_BEAN)
public void setCameraPreviewTexture(int textureId) {
Log.i(TAG, "setCameraPreviewTexture...");
mCameraSurfaceTexture = new SurfaceTexture(textureId);
SurfaceTexture.OnFrameAvailableListener frameListener = new SurfaceTexture.OnFrameAvailableListener() {
@Override
public void onFrameAvailable(SurfaceTexture surfaceTexture) {
// A new camera frame landed in the SurfaceTexture: tell native to render.
if (null != mCallback) {
mCallback.notifyFrameAvailable();
}
}
};
try {
mCamera.setPreviewTexture(mCameraSurfaceTexture);
mCameraSurfaceTexture.setOnFrameAvailableListener(frameListener);
mCamera.startPreview();
} catch (Exception e) {
// Best-effort: preview setup failures are only logged, never rethrown.
e.printStackTrace();
}
}
我们使用扩展纹理的Id生成一个SurfaceTexture,给Camera设置一个回调,在Camera获取到一帧数据之后会调用这个接口,我们要做的就是通知Native层进行绘制。然而这里还有一点需要做,就是在绘制之前需要调用SurfaceTexture先更新纹理的数据。
// Renders one preview frame: updates the SurfaceTexture via the Java layer,
// runs the FBO pipeline, then draws to the preview surface if one exists.
void MVRecordingPreviewController::renderFrame() {
// Skip while the EGL core is gone or a camera switch is in progress.
if (NULL == eglCore || isInSwitchingCamera) {
return;
}
if (-1 == startTime) {
startTime = getCurrentTime();
}
// Elapsed seconds since the first rendered frame.
float position = (float) (getCurrentTime() - startTime) / 1000.0f;
// Calls back into Java to update the SurfaceTexture before drawing.
this->processVideoFrame(position);
if (EGL_NO_SURFACE != previewSurface) {
this->draw();
}
}
接下来看这个方法:在扩展纹理有数据之后,先在 FBO 上完成旋转和尺寸适配,再交给屏幕绘制。
void RecordingPreviewRenderer::processFrame(float position) {
glBindFramebuffer(GL_FRAMEBUFFER, FBO);
checkGlError("glBindFramebuffer FBO");
if (degress == 90 || degress == 270)
glViewport(0, 0, cameraHeight, cameraWidth);
else
glViewport(0, 0, cameraWidth, cameraHeight);
GLfloat* vertexCoords = this->getVertexCoords();
//这个方法将扩展纹理绘制到了旋转的纹理上,经过一系列的矩阵变换
mCopier->renderWithCoords(cameraTexFrame, rotateTexId, vertexCoords, textureCoords);
int rotateTexWidth = cameraWidth;
int rotateTexHeight = cameraHeight;
if (degress == 90 || degress == 270){
rotateTexWidth = cameraHeight;
rotateTexHeight = cameraWidth;
}
//这个方法将旋转纹理绘制到了输入纹理上
//注意目前都是在fbo上绘制,没有绘制到屏幕
mRenderer->renderToAutoFitTexture(rotateTexId, rotateTexWidth, rotateTexHeight, inputTexId);
//解绑Fbo
glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
处理完成之后我们的inputTexId纹理是有数据的,然后我们再绘制到屏幕上。绘制的逻辑基本上大同小异,都是矩阵,顶点坐标以及纹理坐标。至于为什么要这些变换,当然是为了做视频的处理了。
// On-screen stage: makes the preview surface current, draws the processed
// texture fitted to the screen, and presents it via eglSwapBuffers.
void MVRecordingPreviewController::draw() {
eglCore->makeCurrent(previewSurface);
renderer->drawToViewWithAutofit(screenWidth, screenHeight, textureWidth, textureHeight);
const bool swapped = eglCore->swapBuffers(previewSurface);
if (!swapped) {
LOGE("eglSwapBuffers(previewSurface) returned error %d", eglGetError());
}
}
最后总结一下,相机预览纹理需要使用扩展纹理而不是普通的 2D 纹理,对应的着色器也需要有变化,摄像头需要根据屏幕来做对应的旋转,然后就是使用了大量的 FBO 绘制到纹理的做法来进行图像的处理。
参考源码