The previous chapter covered two points. First, the frame-data buffer pool, which caches NV21 frame data (call stack: JClass Activity -> JClass CFEScheduler -> JCallback Camera.onPreviewFrame -> JNIMethod feedVideoData). Second, setRotationCamera, which adjusts horizontal and vertical flipping according to the rotation angle (call stack: JClass Activity -> JClass CFEScheduler -> JMethod setUpCamera -> JNIMethod setRotationCamera). Both ultimately trigger adjustFrameScaling, which adjusts the frame scaling and generates the vertex-coordinate and texture-coordinate data used for OpenGL rendering.
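As a refresher, here is a minimal sketch of what the native side of feedVideoData might look like; the exact signature, the ByteBuffer constructor, and the pool's put() are assumptions inferred from the call stack above and the renderOnDraw code shown later, not the project's verbatim code:

// Hypothetical native side of feedVideoData: copy the NV21 frame into the
// pool so the GL thread can pick it up in renderOnDraw. The ByteBuffer
// constructor and the pool's put() are assumed APIs.
void GpuFilterRender::feedVideoData(int8_t *data, int width, int height)
{
    int y_len = width * height;        // Y plane: one byte per pixel
    int u_len = y_len / 4;             // U and V are subsampled 2x2
    int v_len = y_len / 4;
    ByteBuffer *frame = new ByteBuffer(y_len + u_len + v_len);
    memcpy(frame->data(), data, (size_t) (y_len + u_len + v_len));
    frame->param1 = y_len;             // plane lengths read back in renderOnDraw
    frame->param2 = u_len;
    frame->param3 = v_len;
    pthread_mutex_lock(&mutex);
    mNV21Pool.put(frame);              // consumed by mNV21Pool.get() on the GL thread
    pthread_mutex_unlock(&mutex);
}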
Now we get to actually rendering the image data onto the screen. This differs from the watermark-recording setup in the previous article: there, the system provided a ready-made texture object, and we simply borrowed its texture ID and operated on it directly. This time all we have is raw NV21 frame data, so the key problem is how to render those frames efficiently onto the output surface.
One more issue to keep in mind: when we later add various filter effects, how do we switch between them seamlessly? (Let's leave that as an open point for now.) Without further ado, show code.
First, let's look at the three main lifecycle callbacks of the GLThread+GLRender setup (if anything is unclear, see the earlier article: https://blog.csdn.net/a360940265a/article/details/88600962):
void GpuFilterRender::surfaceCreated(ANativeWindow *window)
{
    if (mEglCore == NULL) {
        mEglCore = new EglCore(NULL, FLAG_TRY_GLES2);
    }
    mWindowSurface = new WindowSurface(mEglCore, window, true);
    assert(mWindowSurface != NULL && mEglCore != NULL);
    LOGD("render surface create ... ");
    mWindowSurface->makeCurrent();
    if (mFilter == NULL) {
        mFilter = new GpuBaseFilter();
    } else {
        mFilter->destroy();
    }
    mFilter->init();
    mWindowSurface->swapBuffers();
    // ...
}
void GpuFilterRender::surfaceChanged(int width, int height) {
    this->mViewWidth = width;
    this->mViewHeight = height;
    mWindowSurface->makeCurrent();
    mFilter->onOutputSizeChanged(width, height);
    mWindowSurface->swapBuffers();
}
void GpuFilterRender::surfaceDestroyed() {
    if (mWindowSurface) {
        mWindowSurface->release();
        delete mWindowSurface;
        mWindowSurface = NULL;
    }
    if (mEglCore) {
        mEglCore->release();
        delete mEglCore;
        mEglCore = NULL;
    }
}
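For context, a minimal sketch of how a render thread might drive these callbacks; the loop structure, flags, and getElapsedMs() are assumptions based on the GLThread+GLRender design from the earlier article, not the project's verbatim code:

// Hypothetical render-thread loop driving the GpuFilterRender callbacks.
// The pending flags and getElapsedMs() are illustrative names only.
void GLThread::run()
{
    for (;;) {
        if (mExitPending)
            break;
        if (mSurfaceCreatedPending) {            // a new ANativeWindow arrived
            mRender->surfaceCreated(mWindow);
            mSurfaceCreatedPending = false;
        }
        if (mSizeChangedPending) {               // view size changed
            mRender->surfaceChanged(mWidth, mHeight);
            mSizeChangedPending = false;
        }
        mRender->renderOnDraw(getElapsedMs());   // per-frame draw, shown below
    }
    mRender->surfaceDestroyed();                 // tear down EGL on the way out
}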
Clearly GpuBaseFilter is going to be a key class that plays a crucial role throughout the filter-rendering process, so let's see what it does.
#ifndef GPU_NORMAL_FILTER_HPP
#define GPU_NORMAL_FILTER_HPP
#include <string>
#include <GLES2/gl2.h>
// (ShaderHelper is the project's own helper for compiling and linking shaders.)
#define FILTER_TYPE_NORMAL 0x1010
/**
 * Filter base class, supporting both YUV and RGB rendering modes.
 */
class GpuBaseFilter {
public:
    // Lets the upper layer map entries in the filter list to a filter type.
    virtual int getTypeId() { return FILTER_TYPE_NORMAL; }
    GpuBaseFilter()
    {
        NO_FILTER_VERTEX_SHADER = "attribute vec4 position;\n\
            attribute vec4 inputTextureCoordinate;\n\
            varying vec2 textureCoordinate;\n\
            void main()\n\
            {\n\
                gl_Position = position;\n\
                textureCoordinate = inputTextureCoordinate.xy;\n\
            }";
        NO_FILTER_FRAGMENT_SHADER = "precision mediump float;\n\
            varying highp vec2 textureCoordinate;\n\
            uniform sampler2D SamplerRGB;\n\
            uniform sampler2D SamplerY;\n\
            uniform sampler2D SamplerU;\n\
            uniform sampler2D SamplerV;\n\
            mat3 colorConversionMatrix = mat3(\n\
                1.0, 1.0, 1.0,\n\
                0.0, -0.39465, 2.03211,\n\
                1.13983, -0.58060, 0.0);\n\
            vec3 yuv2rgb(vec2 pos)\n\
            {\n\
                vec3 yuv;\n\
                yuv.x = texture2D(SamplerY, pos).r;\n\
                yuv.y = texture2D(SamplerU, pos).r - 0.5;\n\
                yuv.z = texture2D(SamplerV, pos).r - 0.5;\n\
                return colorConversionMatrix * yuv;\n\
            }\n\
            void main()\n\
            {\n\
                gl_FragColor = vec4(yuv2rgb(textureCoordinate), 1.0);\n\
                //gl_FragColor = texture2D(SamplerRGB, textureCoordinate);\n\
            }";
    }
    virtual ~GpuBaseFilter()
    {
        if (!NO_FILTER_VERTEX_SHADER.empty()) NO_FILTER_VERTEX_SHADER.clear();
        if (!NO_FILTER_FRAGMENT_SHADER.empty()) NO_FILTER_FRAGMENT_SHADER.clear();
        mIsInitialized = false;
    }
    virtual void init() {
        init(NO_FILTER_VERTEX_SHADER.c_str(), NO_FILTER_FRAGMENT_SHADER.c_str());
    }
    void init(const char *vertexShaderSource, const char *fragmentShaderSource) {
        mGLProgId = ShaderHelper::buildProgram(vertexShaderSource, fragmentShaderSource);
        mGLAttribPosition = static_cast<GLuint>(glGetAttribLocation(mGLProgId, "position"));
        mGLUniformSampleRGB = static_cast<GLuint>(glGetUniformLocation(mGLProgId, "SamplerRGB"));
        mGLUniformSampleY = static_cast<GLuint>(glGetUniformLocation(mGLProgId, "SamplerY"));
        mGLUniformSampleU = static_cast<GLuint>(glGetUniformLocation(mGLProgId, "SamplerU"));
        mGLUniformSampleV = static_cast<GLuint>(glGetUniformLocation(mGLProgId, "SamplerV"));
        mGLAttribTextureCoordinate = static_cast<GLuint>(glGetAttribLocation(mGLProgId, "inputTextureCoordinate"));
        mIsInitialized = true;
    }
    virtual void destroy() {
        mIsInitialized = false;
        glDeleteProgram(mGLProgId);
    }
    virtual void onOutputSizeChanged(int width, int height) {
        mOutputWidth = width;
        mOutputHeight = height;
    }
    virtual void onDraw(GLuint SamplerY_texId, GLuint SamplerU_texId, GLuint SamplerV_texId,
                        void* positionCords, void* textureCords)
    {
        if (!mIsInitialized)
            return;
        glUseProgram(mGLProgId);
        // runPendingOnDrawTasks();
        glVertexAttribPointer(mGLAttribPosition, 2, GL_FLOAT, GL_FALSE, 0, positionCords);
        glEnableVertexAttribArray(mGLAttribPosition);
        glVertexAttribPointer(mGLAttribTextureCoordinate, 2, GL_FLOAT, GL_FALSE, 0, textureCords);
        glEnableVertexAttribArray(mGLAttribTextureCoordinate);
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, SamplerY_texId);
        glUniform1i(mGLUniformSampleY, 0);
        glActiveTexture(GL_TEXTURE1);
        glBindTexture(GL_TEXTURE_2D, SamplerU_texId);
        glUniform1i(mGLUniformSampleU, 1);
        glActiveTexture(GL_TEXTURE2);
        glBindTexture(GL_TEXTURE_2D, SamplerV_texId);
        glUniform1i(mGLUniformSampleV, 2);
        // onDrawArraysPre();
        glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
        glDisableVertexAttribArray(mGLAttribPosition);
        glDisableVertexAttribArray(mGLAttribTextureCoordinate);
        glBindTexture(GL_TEXTURE_2D, 0);
    }
    // The draw path for mGLUniformSampleRGB; the RGB mode is not used here.
    //virtual void onDraw2(GLuint textureId, void* positionCords, void* textureCords)
    //{
    //    if (!mIsInitialized)
    //        return;
    //    glUseProgram(mGLProgId);
    //    // runPendingOnDrawTasks();
    //    glVertexAttribPointer(mGLAttribPosition, 2, GL_FLOAT, GL_FALSE, 0, positionCords);
    //    glEnableVertexAttribArray(mGLAttribPosition);
    //    glVertexAttribPointer(mGLAttribTextureCoordinate, 2, GL_FLOAT, GL_FALSE, 0, textureCords);
    //    glEnableVertexAttribArray(mGLAttribTextureCoordinate);
    //    if (textureId != -1) {
    //        glActiveTexture(GL_TEXTURE0);
    //        glBindTexture(GL_TEXTURE_2D, textureId);
    //        glUniform1i(mGLUniformSampleRGB, 0);
    //    }
    //    // onDrawArraysPre();
    //    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
    //    glDisableVertexAttribArray(mGLAttribPosition);
    //    glDisableVertexAttribArray(mGLAttribTextureCoordinate);
    //    glBindTexture(GL_TEXTURE_2D, 0);
    //}
    // Each filter's adjustable parameter is manipulated through this interface.
    virtual void setAdjustEffect(float percent) {
        // subclass override
    }
    bool isInitialized() { return mIsInitialized; }
    GLuint getProgram() { return mGLProgId; }
protected:
    std::string NO_FILTER_VERTEX_SHADER;
    std::string NO_FILTER_FRAGMENT_SHADER;
    GLuint mGLProgId;
    GLuint mGLAttribPosition;
    GLuint mGLUniformSampleRGB;
    GLuint mGLAttribTextureCoordinate;
    GLuint mGLUniformSampleY;
    GLuint mGLUniformSampleU;
    GLuint mGLUniformSampleV;
    int mOutputWidth;
    int mOutputHeight;
    bool mIsInitialized;
};
#endif // GPU_NORMAL_FILTER_HPP
That was a bit long. This is a standard C++ base class; every function marked virtual can be overridden by subclasses (look up C++ virtual functions if you need a refresher). Focus on two things: the shaders set up in the constructor, and the onDraw method that does the rendering.
First, the vertex shader NO_FILTER_VERTEX_SHADER:
attribute vec4 position;
attribute vec4 inputTextureCoordinate;
varying vec2 textureCoordinate;
void main()
{
    gl_Position = position;
    textureCoordinate = inputTextureCoordinate.xy;
}
Nothing complicated: it outputs the vertex position as-is and passes the texture coordinate through to the fragment shader.
Next, NO_FILTER_FRAGMENT_SHADER, one of the key pieces of this article:
precision mediump float;
varying highp vec2 textureCoordinate;
uniform sampler2D SamplerRGB;
uniform sampler2D SamplerY;
uniform sampler2D SamplerU;
uniform sampler2D SamplerV;
mat3 colorConversionMatrix = mat3(
        1.0, 1.0, 1.0,
        0.0, -0.39465, 2.03211,
        1.13983, -0.58060, 0.0);
vec3 yuv2rgb(vec2 pos)
{
    vec3 yuv;
    yuv.x = texture2D(SamplerY, pos).r;
    yuv.y = texture2D(SamplerU, pos).r - 0.5;
    yuv.z = texture2D(SamplerV, pos).r - 0.5;
    return colorConversionMatrix * yuv;
}
void main()
{
    gl_FragColor = vec4(yuv2rgb(textureCoordinate), 1.0);
    //gl_FragColor = texture2D(SamplerRGB, textureCoordinate);
}
Clearly two paths are prepared here. SamplerRGB is a full-color texture used by the commented-out line, which produces gl_FragColor directly from the texture coordinate. SamplerY/U/V, as the names suggest, hold the three YUV planes separately; they are combined through the standard YUV-to-RGB conversion:
R = Y + 0*U + 1.140*V
G = Y - 0.395*U - 0.581*V
B = Y + 2.032*U + 0*V
These formulas are in un-quantized, fractional form. What is quantization? Quantization maps Y, U, or V into a restricted range via a linear transform. Suppose the unquantized ranges are Y/U/V in (0, 255); the quantized ranges are Y in (16, 235) and U, V in (16, 240). Mapping Y in (0, 255) to Y' in (16, 235) works like this: Y' = Y*[(235-16)/255] + 16. For example, Y = 128 maps to Y' = 128*(219/255) + 16 ≈ 126.
One more point: OpenGL matrices are column-major, so when defining a matrix by hand you have to swap rows and columns.
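To make that concrete, here is the same conversion written both ways; GLSL's mat3 constructor fills the matrix column by column, so its arguments are the row-form coefficients transposed (the comments are for illustration):

// Row form of the conversion (matching the R/G/B formulas above):
//   | R |   | 1.0   0.0      1.13983 |   | Y |
//   | G | = | 1.0  -0.39465 -0.58060 | * | U |
//   | B |   | 1.0   2.03211  0.0     |   | V |
// mat3 takes its arguments column by column, so the first three values
// become column 0 (the Y coefficients), and so on:
mat3 colorConversionMatrix = mat3(
        1.0,      1.0,      1.0,       // column 0: Y coefficients of R, G, B
        0.0,     -0.39465,  2.03211,   // column 1: U coefficients
        1.13983, -0.58060,  0.0);      // column 2: V coefficients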
With the shaders ready, the next step is rendering NV21 data with them.
void GpuFilterRender::renderOnDraw(double elpasedInMilliSec)
{
    if (mEglCore == NULL || mWindowSurface == NULL) {
        LOGW("Skipping drawFrame after shutdown");
        return;
    }
    pthread_mutex_lock(&mutex);
    ByteBuffer* item = mNV21Pool.get();
    if (item == NULL) {
        pthread_mutex_unlock(&mutex);
        return;
    } else { // item != NULL, so i420BufferY/U/V are non-NULL as well
        int8_t* nv21_buffer = item->data();
        int y_len = item->param1;
        int u_len = item->param2;
        int v_len = item->param3;
        // Fill in the Y, U and V buffers.
        int8_t* dst_y = i420BufferY->data();
        int8_t* dst_u = i420BufferU->data();
        int8_t* dst_v = i420BufferV->data();
        memcpy(dst_y, nv21_buffer, (size_t) y_len);
        for (int i = 0; i < u_len; i++) {
            // NV21 stores V before U.
            *(dst_v + i) = (uint8_t) *(nv21_buffer + y_len + i * 2);
            *(dst_u + i) = (uint8_t) *(nv21_buffer + y_len + i * 2 + 1);
        }
        // Drop the reference taken from the buffer pool.
        delete item;
        pthread_mutex_unlock(&mutex); // unlock as soon as possible
        // Render the frame.
        mWindowSurface->makeCurrent();
        yTextureId = updateTexture(dst_y, yTextureId, mFrameWidth, mFrameHeight);
        uTextureId = updateTexture(dst_u, uTextureId, mFrameWidth/2, mFrameHeight/2);
        vTextureId = updateTexture(dst_v, vTextureId, mFrameWidth/2, mFrameHeight/2);
        if (mFilter != NULL) {
            mFilter->onDraw(yTextureId, uTextureId, vTextureId, positionCords, textureCords);
        }
        mWindowSurface->swapBuffers();
    }
}
GLuint GpuFilterRender::updateTexture(int8_t *src, int texId, int width, int height)
{
    GLuint mTextureID;
    if (texId == -1) {
        glGenTextures(1, &mTextureID);
        glBindTexture(GL_TEXTURE_2D, mTextureID);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width, height,
                     0, GL_LUMINANCE, GL_UNSIGNED_BYTE, src);
    } else {
        glBindTexture(GL_TEXTURE_2D, texId);
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height,
                        GL_LUMINANCE, GL_UNSIGNED_BYTE, src);
        mTextureID = texId;
    }
    glBindTexture(GL_TEXTURE_2D, 0);
    return mTextureID;
}
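For reference, a minimal sketch of how the I420 destination buffers and texture IDs might be set up so the plane sizes line up with the calls above; the onFrameSizeChanged hook and the ByteBuffer constructor are assumptions, only the sizes matter:

// Hypothetical setup, e.g. when the frame size becomes known.
// Y is full resolution; U and V are each a quarter of the pixel count,
// uploaded as (width/2) x (height/2) luminance textures.
void GpuFilterRender::onFrameSizeChanged(int width, int height)
{
    mFrameWidth  = width;
    mFrameHeight = height;
    i420BufferY = new ByteBuffer(width * height);
    i420BufferU = new ByteBuffer(width * height / 4);
    i420BufferV = new ByteBuffer(width * height / 4);
    yTextureId = uTextureId = vTextureId = -1;   // force glTexImage2D on first upload
    // If the frame width is not a multiple of 4, OpenGL's default unpack
    // alignment can corrupt the upload; setting it to 1 is the usual fix:
    // glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
}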
To recap: feedVideoData fills the mNV21Pool cache with NV21 data, and inside the GLThread's renderOnDraw callback we keep pulling NV21 frames out, splitting each one into separate Y/U/V buffers according to the NV21 layout (a full Y plane followed by interleaved VU pairs: YYYY... VUVU...), and then promptly updating the texture objects via updateTexture. One easy mistake to watch out for: the Y buffer has the normal frame width and height, but the U and V planes are each half the width and half the height (so y_len = width*height and u_len = v_len = width*height/4). If the dimensions don't match the data, glTexImage2D / glTexSubImage2D will crash with a SIGSEGV. From there, rendering happens through GpuBaseFilter.onDraw:
virtual void onDraw(GLuint SamplerY_texId, GLuint SamplerU_texId, GLuint SamplerV_texId,
                    void* positionCords, void* textureCords)
{
    if (!mIsInitialized)
        return;
    glUseProgram(mGLProgId);
    // Bind the vertex coordinates and texture coordinates.
    glVertexAttribPointer(mGLAttribPosition, 2, GL_FLOAT, GL_FALSE, 0, positionCords);
    glEnableVertexAttribArray(mGLAttribPosition);
    glVertexAttribPointer(mGLAttribTextureCoordinate, 2, GL_FLOAT, GL_FALSE, 0, textureCords);
    glEnableVertexAttribArray(mGLAttribTextureCoordinate);
    // Bind Y/U/V to three separate texture units.
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, SamplerY_texId);
    glUniform1i(mGLUniformSampleY, 0);
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(GL_TEXTURE_2D, SamplerU_texId);
    glUniform1i(mGLUniformSampleU, 1);
    glActiveTexture(GL_TEXTURE2);
    glBindTexture(GL_TEXTURE_2D, SamplerV_texId);
    glUniform1i(mGLUniformSampleV, 2);
    // Draw through the shader.
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
    glDisableVertexAttribArray(mGLAttribPosition);
    glDisableVertexAttribArray(mGLAttribTextureCoordinate);
    glBindTexture(GL_TEXTURE_2D, 0);
}
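Looking ahead to the seamless-switching question flagged earlier: since every hook on GpuBaseFilter is virtual, a concrete filter only needs to supply its own fragment shader and type ID. A minimal hypothetical subclass sketch (the grayscale shader and the FILTER_TYPE_GRAY constant are illustrative, not the project's code):

// Hypothetical subclass: renders the frame as grayscale by sampling only
// the Y plane. Reuses GpuBaseFilter's vertex shader and onDraw unchanged.
#define FILTER_TYPE_GRAY 0x1011
class GpuGrayFilter : public GpuBaseFilter {
public:
    int getTypeId() override { return FILTER_TYPE_GRAY; }
    void init() override {
        const char* fragment =
            "precision mediump float;\n"
            "varying highp vec2 textureCoordinate;\n"
            "uniform sampler2D SamplerY;\n"
            "uniform sampler2D SamplerU;\n"
            "uniform sampler2D SamplerV;\n"
            "void main()\n"
            "{\n"
            "    float y = texture2D(SamplerY, textureCoordinate).r;\n"
            "    gl_FragColor = vec4(vec3(y), 1.0);\n"
            "}";
        // Reuse the base vertex shader; only the fragment shader differs.
        GpuBaseFilter::init(NO_FILTER_VERTEX_SHADER.c_str(), fragment);
    }
};

Switching filters then comes down to destroy()-ing the current filter and init()-ing the new one on the GL thread, the same pattern surfaceCreated already uses.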
That completes the whole pipeline: at this point we can see the camera video feed, correctly fitted to the screen's aspect ratio. The next chapter builds three simple filter effects and a general method for switching between them seamlessly.
Project repository: https://github.com/MrZhaozhirong/NativeCppApp