Last time we covered the relationships and communication between the important classes inside VideoThreadRender and built an initialisation flow that runs and can be invoked; this post covers the second half: the worker thread and decoding.
VideoThreadRender instantiates two classes, TextureNode and RenderThread. TextureNode hands the texture id of each frame rendered by RenderThread over to the scene-graph render thread (mainly through setTexture) and tells the worker thread to render the next frame; RenderThread does the actual rendering.
The TextureNode class:
#ifndef TEXTURENODE_H
#define TEXTURENODE_H
#include <QObject>
#include <QSGSimpleTextureNode>
QT_FORWARD_DECLARE_CLASS(QQuickWindow)
QT_FORWARD_DECLARE_CLASS(QSGTexture)
class TextureNode : public QObject, public QSGSimpleTextureNode
{
Q_OBJECT
public:
explicit TextureNode(QQuickWindow *window, QObject *parent = nullptr);
~TextureNode();
signals:
void pendingNewTexture(); //a new frame's texture is pending
void textureInUse(); //the texture is now on screen, the next frame can be rendered
public slots:
void newTexture(int id, const QSize &size); //called when the worker thread has rendered a frame
void prepareNode(); //the render thread emits beforeRendering right before it renders; this slot runs then
private:
int m_id;
QSize m_size;
QSGTexture *m_texture{nullptr};
QQuickWindow *m_window{nullptr};
};
#endif // TEXTURENODE_H
#include "texturenode.h"
#include <QQuickWindow>
TextureNode::TextureNode(QQuickWindow *window, QObject *parent):
QObject(parent),
m_id(0),
m_size(0,0),
m_texture(nullptr),
m_window(window)
{
m_texture = m_window->createTextureFromId(m_id,QSize(1,1)); //start with texture id 0 as a 1x1 placeholder
setTexture(m_texture);
setFiltering(QSGTexture::Linear);
}
TextureNode::~TextureNode()
{
delete m_texture;
}
void TextureNode::newTexture(int id, const QSize &size)
{
m_id = id; //the original example guarded this with a mutex; since the signal emission and slot execution are sequenced here, I removed it
m_size = size;
emit pendingNewTexture();
}
void TextureNode::prepareNode()
{
int newId = m_id;
QSize size = m_size;
m_id = 0;
if(newId){
delete m_texture;
m_texture = m_window->createTextureFromId(newId, size);
setTexture(m_texture);
markDirty(DirtyMaterial); //tell the scene graph that this node's material (texture) has changed
emit textureInUse();
}
}
The class is quite simple: it uses the texture id to tie the worker thread to the GUI and render threads.
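For context, the wiring happens in VideoThreadRender::updatePaintNode from the previous post. A minimal sketch of those connections, following Qt's threadrenderer example (m_renderThread and the class layout are assumptions from that post):

QSGNode *VideoThreadRender::updatePaintNode(QSGNode *oldNode, UpdatePaintNodeData *)
{
    TextureNode *node = static_cast<TextureNode *>(oldNode);
    if (!node) {
        node = new TextureNode(window());
        //worker -> node: a new frame texture is ready (runs on the worker thread)
        connect(m_renderThread, &RenderThread::textureReady,
                node, &TextureNode::newTexture, Qt::DirectConnection);
        //node -> window: schedule a repaint on the GUI thread
        connect(node, &TextureNode::pendingNewTexture,
                window(), &QQuickWindow::update, Qt::QueuedConnection);
        //render loop -> node: swap in the new texture just before rendering
        connect(window(), &QQuickWindow::beforeRendering,
                node, &TextureNode::prepareNode, Qt::DirectConnection);
        //node -> worker: the new texture is displayed, render the next frame
        connect(node, &TextureNode::textureInUse,
                m_renderThread, &RenderThread::renderNext, Qt::QueuedConnection);
        //kick off production of the first frame
        QMetaObject::invokeMethod(m_renderThread, "renderNext", Qt::QueuedConnection);
    }
    node->setRect(boundingRect());
    return node;
}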
Now, the worker thread itself.
#ifndef RENDERTHREAD_H
#define RENDERTHREAD_H
#include <QThread>
#include <QOpenGLContext>
#include <QOffscreenSurface>
QT_FORWARD_DECLARE_CLASS(QOpenGLFramebufferObject)
QT_FORWARD_DECLARE_CLASS(Nv12Render)
QT_FORWARD_DECLARE_CLASS(DecodeThread)
class RenderThread : public QThread
{
Q_OBJECT
public:
RenderThread(QSize &size,QObject *parent = nullptr);
~RenderThread();
QOpenGLContext *context{nullptr};
QOffscreenSurface *surface{nullptr}; //its create() may only be called on the GUI (main) thread, so it is exposed as a public member for easy access
QString videoSource;
signals:
void textureReady(int, const QSize); //a video frame has been rendered
public slots:
void renderNext(); //render the next frame
void shutDown(); //shut down and exit
private:
QOpenGLFramebufferObject *m_renderFbo{nullptr}; //FBO rendered into
QOpenGLFramebufferObject *m_displayFbo{nullptr}; //FBO whose texture is on screen
QSize m_size;
Nv12Render *m_videoRender{nullptr}; //renderer
DecodeThread *m_decoder{nullptr}; //decoder
};
#endif // RENDERTHREAD_H
#include "renderthread.h"
#include <QOpenGLFramebufferObject>
#include <QOpenGLFunctions>
#include "nv12render.h"
#include "../NvDecode/decodethread.h"
#include <QGuiApplication>
RenderThread::RenderThread(QSize &size, QObject *parent):
QThread(parent),
m_size(size)
{
m_decoder = new DecodeThread; //create the decoding thread
}
RenderThread::~RenderThread()
{
m_decoder->requestInterruption();
m_decoder->quit();
m_decoder->wait();
}
void RenderThread::renderNext()
{
context->makeCurrent(surface); //make the context current on the offscreen surface
if (!m_renderFbo) {
//first call: create both FBOs and the renderer, then start the decoder
QOpenGLFramebufferObjectFormat format;
format.setAttachment(QOpenGLFramebufferObject::CombinedDepthStencil);
m_renderFbo = new QOpenGLFramebufferObject(m_size, format); //FBO matching the window (display) format
m_displayFbo = new QOpenGLFramebufferObject(m_size, format);
m_videoRender = new Nv12Render(); //create the renderer
m_videoRender->initialize();
m_decoder->setUrl(videoSource); //set the video url
m_decoder->start(); //start decoding
}
m_renderFbo->bind(); //bind the rendering FBO in the current context
context->functions()->glViewport(0, 0, m_size.width(), m_size.height()); //reset the viewport
m_videoRender->renderFrame(m_decoder->imgPtr,m_decoder->videoW,m_decoder->videoH); //render one video frame
context->functions()->glFlush();
m_renderFbo->bindDefault(); //rebind the default framebuffer; in my tests skipping this also works
qSwap(m_renderFbo, m_displayFbo); //swap the two FBO pointers
emit textureReady(m_displayFbo->texture(), m_size);
}
void RenderThread::shutDown()
{
context->makeCurrent(surface);
delete m_renderFbo;
delete m_displayFbo;
delete m_videoRender;
context->doneCurrent();
delete context;
//schedule the surface for deletion on the GUI thread, where it was created
surface->deleteLater();
//stop the event loop, then move the thread object back to the GUI thread so
//it can be safely waited on and deleted in main()
exit();
moveToThread(QGuiApplication::instance()->thread());
}
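As a reminder of where context and surface come from (covered last time; a sketch following Qt's threadrenderer example, with VideoThreadRender assumed to mirror it): the shared context is created on the GUI thread in updatePaintNode and moved to the worker, and the offscreen surface is created on the GUI thread in a ready() slot:

//first pass of updatePaintNode, before any node exists:
QOpenGLContext *current = window()->openglContext();
current->doneCurrent();
m_renderThread->context = new QOpenGLContext();
m_renderThread->context->setFormat(current->format());
m_renderThread->context->setShareContext(current); //share textures with the QML render context
m_renderThread->context->create();
m_renderThread->context->moveToThread(m_renderThread);
current->makeCurrent(window());
QMetaObject::invokeMethod(this, "ready");

void VideoThreadRender::ready()
{
    m_renderThread->surface = new QOffscreenSurface();
    m_renderThread->surface->setFormat(m_renderThread->context->format());
    m_renderThread->surface->create(); //GUI thread only
    m_renderThread->moveToThread(m_renderThread);
    connect(window(), &QQuickWindow::sceneGraphInvalidated,
            m_renderThread, &RenderThread::shutDown, Qt::QueuedConnection);
    m_renderThread->start();
    update();
}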
The renderer holds the OpenGL code that draws NV12 frames; the conversion formula is the usual one found online, I merely implemented it with Qt.
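A quick recap of what the shader has to undo (my summary, not part of the original post): NV12 stores the full-resolution Y plane first, followed by a half-resolution interleaved UV plane, so one W×H frame occupies W*H + W*H/2 bytes. The fragment shader below implements the common BT.601 conversion, with offsets and coefficients matching the code:

R = Y' + 1.13983*V'
G = Y' - 0.39465*U' - 0.58060*V'
B = Y' + 2.03211*U'

where Y' = Y - 0.0625 and U', V' are the chroma samples re-centred by subtracting 0.5. One trap worth noting: GLSL mat3 constructors are column-major, so the three rows written in the source are actually the matrix's columns.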
#ifndef NV12RENDER_H
#define NV12RENDER_H
#include <QOpenGLFunctions>
#include <QOpenGLShaderProgram>
#include <QOpenGLBuffer>
#include <QOpenGLTexture>
class Nv12Render : protected QOpenGLFunctions
{
public:
Nv12Render();
~Nv12Render();
void render();
void renderFrame(uchar* imgPtr, uint videoW,uint videoH);
void initialize();
private:
qreal m_fAngle;
qreal m_fScale;
QOpenGLShaderProgram program1;
QOpenGLBuffer vbo;
QOpenGLTexture *textureY;
QOpenGLTexture *textureUV;
};
#endif // NV12RENDER_H
#include "nv12render.h"
#include <QMatrix4x4>
Nv12Render::Nv12Render()
{
}
Nv12Render::~Nv12Render()
{
}
void Nv12Render::initialize()
{
initializeOpenGLFunctions();
glClearColor(0.1f, 0.1f, 0.2f, 1.0f);
glDisable(GL_DEPTH_TEST);
const char *vsrc1 =
"attribute vec4 vertexIn; \
attribute vec4 textureIn; \
varying vec4 textureOut; \
uniform mediump mat4 matrix;\
void main(void) \
{ \
gl_Position = vertexIn * matrix; \
textureOut = textureIn; \
}";
const char *fsrc1 =
"varying mediump vec4 textureOut;\n"
"uniform sampler2D textureY;\n"
"uniform sampler2D textureUV;\n"
"void main(void)\n"
"{\n"
"vec3 yuv; \n"
"vec3 rgb; \n"
"yuv.x = texture2D(textureY, textureOut.st).r - 0.0625; \n"
"yuv.y = texture2D(textureUV, textureOut.st).r - 0.5; \n"
"yuv.z = texture2D(textureUV, textureOut.st).g - 0.5; \n"
"rgb = mat3( 1, 1, 1, \n"
"0, -0.39465, 2.03211, \n"
"1.13983, -0.58060, 0) * yuv; \n"
"gl_FragColor = vec4(rgb, 1); \n"
"}\n";
program1.addCacheableShaderFromSourceCode(QOpenGLShader::Vertex, vsrc1);
program1.addCacheableShaderFromSourceCode(QOpenGLShader::Fragment, fsrc1);
program1.link();
program1.bind();
GLfloat vertex[]{
//vertex coordinates; the eye sits at (0,0,0) looking down the negative z axis.
//Depth testing is disabled above, so the video quad and any other geometry are
//both visible, which is also slightly cheaper than depth testing
-1.0f,+1.0f,
+1.0f,+1.0f,
+1.0f,-1.0f,
-1.0f,-1.0f,
//texture coordinates
0.0f,1.0f,1.0f,
1.0f,1.0f,1.0f,
1.0f,0.0f,1.0f,
0.0f,0.0f,1.0f,
};
vbo.create();
vbo.bind();
vbo.allocate(vertex,sizeof(vertex));
program1.enableAttributeArray("vertexIn");
program1.enableAttributeArray("textureIn");
program1.setAttributeBuffer("vertexIn",GL_FLOAT,0,2,2*sizeof(GLfloat));
program1.setAttributeBuffer("textureIn",GL_FLOAT,2 * 4 * sizeof(GLfloat),3,3*sizeof(GLfloat));
program1.setUniformValue("textureY",3);
program1.setUniformValue("textureUV",4);
textureY = new QOpenGLTexture(QOpenGLTexture::Target2D);
textureUV = new QOpenGLTexture(QOpenGLTexture::Target2D);
textureY->create();
textureUV->create();
textureY->bind(3);
textureUV->bind(4);
m_fAngle = 0;
m_fScale = 1;
}
//demo path, unused here (its call is commented out in renderNext): spins a coloured triangle
void Nv12Render::render()
{
glClearColor(0.5f, 0.5f, 0.7f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
QMatrix4x4 modelview;
modelview.rotate(m_fAngle, 0.0f, 1.0f, 0.0f);
modelview.rotate(m_fAngle, 1.0f, 0.0f, 0.0f);
modelview.rotate(m_fAngle, 0.0f, 0.0f, 1.0f);
modelview.scale(m_fScale);
modelview.translate(0.0f, -0.2f, 0.0f);
program1.setUniformValue("matrix", modelview);
glDrawArrays(GL_TRIANGLE_FAN,0,3);
m_fAngle += 1.0f;
}
void Nv12Render::renderFrame(uchar *imgPtr, uint videoW, uint videoH)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if (!imgPtr)
return; //no decoded frame available yet
//upload the Y plane (one byte per pixel) to texture unit 3
glActiveTexture(GL_TEXTURE3);
glTexImage2D(GL_TEXTURE_2D,0,GL_RED,videoW,videoH,0,GL_RED,GL_UNSIGNED_BYTE,imgPtr);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
//upload the interleaved UV plane (half resolution, two bytes per sample) to unit 4
glActiveTexture(GL_TEXTURE4);
glTexImage2D(GL_TEXTURE_2D,0,GL_RG,videoW >> 1,videoH >> 1,0,GL_RG,GL_UNSIGNED_BYTE,imgPtr + videoW * videoH);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
QMatrix4x4 modelview;
modelview.rotate(m_fAngle, 0.0f, 1.0f, 0.0f);
modelview.rotate(m_fAngle, 1.0f, 0.0f, 0.0f);
modelview.rotate(m_fAngle, 0.0f, 0.0f, 1.0f);
modelview.scale(m_fScale);
modelview.translate(0.0f, -0.2f, 0.0f);
program1.setUniformValue("matrix", modelview);
//GL_QUADS is unavailable in core and ES profiles; a triangle fan over the same
//four vertices draws the identical quad
glDrawArrays(GL_TRIANGLE_FAN, 0, 4);
m_fAngle += 1.0f;
}
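If the colours ever look wrong, a CPU reference of the same conversion is handy for sanity checks. This helper is hypothetical (not part of the project) but uses identical coefficients:

#include <QtGlobal>

//convert a single NV12 pixel (x, y) to RGB with the same BT.601 maths as the
//fragment shader; nv12 points at the Y plane, the UV plane follows at w*h
static void nv12PixelToRgb(const uchar *nv12, int w, int h, int x, int y, uchar rgb[3])
{
    const uchar *uv = nv12 + w * h + (y / 2) * w + (x / 2) * 2; //interleaved U,V pair
    Q_UNUSED(h);
    const float Y = nv12[y * w + x] / 255.0f - 0.0625f;
    const float U = uv[0] / 255.0f - 0.5f;
    const float V = uv[1] / 255.0f - 0.5f;
    rgb[0] = uchar(qBound(0.0f, (Y + 1.13983f * V) * 255.0f, 255.0f));
    rgb[1] = uchar(qBound(0.0f, (Y - 0.39465f * U - 0.58060f * V) * 255.0f, 255.0f));
    rgb[2] = uchar(qBound(0.0f, (Y + 2.03211f * U) * 255.0f, 255.0f));
}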
The decoding side is very simple to drive.
#ifndef DECODETHREAD_H
#define DECODETHREAD_H
#include <QThread>
class DecodeThread : public QThread
{
Q_OBJECT
public:
DecodeThread(QObject*parent = nullptr);
void setUrl(QString);
uchar* imgPtr{nullptr};
uint videoW{0}, videoH{0}; //initialised so renderFrame() never sees garbage sizes
signals:
void sigImage(uchar*,uint,uint);
protected:
void run() override;
private:
QString _url;
};
#endif // DECODETHREAD_H
#include "decodethread.h"
#include "NvDecode.h"
DecodeThread::DecodeThread(QObject *parent) : QThread(parent)
{
}
void DecodeThread::setUrl(QString url)
{
_url = url;
}
void DecodeThread::run()
{
NvDecode decod;
std::string mmurl = _url.toStdString();
decod.start(mmurl);
unsigned char *nv12Ptr = nullptr;
int width = 0, height = 0;
unsigned long long timestamp = 0;
while (!decod.m_pFrameQueue->isEndOfDecode() && !isInterruptionRequested())
{
if (decod.deQueueFrame(&nv12Ptr, &width, &height, &timestamp)) {
imgPtr = nv12Ptr;
videoW = width;
videoH = height;
// emit sigImage(nv12Ptr,width,height);
QThread::msleep(5);
}
else {
QThread::msleep(5);
continue;
}
}
}
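One caveat with run() as written: imgPtr points into the decoder's internal frame queue, and the render thread reads it while this loop keeps moving on. If tearing or crashes show up, a guarded copy is the usual fix. A sketch of that variant (m_mutex and m_frame are hypothetical members, not in the project):

#include <QMutexLocker>

//inside the decode loop, instead of publishing the raw queue pointer:
{
    QMutexLocker locker(&m_mutex); //QMutex m_mutex; assumed member
    m_frame = QByteArray(reinterpret_cast<const char*>(nv12Ptr),
                         width * height * 3 / 2); //full NV12 frame: Y plane + UV plane
    videoW = width;
    videoH = height;
}

//and on the render-thread side, before calling renderFrame():
QByteArray frame;
{
    QMutexLocker locker(&m_decoder->m_mutex);
    frame = m_decoder->m_frame; //implicitly shared, detaches only on write
}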
Finally, a word about the registration in main() and the usage from QML.
#include <QGuiApplication>
#include <QQmlApplicationEngine>
#include <QThread>
#include "VideoThreadRenderer/videothreadrender.h"
int main(int argc, char *argv[])
{
QCoreApplication::setAttribute(Qt::AA_EnableHighDpiScaling);
QGuiApplication app(argc, argv);
int execReturn = 0;
{
QQmlApplicationEngine engine;
qmlRegisterType<VideoThreadRender>("SceneGraphRendering", 1, 0, "Renderer");
engine.load(QUrl(QLatin1String("qrc:/main.qml")));
if (engine.rootObjects().isEmpty())
return -1;
execReturn = app.exec();
}
for(QThread *t : qAsConst(VideoThreadRender::threads)){ //wait for every render thread to finish before deleting it
t->wait();
delete t;
}
return execReturn;
}
import QtQuick 2.7
import QtQuick.Controls 2.0
import QtQuick.Layouts 1.3
import SceneGraphRendering 1.0
ApplicationWindow {
visible: true
width: 640
height: 480
title: qsTr("Hello World")
Renderer{
anchors.fill: parent
videoSource: "D:/迅雷下载/香肠派对720p.mp4"
Text {
id: name
anchors.centerIn: parent
color: "red"
font.pixelSize: 18
text: qsTr("text")
}
}
}
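For completeness, a plausible qmake project file for the pieces shown in this series (file names and layout are assumptions; the CUDA decoder's link flags are omitted):

QT += quick
CONFIG += c++11

SOURCES += main.cpp \
    VideoThreadRenderer/videothreadrender.cpp \
    VideoThreadRenderer/texturenode.cpp \
    VideoThreadRenderer/renderthread.cpp \
    VideoThreadRenderer/nv12render.cpp \
    NvDecode/decodethread.cpp

HEADERS += VideoThreadRenderer/videothreadrender.h \
    VideoThreadRenderer/texturenode.h \
    VideoThreadRenderer/renderthread.h \
    VideoThreadRenderer/nv12render.h \
    NvDecode/decodethread.h

RESOURCES += qml.qrc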
And that's it, offscreen rendering is done. In a quick test decoding a 720P video, CPU sat around 2-5% and GPU around 10%, which feels acceptable. Note that decoding is not paced to the video's frame rate; each decoded frame is only followed by a 5 ms delay, and the UI shows the next frame as soon as the previous one is displayed, so playback runs much faster than real time.
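If real-time playback is wanted, the loop in run() can be paced by the stream's frame rate instead of the flat 5 ms nap. A sketch, assuming 25 fps (read the real rate from the stream if the decoder exposes it):

#include <QElapsedTimer>

//replacement body for the loop in DecodeThread::run(), paced at a fixed fps
QElapsedTimer clock;
clock.start();
const double frameIntervalMs = 1000.0 / 25.0; //assumed frame rate
qint64 nextDueMs = 0;
while (!decod.m_pFrameQueue->isEndOfDecode() && !isInterruptionRequested()) {
    if (decod.deQueueFrame(&nv12Ptr, &width, &height, &timestamp)) {
        imgPtr = nv12Ptr;
        videoW = width;
        videoH = height;
        nextDueMs += qint64(frameIntervalMs);
        const qint64 waitMs = nextDueMs - clock.elapsed();
        if (waitMs > 0)
            QThread::msleep(ulong(waitMs)); //sleep only the remaining slice of this frame's slot
    } else {
        QThread::msleep(5); //queue empty, try again shortly
    }
}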