利用QT实现截屏的四种方法;
一:截取整个屏幕
(1)代码如下:
void Test::slotGrabFullScreen()
{
    // Capture the entire desktop and save it as a timestamped JPEG in the
    // current working directory. grabWindow(0) captures the whole screen,
    // not just this application's windows.
    QScreen *screen = QGuiApplication::primaryScreen();
    if (!screen) // can be null on headless systems or very early at startup
    {
        cout<<"no primary screen available"<<endl;
        return;
    }
    QString filePathName = "full-";
    filePathName += QDateTime::currentDateTime().toString("yyyy-MM-dd hh-mm-ss-zzz");
    filePathName += ".jpg";
    if(!screen->grabWindow(0).save(filePathName, "jpg"))
    {
        cout<<"save full screen failed"<<endl;
    }
}
(2)效果如下图:
注意:此方法用于保存整个屏幕。
二:截取某个控件(QWidget)
(1)代码如下:
void Test::slotGrabWidgetScreen()
{
QRect rect = ui.vw->geometry();
QPixmap p = this->grab(QRect(0, 0, 1000, 800));
QString filePathName = "widget";
filePathName += QDateTime::currentDateTime().toString("yyyy-MM-dd hh-mm-ss-zzz");
filePathName += ".png";
if(!p.save(filePathName,"png"))
{
cout<<"save widget screen failed"<<endl;
}
}
调用QWidget的grab方法。
(2)效果图:
注意:此方法对截取播放视频的widget(QVideoWidget)无效——视频由硬件渲染,grab()截取出来的是黑屏。
三:截取视频图片
方法:先截取整个屏幕,保存为图片,然后计算获取到视频在图片的位置。
(1)代码如下:
void Test::slotCutScreen()
{
// 保存整个屏幕为QPixmap
QScreen *screen = QGuiApplication::primaryScreen();
QString filePathName = "cut-";
filePathName += QDateTime::currentDateTime().toString("yyyy-MM-dd hh-mm-ss-zzz");
filePathName += ".png";
QPixmap pixmap = screen->grabWindow(0);
if(!pixmap.save(filePathName,"png"))
{
cout<<"cut save png failed"<<endl;
}
// 计算视频的位置和大小
cout<<"Full pixmap width: "<<pixmap.width()<<" height: "<<pixmap.height()<<endl;
QRect geo = this->geometry();
QRect appGeo = geo; // 整个应用程序在图片中的位置。
cout<<"App x: "<<geo.x()<<" y: "<<geo.y()<<" width: "<<geo.width()<<" height: "<<geo.height()<<endl;
geo = ui.vw->geometry(); // 播放视频在图片中的位置。
cout<<"VW x: "<<geo.x()<<" y: "<<geo.y()<<" width: "<<geo.width()<<" height: "<<geo.height()<<endl;
QWidget *centerWidget = centralWidget(); // QMainWindow在应用程序的位置
QRect centerRect = centerWidget->geometry();
cout<<"center x: "<<centerRect.x()<<" y: "<<centerRect.y()<<" width: "<<centerRect.width()<<" height: "<<centerRect.height()<<endl;
QRect copyGeo;
copyGeo.setX(geo.x() + appGeo.x() + centerRect.x()); // x=三个x相加
copyGeo.setY(geo.y() + appGeo.y() + centerRect.y());
copyGeo.setWidth(geo.width());
copyGeo.setHeight(geo.height());
cout<<"VW1 x: "<<copyGeo.x()<<" y: "<<copyGeo.y()<<" width: "<<copyGeo.width()<<" height: "<<copyGeo.height()<<endl;
QPixmap pixmapCopy = pixmap.copy(copyGeo); // copy图片
filePathName.prepend("Copy+");
if(!pixmapCopy.save(filePathName,"png"))
{
cout<<"copy cut save png failed"<<endl;
}
}
(2)效果图:整个屏幕
(3)截取视频
注意:此方法有两个弊端
A:此方法截图的视频图片与窗口大小有关。有可能截取出来的像素非常低(播放视频的窗口很小)
B:如果视频的分辨率与窗口大小不一致,则截取出来的图片有黑边框。(看上图左右两边有黑色边框)
四:利用FFMPEG获取视频图片
处理步骤:
1:获取一帧数据
2:利用FFMPEG将YUV格式转换为RGB格式
3:保存图片
(1)源代码
// 以下是两个成员变量。一个是player,一个是侦测帧对象
QMediaPlayer *mPlayer;
QVideoProbe *mVideoProbe;
void Test::slotGrabMediaScreenFFMPEG()
{
connect(mVideoProbe, SIGNAL(videoFrameProbed(QVideoFrame)), this, SLOT(slotProcessFrameFFMPEG(QVideoFrame)));
}
void Test::slotProcessFrameFFMPEG(const QVideoFrame & buffer)
{
disconnect(mVideoProbe, SIGNAL(videoFrameProbed(QVideoFrame)), this, SLOT(slotProcessFrameFFMPEG(QVideoFrame)));
if(!buffer.isValid()) // 数据是否有效
{
cout<<"frame is invalid"<<endl;
connect(mVideoProbe, SIGNAL(videoFrameProbed(QVideoFrame)), this, SLOT(slotProcessFrame(QVideoFrame)));
return;
}
QImage img;
QVideoFrame frame(buffer); // 拷贝数据
frame.map(QAbstractVideoBuffer::ReadOnly); // 将视频缓存映射到内存中
int totalBytes = frame.width() * frame.height() * 3;
uchar *imageBuffer = (uchar*)malloc(totalBytes);
if(!YV12ToARGB24_FFmpeg(frame.bits(), imageBuffer, frame.width(), frame.height()))
{
cout<<"convert YUV to RGB failed"<<endl;
return;
}
img = QImage(imageBuffer, frame.width(), frame.height(), //frame.bytesPerLine(),
//imageFormat);
QImage::Format_RGB888);
QString filePathName = "convert-vedio-";
filePathName += QDateTime::currentDateTime().toString("yyyy-MM-dd hh-mm-ss-zzz");
filePathName += ".png";
if(!img.save(filePathName,"png"))
{
cout<<"save convert vedio screen failed"<<endl;
}
}
bool Test::YV12ToARGB24_FFmpeg(unsigned char* pYUV,unsigned char* pBGR24,int width,int height)
{
if (width < 1 || height < 1 || pYUV == NULL || pBGR24 == NULL)
return false;
AVPicture pFrameYUV, pFrameBGR;
avpicture_fill(&pFrameYUV, pYUV, PIX_FMT_NV12, width, height);
avpicture_fill(&pFrameBGR, pBGR24, AV_PIX_FMT_RGB24, width,height);
struct SwsContext* imgCtx = NULL;
imgCtx = sws_getContext(width, height, PIX_FMT_NV12, width, height, AV_PIX_FMT_RGB24, SWS_BICUBIC, 0, 0, 0);
if (imgCtx != NULL){
sws_scale(imgCtx, pFrameYUV.data, pFrameYUV.linesize, 0, height, pFrameBGR.data, pFrameBGR.linesize);
if(imgCtx){
sws_freeContext(imgCtx);
imgCtx = NULL;
}
return true;
}
else{
sws_freeContext(imgCtx);
imgCtx = NULL;
return false;
}
}
(2)效果图
注意:这样就把视频给截取出来了。完全和视频一致。
关于FFMPEG的搭建,见
http://blog.csdn.net/huangqi734044860/article/details/60956995
五:结论
终于搞定了。