Multithreaded BMP sequence-to-video encoding with FFmpeg + Qt 4.8 + VS2010

Large parts of this reference Lei Xiaohua's (雷神) blog (link).

videoMuxer() and flush_encoder() were copied directly from Lei Xiaohua's code, but I folded the muxing step straight into makeH264(), so videoMuxer() is not actually used here; flush_encoder() also has some corresponding changes.
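
For orientation, here is a minimal, hypothetical driver for the two classes below; the directory, output name and encoding parameters are placeholders rather than values from the original project (width/height must match the BMP frames):

#include <QCoreApplication>
#include "makevideo.h"

int main(int argc, char* argv[])
{
	QCoreApplication app(argc, argv);
	//Reader threads decode BMP frames in parallel while makeH264() converts, encodes and muxes them
	MakeVideo maker("D:/bmp_seq", 4);                    //placeholder directory, 4 reader threads
	//videoName, fps, qp, tune flag (0 = none), encoder threads, width, height
	maker.makeH264("out.avi", 25, 20, 0, 4, 1920, 1080);
	return 0;
}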

BMP reading:

#ifndef READBMPTHREAD_H
#define READBMPTHREAD_H
#include "IncludeFile.h"
#include <QThread>
//#include <QMutex>
static int gFirstNum = 0; //starting index for each thread
static int gOkNum = 0;    //total number of frames read so far
class ReadBmpThread : public QThread
{
	Q_OBJECT

public:
	ReadBmpThread(const QMap<int, QString>&bmpMap, QMutex&mutex,bool &goOn,int threadNum=1, QObject *parent=0);
	~ReadBmpThread();
	const QMap<int, QString>& m_bmpMap;
	QMap<int, AVFrame*> m_readFrameMap;
	QMutex& m_mutex;
	bool& m_goOn;
	int m_allThreadNum;
	
protected:
	void run();

private:
	//Read a single BMP (24- or 32-bit) into the frame map
	int readBmp2FrameMap(const char*bmpPath,int num);
	static int s_allThreadNum;
	int m_thisThreadNum;
};

#endif // READBMPTHREAD_H

#include "readbmpthread.h"
extern void writeMsg(char* msg, char* mode="a+", char* fileName = ERR_TXT);
ReadBmpThread::ReadBmpThread(const QMap<int, QString>&bmpMap, QMutex&mutex, bool &goOn,int threadNum,QObject *parent)
	:m_bmpMap(bmpMap),/*m_frameMap(frameMap), */m_mutex(mutex),m_goOn(goOn),m_allThreadNum(threadNum),QThread(parent)
{
	m_thisThreadNum = s_allThreadNum;
	printf("thread : %d init\n",m_thisThreadNum);
	s_allThreadNum++;
	//m_thisOkNum=0;
}

ReadBmpThread::~ReadBmpThread()
{
	
}
int ReadBmpThread::s_allThreadNum = 0;

void ReadBmpThread::run()
{
	printf("thread : %d run\n",m_thisThreadNum);
	QTime readAllTime;
	readAllTime.start();
	
		
	int n = m_thisThreadNum;

	for(;n<m_bmpMap.size();n+=m_allThreadNum)
	{
		//Throttle: wait while this thread's output buffer already holds more than 30 decoded frames
		while(m_readFrameMap.size()>30)
		{
			;
		//sleep(5);
		}
		readBmp2FrameMap(m_bmpMap.value(n).toLatin1().data(), n);
	}
	m_goOn = false;
	int aa = readAllTime.elapsed();
	//m_frameMap.insert(-1,NULL); //end of reading
}
int ReadBmpThread::readBmp2FrameMap(const char*bmpPath,int num)
{
	QTime time_;
	time_.start();
	

	AVFrame* rgbFrame =NULL;
	//wf_bmp2Frame(bmpPath, rgbFrame);
	//Open the image file in binary mode
	FILE *fp=fopen(bmpPath,"rb");  
	if(fp==0) return 0;  
	//Skip the BITMAPFILEHEADER structure
	fseek(fp, sizeof(BITMAPFILEHEADER),0);  
	//Read the BITMAPINFOHEADER into head
	BITMAPINFOHEADER head;     
	fread(&head, sizeof(BITMAPINFOHEADER), 1,fp);    
	//Get the image width, height and bits per pixel
	int biWidth = head.biWidth;  
	int biHeight = head.biHeight;  
	int biBitCount = head.biBitCount;  
	//Bytes per image row (must be a multiple of 4)
	int lineByte=(biWidth * biBitCount/8+3)/4*4;  
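	//e.g. a 24-bit image 1001 pixels wide: 1001*3 = 3003 bytes of pixel data, padded up to 3004 (the next multiple of 4)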
	//Bit depth check: only 24- and 32-bit BMPs are supported
	if(biBitCount != 24 && biBitCount != 32)
	{
		char err[100];
		sprintf(err,"bmp file: %s  is not  24 or 32 bit\n ", bmpPath);
		writeMsg(err);
		return 0;
	}
	//Allocate space for the pixel data and read it in
	uint8_t* bmpBuffer = (uint8_t* )av_malloc(lineByte* biHeight);
	fread(bmpBuffer,1,lineByte * biHeight,fp);  
	//Close the file
	fclose(fp);  
	
	//Flip the image vertically (BMP rows are stored bottom-up)
	if(1)
	{
		uint8_t* tempData = (uint8_t*)av_malloc(lineByte*biHeight);
		for(int h=0; h<biHeight; h++)
			memcpy(tempData + h*lineByte, bmpBuffer + (biHeight-1-h)*lineByte, lineByte);
		memcpy(bmpBuffer, tempData, lineByte*biHeight);
		av_free(tempData);
	}
	//Source pixel format from the bit depth (assumed BGR byte order, as BMPs store)
	AVPixelFormat pixFmt = (biBitCount == 24) ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_BGRA;
	//rgbFrame->width = biWidth;
	//rgbFrame->height = biHeight;
	//rgbFrame->linesize[0] = lineByte;
	//rgbFrame->format = pixFmt;
	//printf("w:%d , h:%d, linesize: %d\n",biWidth, biHeight, lineByte);
	rgbFrame = av_frame_alloc();
	avpicture_fill((AVPicture *)rgbFrame, bmpBuffer,/*AV_PIX_FMT_RGB24 */pixFmt, biWidth, biHeight);  
	rgbFrame->width = biWidth;
	rgbFrame->height = biHeight;
	rgbFrame->linesize[0] = lineByte;
	rgbFrame->format = pixFmt;
	printf("w:%d , h:%d, linesize: %d\n",biWidth, biHeight, lineByte);
	//dstFrame = rgbFrame;
	//	rgbFrame = NULL;
	std::cout<<"read time: "<

Video composition:

#ifndef MAKEVIDEO_H
#define MAKEVIDEO_H
#include "IncludeFile.h"

#include <QObject>
#include "readbmpthread.h"
class MakeVideo : public QObject
{
	Q_OBJECT

public:
	MakeVideo(const char* imgDir, int threadNum=1,QObject *parent=0);
	~MakeVideo();
	void updateBmpList(const char* dirPath);
	bool makeH264(const char*videoName,int fps, int qp, int flat, int threadNum, int width, int height);
	int flush_encoder(AVFormatContext *fmt_encode,AVFormatContext*fmt_write,unsigned int stream_index);
	int videoMuxer( const char* inVideoName, const char* outVideoName);
	QMap<int, QString> m_bmpMap; //BMP file paths keyed by frame index
	QMutex m_mutex;
	bool goOn;
	int m_threadNum;
private:
int okNum;
	 QList<ReadBmpThread*> m_threadList;
	 QString temp;	
};

#endif // MAKEVIDEO_H
#include "makevideo.h"
extern void writeMsg(char* msg, char* mode="a+", char* fileName = ERR_TXT);
MakeVideo::MakeVideo(const char* imgDir,int threadNum,QObject *parent)
	:m_threadNum(threadNum),goOn(true), QObject(parent)
{
	updateBmpList(imgDir);
	printf("总BMP数量:%d\n",m_bmpMap.size());
//	Sleep(2000);
	for(int n=0;n<m_threadNum;n++)
	{
		ReadBmpThread* thread = new ReadBmpThread(m_bmpMap, m_mutex, goOn, m_threadNum, this);
		m_threadList.append(thread);
		thread->start();
	}
}

MakeVideo::~MakeVideo()
{

}
void MakeVideo::updateBmpList(const char* dirPath)
{

	QDir dir;
	dir.setPath(dirPath);
	if(!dir.exists())
	{
		char err[64];
		sprintf(err,"dir: %s error\n", dirPath);
		writeMsg(err);
		exit(0);
	}
	QStringList strList = dir.entryList(QStringList()<<"*.BMP"<<"*.bmp",QDir::Files, QDir::Time);
	if(strList.size() == 0)
	{
		printf("dir: %s have no file\n", dirPath);
		return;
	}
	for(int n=0;n<strList.size();n++)
	{
		m_bmpMap.insert(n, dir.absoluteFilePath(strList.at(n)));
	}
}
bool MakeVideo::makeH264(const char*videoName, int fps, int qp, int flat, int threadNum, int width, int height)
{
	AVFrame* receiveFrame = NULL;
	AVFrame* yuvFrame = NULL;
	AVFormatContext* fmt = NULL;
	AVCodecContext* codecCtx = NULL;
	AVCodec* codec = NULL;
	AVStream* outStream = NULL;

	av_register_all();
	fmt = avformat_alloc_context();
	fmt->oformat = av_guess_format("h264", NULL,NULL);
//fmt->oformat = av_guess_format("mpg2", NULL,NULL);
	if(avio_open2(&fmt->pb, videoName, AVIO_FLAG_READ_WRITE, NULL, NULL) != 0)
	{
		char err[100];
		sprintf(err,"Couldn't open output file: %s  \n",videoName);
		writeMsg(err);
		exit(1);
	}
	printf("codecCtx init\n");
	outStream = avformat_new_stream(fmt, NULL);
	codecCtx = outStream->codec;
	codecCtx->codec_id = fmt->oformat->video_codec;  //e.g. H.264 or H.265
	codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;  
	codecCtx->pix_fmt = AV_PIX_FMT_YUV444P/*AV_PIX_FMT_YUV444P*/;  
	codecCtx->width = width;
	codecCtx->height = height;  
	codecCtx->time_base.num = 1;  
	codecCtx->time_base.den = fps;
	codecCtx->gop_size=10;  

	//codecCtx->cqp=0;
	codecCtx->thread_count=threadNum;
	codecCtx->thread_type;
	codecCtx->max_b_frames=0;  

/* Use a fixed QP range, or bitrate control */
#if 1
	codecCtx->qmin = qp;  
	codecCtx->qmax = qp+5;
	codecCtx->bit_rate = 10*1024*1024*8; //only effective within the QP range; acts as the minimum bitrate while QP fluctuates
#else 
	codecCtx->flags|=CODEC_FLAG_QSCALE;
	codecCtx->qmin = 0;  
	codecCtx->qmax =49;
	codecCtx->bit_rate = 5*1024*1024*8;
	codecCtx->rc_min_rate=1*1024*1024*8;
	codecCtx->rc_max_rate=6*1024*1024*5;
#endif
	

	//codecCtx->level = 50;
	printf("codecCtx OK\n");
	//codecCtx->flags = CODEC_FLAG_LOW_DELAY;
	AVDictionary *param = 0;  
	//H.264  
	if(codecCtx->codec_id == AV_CODEC_ID_H264) 
	{  
		av_dict_set(¶m, "preset", "slower", 0);  
	
	switch(flat)
	{
	case 1: av_dict_set(¶m, "tune", "film", 0);  break;
	case 2: av_dict_set(¶m, "tune", "animation", 0);  break;
	case 3: av_dict_set(¶m, "tune", "grain", 0);  break;
	case 4: av_dict_set(¶m, "tune", "stillimage", 0);  break;
	case 0: break;
	}	

	//	av_dict_set(¶m, "tune", "grain", 0);  

	/*film:        movies / live-action content;
	animation:   animated content;
	grain:       when heavy film grain must be preserved;
	stillimage:  still-image encoding;
	psnr:        parameters optimized for PSNR;
	ssim:        parameters optimized for SSIM;
	fastdecode:  parameters that allow faster decoding;
	zerolatency: zero latency, for very low-latency uses such as video conferencing.*/
	}  
	if(codecCtx->codec_id == AV_CODEC_ID_H265){  
		av_dict_set(¶m, "x265-params", "qp=5", 0);  
		av_dict_set(¶m, "preset", "ultrafast", 0);  
		av_dict_set(¶m, "tune", "zero-latency", 0);  
	}  
//	avformat_write_header(fmt, NULL);
	codec = avcodec_find_encoder(codecCtx->codec_id);
	avcodec_open2(codecCtx, codec, &param);
	int n=0; //key data: frame index
	
	QTime allTime; //key data: overall timer
	int encodecNum=0;
	allTime.start();
	okNum = 0;
/************************************************************************/
/* AVI								                                                                     */
/************************************************************************/
#if 1
	AVOutputFormat *ofmt = NULL;
	AVFormatContext *ofmt_ctx = NULL;
	AVPacket pkt_avi;
	const char* out_filename = videoName;
	int ret, i;
	avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
	if (!ofmt_ctx) {
		printf( "Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		return 0;
	}
	ofmt = ofmt_ctx->oformat;
	int videoindex_out=-1;

		//Create the output AVStream from the encoder's stream

			
			AVStream *in_stream = outStream;
			AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
			if (!out_stream) {
				printf( "Failed allocating output stream\n");
				ret = AVERROR_UNKNOWN;
				return 0;
			}
			videoindex_out=out_stream->index;
			//Copy the settings of AVCodecContext
			if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
				printf( "Failed to copy context from input to output stream codec context\n");
				return 0;
			}
			out_stream->codec->codec_tag = 0;
			if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
				out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

	int audioindex_a=-1,audioindex_out=-1;
	//Open output file
	if (!(ofmt->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
			printf( "Could not open output file '%s'", out_filename);
			return 0;
		}
	}
	//Write file header
	if (avformat_write_header(ofmt_ctx, NULL) < 0) {
		printf( "Error occurred when opening output file\n");
		return 0;
	}
	int frame_index=0;
	int64_t cur_pts_v=0,cur_pts_a=0;
//
#endif
	while(1)
	{
	//	printf("while\n");
		QTime time__;
		time__.start();
		if(!goOn && n>=m_bmpMap.size() )
			break;
		QTime time;
		time.start();
		
		
		
	int frameMapNum;
	for(frameMapNum=n; frameMapNum>=m_threadNum; frameMapNum-=m_threadNum)
	{
		;
	}
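	//Repeated subtraction leaves frameMapNum == n % m_threadNum, i.e. the reader thread that owns frame n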
	QMutexLocker lo(&m_mutex);
	receiveFrame = m_threadList[frameMapNum]->m_readFrameMap.value(n);
	lo.unlock();
	static unsigned long  inNullTime = 0;
	if(receiveFrame == NULL)
	{
		if(inNullTime ==0)
			inNullTime = allTime.elapsed();
		//If we wait more than 30 s for the reader threads to deliver a BMP, give up
		else	if( (allTime.elapsed()-inNullTime) >30000)
		{
			char *err = "wait time too long,receive frame data failure\n";
			writeMsg(err);
			exit(1);
		}
		continue;
	}
	else
	{
		inNullTime = 0;
	}
	std::cout<<"time 1 :"<width, receiveFrame->height);
		uint8_t* buffer = (uint8_t*)av_malloc(numBytes);
		yuvFrame = av_frame_alloc();
		avpicture_fill((AVPicture*)yuvFrame, buffer, AV_PIX_FMT_YUV444P,receiveFrame->width, receiveFrame->height);

		struct SwsContext *img_convert_ctx = NULL;
		img_convert_ctx =
			sws_getCachedContext(img_convert_ctx, receiveFrame->width,
			receiveFrame->height, /*AV_PIX_FMT_RGB32*/(AVPixelFormat)receiveFrame->format,
			receiveFrame->width, receiveFrame->height,
			AV_PIX_FMT_YUV444P, SWS_SPLINE/*SWS_POINT*//*SWS_BICUBIC*/,
			NULL, NULL, NULL);

		if( !img_convert_ctx ) {
			//fprintf(stderr, "Cannot initialize sws conversion context\n");
			writeMsg("Cannot initialize sws conversion context\n");
			exit(1);
		}
		sws_scale(img_convert_ctx, (const uint8_t* const*)receiveFrame->data,
						receiveFrame->linesize, 0, receiveFrame->height, yuvFrame->data,
						yuvFrame->linesize);

		sws_freeContext(img_convert_ctx);
		yuvFrame->pts=n;
		yuvFrame->pkt_dts=n;
		AVPacket pkt;
		av_new_packet(&pkt,receiveFrame->width*receiveFrame->height*3); 
		int isOK;
		time.restart();
		if(avcodec_encode_video2(codecCtx, &pkt, yuvFrame, &isOK)==0)
		{
			std::cout<<"\n  encodec time:  "<index;

				//PTS/DTS were set on the input frame; the packet received here may be a delayed frame, not the current one
				pkt.pts ;
				pkt.dts ;
				
			//	av_write_frame(fmt, &pkt);
				/************************************************************************/
				/* AVI write                                                                 */
				/************************************************************************/
				pkt.stream_index=videoindex_out;
				printf("Write 1 Packet. size:%5d\tpts:%8d\n",pkt.size,pkt.pts);
				//写入(Write)
				if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
					printf( "Error muxing packet\n"); 
					break;
				}
				//
				printf("num: %d\t",n+1);
				if(n%10 == 0)
					std::cout<<"\n";
				okNum++;
				std::cout<<".........................write :"<data[0]);
		av_free(receiveFrame->data[0]);
		av_free(yuvFrame);	
		
		av_free(receiveFrame);
		//Enabling this causes an error, reason unknown
	//	sws_freeContext(img_convert_ctx);
		QMutexLocker locker(&m_mutex);
		  m_threadList[frameMapNum]->m_readFrameMap.remove(n);
		locker.unlock();
		receiveFrame = NULL;
		yuvFrame = NULL;
		av_free_packet(&pkt);
		n++;
		qDebug()<<"-------------------------------------------while  time:"<streams[stream_index]->codec->codec->capabilities &  
		CODEC_CAP_DELAY))  
		return 0;  
	while (1) {  
		static int dtsNum = 0;
		AVPacket enc_pkt;  
		enc_pkt.data = NULL;  
		enc_pkt.size = 0;  
		av_init_packet(&enc_pkt);  
		ret = avcodec_encode_video2 (fmt_encode->streams[stream_index]->codec, &enc_pkt,  
			NULL, &got_frame);  
		/*	enc_pkt.dts = dtsNum;
		enc_pkt.pts=dtsNum;*/
		av_frame_free(NULL);  
		if (ret < 0)  
			break;  
		if (!got_frame){  
			ret=0;  
			break;  
		}  
		printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n",enc_pkt.size);  
		/* mux encoded frame */  
		
		ret = av_interleaved_write_frame(fmt_write, &enc_pkt);  
		av_free_packet(&enc_pkt);
		if (ret < 0)  
			break;  
		dtsNum++;
		okNum++;

	}  

	return ret;  
}  
int MakeVideo::videoMuxer( const char* inVideoName, const char* outVideoName)
{
	QTime time;
	time.start();
    AVOutputFormat *ofmt = NULL;
    //One AVFormatContext for the input and one for the output
    AVFormatContext *ifmt_ctx_v = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    int ret, i;
     

	 
    const char *in_filename_v = inVideoName;   //Input file URL
    const char *out_filename = outVideoName;   //Output file URL
    av_register_all();
    //Input
    if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {
        printf( "Could not open input file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {
        printf( "Failed to retrieve input stream information");
        goto end;
    }
 
    printf("Input Information=====================\n");
    av_dump_format(ifmt_ctx_v, 0, in_filename_v, 0);
    printf("======================================\n");
    //Output
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        printf( "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    ofmt = ofmt_ctx->oformat;
    int videoindex_v=-1,videoindex_out=-1;
    for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
        //Create output AVStream according to input AVStream
        if(ifmt_ctx_v->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
        videoindex_v=i;
        AVStream *in_stream = ifmt_ctx_v->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        if (!out_stream) {
            printf( "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        videoindex_out=out_stream->index;
        //Copy the settings of AVCodecContext
        if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
            printf( "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        break;
        }
    }
 
    int audioindex_a=-1,audioindex_out=-1;
    //Dump the output format ------------------
    printf("Output Information====================\n");
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
    printf("======================================\n");
    //Open output file
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
            printf( "Could not open output file '%s'", out_filename);
            goto end;
        }
    }
    //Write file header
    if (avformat_write_header(ofmt_ctx, NULL) < 0) {
        printf( "Error occurred when opening output file\n");
        goto end;
    }
    int frame_index=0;
    int64_t cur_pts_v=0,cur_pts_a=0;
 
    //FIX
#if USE_H264BSF
    AVBitStreamFilterContext* h264bsfc =  av_bitstream_filter_init("h264_mp4toannexb"); 
#endif
#if USE_AACBSF
    AVBitStreamFilterContext* aacbsfc =  av_bitstream_filter_init("aac_adtstoasc"); 
#endif
 
    while (1) {
        AVFormatContext *ifmt_ctx;
        int stream_index=0;
        AVStream *in_stream, *out_stream;
 
 
        //Get an AVPacket
        if(1){
            ifmt_ctx=ifmt_ctx_v;
            stream_index=videoindex_out;
 
            if(av_read_frame(ifmt_ctx, &pkt) >= 0){
                do{
                    if(pkt.stream_index==videoindex_v){
                        cur_pts_v=pkt.pts;
                        break;
                    }
                }while(av_read_frame(ifmt_ctx, &pkt) >= 0);
            }else{
                break;
            }
		}/*else{
		 ifmt_ctx=ifmt_ctx_a;
		 stream_index=audioindex_out;
		 if(av_read_frame(ifmt_ctx, &pkt) >= 0){
		 do{
		 if(pkt.stream_index==audioindex_a){
		 cur_pts_a=pkt.pts;
		 break;
		 }
		 }while(av_read_frame(ifmt_ctx, &pkt) >= 0);
		 }else{
		 break;
		 }

		 }*/
 
        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[stream_index];
//FIX
#if USE_H264BSF
        av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
#if USE_AACBSF
        av_bitstream_filter_filter(aacbsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
        //FIX:No PTS (Example: Raw H.264)
        //Simple Write PTS
        if(pkt.pts==AV_NOPTS_VALUE){
            //Write PTS
            AVRational time_base1=in_stream->time_base;
            //Duration between 2 frames (us)
            int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_stream->r_frame_rate);
            //Parameters
            pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
            pkt.dts=pkt.pts;
            pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
            frame_index++;
        }
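        //e.g. a 25 fps source with a 1/90000 stream time base: calc_duration = 40000 us, so each frame advances pts by 3600 ticks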
        /* copy packet */
        //Convert PTS/DTS
            pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        pkt.stream_index=stream_index;
 
        printf("Write 1 Packet. size:%5d\tpts:%8d\n",pkt.size,pkt.pts);
        //Write
        if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
           printf( "Error muxing packet\n"); 
            break;
        }
        av_free_packet(&pkt);
 
    }
    //Write file trailer
    av_write_trailer(ofmt_ctx);
 
#if USE_H264BSF
    av_bitstream_filter_close(h264bsfc);
#endif
#if USE_AACBSF
    av_bitstream_filter_close(aacbsfc);
#endif
 
end:
    avformat_close_input(&ifmt_ctx_v);
  //  avformat_close_input(&ifmt_ctx_a);
    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0 && ret != AVERROR_EOF) {
        printf( "Error occurred.\n");
        return -1;
    }
	printf("\nall time:%d\n",time.elapsed());
	
    return 0;
}
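
Although videoMuxer() is not called in this build (makeH264() muxes straight to AVI), it can still wrap an already written raw stream into a container; a hypothetical call with placeholder file names, reusing the maker object from the sketch at the top:

	//Hypothetical remux step: wrap a raw H.264 elementary stream into an AVI container
	maker.videoMuxer("temp.h264", "out_remuxed.avi");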

