一个简单的视频解码以及保存为yuv420的小demo
//
// main.cpp
// ffmpeg02
//
// Created by 史明 on 16/10/31.
// [email protected]
// gcc -o main.out main.c -lavutil -lavformat -lavcodec -lz
// compile on Mac OS X El Capitan 10.11.6 as follows.
// gcc -o main.out main.c `pkg-config --cflags \
// --libs sdl libavcodec
// libavformat libavutil ` -lz -lm
// this demo use libavformat and libavcodec to
// read video from a file.
// Copyright © 2016年 史明. All rights reserved.
//
#include <iostream>
#define __STDC_CONSTANT_MACROS
#ifdef _WIN32
//Windows system
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
};
#else //_WIN32
//Linux / macOS system
#ifdef __cplusplus
extern "C"
{
#endif
#include <pthread.h>
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#ifdef __cplusplus
};
#endif
#endif//_WIN32
// Number of decoded frames to dump to disk before we stop saving.
#define FRAMECOUNT 5
using namespace std;
//replace the path with your video file
const char *filepath = "/Users/shiming/ffmpegResearch/FFmpeg-Tutorial/ss1-part.mp4";
// Shared flag read by the (optional) decode thread and cleared by main()
// after the demux loop finishes. NOTE(review): plain bool, not std::atomic —
// cross-thread visibility is not guaranteed; confirm before re-enabling the
// threaded path. (Spelling "isRuning" kept: the name is referenced elsewhere.)
bool isRuning = true;
// Append one decoded frame to "frameout<iFrame>.yuv" as raw planar
// YUV 4:2:0: a full-size luma (Y) plane followed by the quarter-size
// chroma (U, V) planes. Rows are copied one at a time because each
// AVFrame plane may be padded: linesize[i] can exceed the visible width.
//
// pFrame  - decoded frame; assumed to already be in a planar 4:2:0 layout
//           (e.g. AV_PIX_FMT_YUV420P) -- no conversion is performed here.
// width   - visible width in pixels (luma plane bytes per row).
// height  - visible height in pixels (luma plane row count).
// format  - accepted for API compatibility; currently unused.
// iFrame  - index used to build the output file name.
void saveFrame(AVFrame * pFrame, int width, int height, int format,int iFrame) {
    FILE *pFile;
    char szFilename[32];
    int y;
    // snprintf guards against overflow of the fixed-size name buffer.
    snprintf(szFilename, sizeof(szFilename), "frameout%d.yuv", iFrame);
    // Append mode: successive calls with the same index accumulate frames
    // into a single raw YUV stream.
    pFile = fopen(szFilename, "ab");
    if (pFile == NULL)
        return;
    // Y plane: `height` rows of `width` bytes.
    for (y = 0; y < height; y++)
        fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width, pFile);
    // U plane: half resolution in both dimensions (4:2:0 subsampling).
    for (y = 0; y < height / 2; y++) {
        fwrite(pFrame->data[1] + y * pFrame->linesize[1], 1, width / 2, pFile);
    }
    // V plane: same dimensions as the U plane.
    for (y = 0; y < height / 2; y++) {
        fwrite(pFrame->data[2] + y * pFrame->linesize[2], 1, width / 2, pFile);
    }
    fclose(pFile);
}
// The frame data must already be converted to packed RGB24 (e.g. with
// sws_scale) before it can be stored by this function.
// Writes the frame to "frame<iFrame>.ppm" as a binary PPM (magic "P6"):
// a text header "P6\n<w> <h>\n255\n" followed by width*3 bytes per row.
// Rows are written individually because linesize[0] may include padding.
void saveImg(AVFrame *pFrame, int width, int height, int iFrame) {
    FILE *pFile;
    char szFilename[32];
    int y;
    // snprintf guards against overflow of the fixed-size name buffer.
    snprintf(szFilename, sizeof(szFilename), "frame%d.ppm", iFrame);
    pFile = fopen(szFilename, "wb");
    if (pFile == NULL)
        return;
    // PPM header: magic, dimensions, max sample value.
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);
    // Pixel data: one packed-RGB row at a time, skipping any stride padding.
    for (y = 0; y < height; y++)
        fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);
    fclose(pFile);
}
void *decodeThread(void *arg) {
AVFrame* pFrame = av_frame_alloc();
AVCodecContext* pCodecCtx = (AVCodecContext*)arg;
int ret = -1;
int i = 0;
while (isRuning) {
ret = avcodec_receive_frame(pCodecCtx, pFrame);
switch (ret) {
case 0://成功
printf("receive a frame !\n");
saveFrame(pFrame, pCodecCtx->width, pCodecCtx->height, pCodecCtx->colorspace, i++);
break;
case AVERROR_EOF:
printf("the decoder has been fully flushed,\
and there will be no more output frames.\n");
break;
case AVERROR(EAGAIN):
printf("Resource temporarily unavailable\n");
break;
case AVERROR(EINVAL):
printf("Invalid argument\n");
break;
default:
break;
}
}
return nullptr;
}
int main(int argc, const char * argv[]) {
std::cout << "Hello, World!\n";
int errCode = 0;
AVFormatContext *pFmtCtx;
AVInputFormat *pInputFmt;
AVStream *pStream;
AVCodec *pCodec;
AVCodecContext *pCodecCtx;
AVFrame *pFrame;
AVPacket *packet;
AVCodecParameters *pCodecPar;
pthread_t threadID;
//注册所有的格式和编解码器
av_register_all();
//打开文件
if((errCode = avformat_open_input(&pFmtCtx, filepath, pInputFmt, NULL)) < 0) {
printf("open input file fail , err code : %d\n", errCode);
return errCode;
};
if((errCode = avformat_find_stream_info(pFmtCtx, NULL)) < 0){
printf("find stream fail, err code : %d\n", errCode);
return errCode;
};
// Dump information about file onto standard error
av_dump_format(pFmtCtx, 0, filepath, 0);
int i;
int videoStreamID = -1;
for ( i = 0; i < pFmtCtx->nb_streams; ++i) {
if(pFmtCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStreamID = i;
break;
}
}
printf("the first video stream index is : %d\n", videoStreamID);
pCodecPar = pFmtCtx->streams[videoStreamID]->codecpar;
pStream = pFmtCtx->streams[videoStreamID];
printf("codec Par :%d %d, format %d\n", pCodecPar->width,
pCodecPar->height, pCodecPar->format);
pCodec = avcodec_find_decoder(pStream->codecpar->codec_id);
pCodecCtx = avcodec_alloc_context3(pCodec);
if((errCode = avcodec_parameters_to_context(pCodecCtx, pStream->codecpar)) < 0) {
printf("copy the codec parameters to context fail, err code : %d\n", errCode);
return errCode;
}
if((errCode = avcodec_open2(pCodecCtx, pCodec, NULL)) < 0) {
printf("open codec fail , err code : %d", errCode);
}
pFrame = av_frame_alloc();
//pthread_create(&threadID, NULL, decodeThread, pCodecCtx);
i = 0;
packet = av_packet_alloc();
while (av_read_frame(pFmtCtx, packet) >= 0) {
if(packet->stream_index == videoStreamID) {
avcodec_send_packet(pCodecCtx, packet);
errCode = avcodec_receive_frame(pCodecCtx, pFrame);
switch (errCode) {
case 0://成功
printf("got a frame !\n");
if (i++ < FRAMECOUNT) {
saveFrame(pFrame, pCodecPar->width,
pCodecPar->height, pCodecPar->format, i);
}
break;
case AVERROR_EOF:
printf("the decoder has been fully flushed,\
and there will be no more output frames.\n");
break;
case AVERROR(EAGAIN):
printf("Resource temporarily unavailable\n");
break;
case AVERROR(EINVAL):
printf("Invalid argument\n");
break;
default:
break;
}
}
av_packet_unref(packet);
}
isRuning = false;
av_free(pFrame);
avcodec_close(pCodecCtx);
avformat_close_input(&pFmtCtx);
//pthread_join(threadID, NULL);
std::cout << "Bye, World!\n";
return 0;
}
早期的版本是使用
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket*avpkt);
我们需要got_picture_ptr来判断解码是否成功,但是这是一个阻塞的函数。然而在新的版本中是使用
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt);
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame);
avcodec_send_packet和avcodec_receive_frame一定要使用avcodec_open2打开编解码器。avcodec_send_packet提供原始数据作为解码器的输入。输入的AVPacket可能是单独的一个视频帧,也可能是多个音频帧。输入一个flush帧标志着数据流结束。可以把avpacket置空或者data置空或者size设置成0表示数据流结束的flush帧。avcodec_receive_frame获取解码后的数据帧。返回值如上面的源码。
通过实验发现,这两个函数不能在不同的线程去调用,达到异步的效果。也可能这两个函数只是实现异步操作的第一步,底层将来可能会支持异步操作。
劳动可贵,欢迎转载,请注明出处~
http://blog.csdn.net/minger1202/article/details/52468986