// Decode a video with FFmpeg and save the raw frames as a YUV file (通过ffmpeg实现解码并保存为yuv文件)
/**
* 抽取ffmpeg中的函数接口实现视频解码并保存
*/
#define __STDC_CONSTANT_MACROS
#include <stdio.h>
extern "C"
{
#include "libavutil/avutil.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
// header required by the SwsContext image-conversion API used below
#include "libswscale/swscale.h"
};
// Input file path. const-qualified: assigning a string literal to a
// non-const char* is deprecated in C++03 and ill-formed since C++11.
const char* filename = "720p.m2v";
int main()
{
AVCodec *pCodec; //解码器指针
AVCodecContext* pCodecCtx; //ffmpeg解码类的类成员
AVFrame* pAvFrame; //多媒体帧,保存解码后的数据帧
AVFormatContext* pFormatCtx; //保存视频流的信息
av_register_all(); //注册库中所有可用的文件格式和编码器
pFormatCtx = avformat_alloc_context();
if (avformat_open_input(&pFormatCtx, filename, NULL, NULL) != 0) { //检查文件头部
printf("Can't find the stream!\n");
}
if (av_find_stream_info(pFormatCtx) < 0) { //查找流信息
printf("Can't find the stream information !\n");
}
int videoindex = -1;
for (int i=0; i < pFormatCtx->nb_streams; ++i) //遍历各个流,找到第一个视频流,并记录该流的编码信息
{
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videoindex = i;
break;
}
}
if (videoindex == -1) {
printf("Don't find a video stream !\n");
return -1;
}
pCodecCtx = pFormatCtx->streams[videoindex]->codec; //得到一个指向视频流的上下文指针
pCodec = avcodec_find_decoder(pCodecCtx->codec_id); //到该格式的解码器
if (pCodec == NULL) {
printf("Cant't find the decoder !\n"); //寻找解码器
return -1;
}
if (avcodec_open2(pCodecCtx,pCodec,NULL) < 0) { //打开解码器
printf("Can't open the decoder !\n");
return -1;
}
pAvFrame = avcodec_alloc_frame(); //分配帧存储空间
AVFrame* pFrameBGR = avcodec_alloc_frame(); //存储解码后转换的RGB数据
AVFrame* pFrameYUV;
int sizeYUV = avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
pFrameYUV = avcodec_alloc_frame();
printf(" sizeYUV: %d\n", sizeYUV);
uint8_t *out_yuv_buffer = (uint8_t *)av_malloc(sizeYUV);
avpicture_fill((AVPicture *)pFrameYUV, out_yuv_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
FILE* fpYUV = fopen("demo.yuv", "wb+");
if (!fpYUV)
{
printf("文件打开失败!\n");
return -1;
}
AVPacket* packet = (AVPacket*)malloc(sizeof(AVPacket));
printf("-----------输出文件信息---------\n");
av_dump_format(pFormatCtx, 0, filename, 0);
printf("------------------------------");
struct SwsContext *img_convert_ctx;
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
int ret;
int got_picture;
for (;;)
{
if(av_read_frame(pFormatCtx, packet)>=0)
{
if(packet->stream_index==videoindex)
{
ret = avcodec_decode_video2(pCodecCtx, pAvFrame, &got_picture, packet);
if(ret < 0)
{
printf("Decode Error.(解码错误)\n");
return -1;
}
if (got_picture)
{
printf(" %d ", got_picture);
sws_scale(img_convert_ctx, (const uint8_t* const*)pAvFrame->data, pAvFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
int size_temp;
size_temp = pCodecCtx->height * pCodecCtx->width;
fwrite(pFrameYUV->data[0], 1,size_temp, fpYUV);
fwrite(pFrameYUV->data[1], 1,size_temp/4, fpYUV);
fwrite(pFrameYUV->data[2], 1,size_temp/4, fpYUV);
}
}
av_free_packet(packet);
}
else
{
break;
}
}
//FIX: Flush Frames remained in Codec
while (1)
{
ret = avcodec_decode_video2(pCodecCtx, pAvFrame, &got_picture, packet);
if (ret < 0)
break;
if (!got_picture)
break;
sws_scale(img_convert_ctx, (const uint8_t* const*)pAvFrame->data, pAvFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
int y_size=pCodecCtx->width*pCodecCtx->height;
fwrite(pFrameYUV->data[0],1,y_size,fpYUV); //Y
fwrite(pFrameYUV->data[1],1,y_size/4,fpYUV); //U
fwrite(pFrameYUV->data[2],1,y_size/4,fpYUV); //V
}
av_free(pFrameYUV);
av_free(pAvFrame);
av_free(out_yuv_buffer);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
sws_freeContext(img_convert_ctx);
fclose(fpYUV);
return 0;
}