This code targets a Windows environment. The comments throughout the code come from the official FFmpeg example and were deliberately left in; they are for reference only. Source download: http://download.csdn.net/detail/jw20082009jw/9720626
I recorded a fairly short MP4 clip myself for testing. Since our goal at work is to process small videos, I have not yet looked into how this code behaves with large files.
The program is adapted from the official FFmpeg example demuxing_decoding.c. It demuxes an MP4 file into a raw (decoded) video stream and a raw audio stream, with added code that extracts the MP4 key frames and saves them as JPEG images.
Input arguments (two): the MP4 file to process, and the directory where the generated images and raw stream files are saved.
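For example, a run might look like this (the executable name and input file are illustrative; the output directory matches the ffplay examples below):

demuxing_decoding.exe F:\test\input.mp4 F:\ffmpeg\ffmpegtest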
Output files: mp4.vr (the raw video stream), mp4.ar (the raw audio stream), and one .jpg file per extracted key frame, each named after that frame's pts value.
PTS (presentation time stamp): the time at which a video frame is to be displayed, counted in units of the stream's time_base; see the sketch below for converting it to seconds.
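As a quick illustration (this helper is a sketch, not part of the program below), a pts can be converted to seconds with av_q2d() and the owning stream's time_base:

/* Convert a frame's pts (counted in stream time_base units) to seconds. */
double pts_to_seconds(const AVStream *stream, int64_t pts)
{
    /* av_q2d() turns the rational time_base (e.g. 1/90000) into a double,
     * so pts * time_base yields the presentation time in seconds. */
    return pts * av_q2d(stream->time_base);
}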
Playing the .vr file: use ffplay; the exact command is printed when the program finishes (for my test video it was: ffplay -f rawvideo -pix_fmt yuv420p -video_size 320x240 F:\ffmpeg\ffmpegtest\mp4.vr).
Playing the .ar file: likewise, the matching ffplay command is printed after the run (for my audio: ffplay -f f32le -ac 1 -ar 8000 F:\ffmpeg\ffmpegtest\mp4.ar).
The full code follows:
/**
* @file
* Demuxing and decoding example.
*
* Show how to use the libavformat and libavcodec API to demux and
* decode audio and video data.
* @example demuxing_decoding.c
*/
#include <stdio.h>
#include <string.h>
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
static int width, height;
static enum AVPixelFormat pix_fmt;
static AVStream *video_stream = NULL, *audio_stream = NULL;
static const char *src_filename = NULL;
static char video_dst_filename[50] = {0};
static char audio_dst_filename[50] = {0};
static char *dst_pathname = NULL;
static FILE *video_dst_file = NULL;
static FILE *audio_dst_file = NULL;
static FILE *video_dst_filetemp = NULL;
static uint8_t *video_dst_data[4] = {NULL};
static int video_dst_linesize[4];
static int video_dst_bufsize;
static int video_stream_idx = -1, audio_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket pkt;
static int video_frame_count = 0;
static int audio_frame_count = 0;
static AVCodecContext* pCodecCtx = NULL;
static AVCodec* pCodec = NULL;
static AVPacket pkt_out;
static char file_name[50] = {0};
/* Enable or disable frame reference counting. You are not supposed to support
* both paths in your application but pick the one most appropriate to your
* needs. Look for the use of refcount in this example to see what are the
* differences of API usage between them. */
static int refcount = 0;
/* Write the encoded JPEG packet to disk, then close the file and release the packet. */
static void save_picture_uinit(FILE * pFile,AVPacket pkt)
{
fwrite(pkt.data, sizeof(uint8_t),pkt.size, pFile);
fclose(pFile);
av_free_packet(&pkt);
}
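/* Build the output JPEG path "<dir>\<pts>.jpg" from the destination
 * directory, handling the presence or absence of a trailing backslash. */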
void generate_file_name(char * file_name,char * file_path,long long pts)
{
if (file_path[strlen(file_path) - 1] == '\\')
{
sprintf(file_name,"%s%lld.jpg",file_path,pts);
}
else
{
sprintf(file_name,"%s\\%lld.jpg",file_path,pts);
}
}
static int decode_packet(int *got_frame, int cached)
{
int ret = 0;
int decoded = pkt.size;
*got_frame = 0;
if (pkt.stream_index == video_stream_idx) {
/* decode video frame */
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
if (ret < 0) {
//fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
return ret;
}
if (*got_frame) {
if (frame->width != width || frame->height != height ||
frame->format != pix_fmt) {
/* To handle this change, one could call av_image_alloc again and
* decode the following frames into another rawvideo file. */
fprintf(stderr, "Error: Width, height and pixel format have to be "
"constant in a rawvideo file, but the width, height or "
"pixel format of the input video changed:\n"
"old: width = %d, height = %d, format = %s\n"
"new: width = %d, height = %d, format = %s\n",
width, height, av_get_pix_fmt_name(pix_fmt),
frame->width, frame->height,
av_get_pix_fmt_name(frame->format));
return -1;
}
printf("video_frame%s n:%d coded_n:%d\n",
cached ? "(cached)" : "",
video_frame_count++, frame->coded_picture_number);
/* copy decoded frame to destination buffer:
* this is required since rawvideo expects non aligned data */
av_image_copy(video_dst_data, video_dst_linesize,
(const uint8_t **)(frame->data), frame->linesize,
pix_fmt, width, height);
/* re-encode key frames as JPEG images */
if (pkt.flags & AV_PKT_FLAG_KEY) {
int got_packet = 0;
av_new_packet(&pkt_out,pCodecCtx->width * pCodecCtx->height * 3);
ret = avcodec_encode_video2(pCodecCtx, &pkt_out,frame, &got_packet);
if(ret < 0)
{
printf("Encode Error.\n");
avcodec_close(pCodecCtx);
av_free(pCodecCtx);
av_free_packet(&pkt_out);
return 0;
}
//success encode
if (got_packet)
{
generate_file_name(file_name,dst_pathname,pkt.pts);
//Method 1
//save_picture_init(&pFormatCtx,video_st,pCodecCtx,file_name);
//save_picture_uinit_2(pFormatCtx,pkt_out);
//Method 2
video_dst_filetemp = fopen(file_name, "wb");
if (!video_dst_filetemp) {
fprintf(stderr, "Could not open destination file %s\n", video_dst_filetemp);
}
if(video_dst_filetemp)
{
save_picture_uinit(video_dst_filetemp,pkt_out);
}
}
}
/* write to rawvideo file */
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
}
} else if (pkt.stream_index == audio_stream_idx) {
/* decode audio frame */
ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
if (ret < 0) {
//fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
return ret;
}
/* Some audio decoders decode only part of the packet, and have to be
* called again with the remainder of the packet data.
* Sample: fate-suite/lossless-audio/luckynight-partial.shn
* Also, some decoders might over-read the packet. */
decoded = FFMIN(ret, pkt.size);
if (*got_frame) {
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
/*
printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
cached ? "(cached)" : "",
audio_frame_count++, frame->nb_samples,
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
*/
/* Write the raw audio data samples of the first plane. This works
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
* most audio decoders output planar audio, which uses a separate
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
* In other words, this code will write only the first audio channel
* in these cases.
* You should use libswresample or libavfilter to convert the frame
* to packed data. */
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
}
}
/* If we use frame reference counting, we own the data and need
* to de-reference it when we don't use it anymore */
if (*got_frame && refcount)
av_frame_unref(frame);
return decoded;
}
static int open_codec_context(int *stream_idx,
AVCodecContext **dec_ctx, AVFormatContext *fmt_ctx, enum AVMediaType type)
{
int ret, stream_index;
AVStream *st;
AVCodec *dec = NULL;
AVDictionary *opts = NULL;
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
if (ret < 0) {
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
av_get_media_type_string(type), src_filename);
return ret;
} else {
stream_index = ret;
st = fmt_ctx->streams[stream_index];
/* find decoder for the stream */
dec = avcodec_find_decoder(st->codecpar->codec_id);
if (!dec) {
fprintf(stderr, "Failed to find %s codec\n",
av_get_media_type_string(type));
return AVERROR(EINVAL);
}
/* Allocate a codec context for the decoder */
*dec_ctx = avcodec_alloc_context3(dec);
if (!*dec_ctx) {
fprintf(stderr, "Failed to allocate the %s codec context\n",
av_get_media_type_string(type));
return AVERROR(ENOMEM);
}
/* Copy codec parameters from input stream to output codec context */
if ((ret = avcodec_parameters_to_context(*dec_ctx, st->codecpar)) < 0) {
fprintf(stderr, "Failed to copy %s codec parameters to decoder context\n",
av_get_media_type_string(type));
return ret;
}
/* Init the decoders, with or without reference counting */
av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0) {
fprintf(stderr, "Failed to open %s codec\n",
av_get_media_type_string(type));
return ret;
}
*stream_idx = stream_index;
}
return 0;
}
static int get_format_from_sample_fmt(const char **fmt,
enum AVSampleFormat sample_fmt)
{
int i;
struct sample_fmt_entry {
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
} sample_fmt_entries[] = {
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
};
*fmt = NULL;
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
if (sample_fmt == entry->sample_fmt) {
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
return 0;
}
}
fprintf(stderr,
"sample format %s is not supported as output format\n",
av_get_sample_fmt_name(sample_fmt));
return -1;
}
int main (int argc, char **argv)
{
int ret = 0, got_frame;
if (argc != 3 && argc != 4) {
fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
"API example program to show how to read frames from an input file.\n"
"This program reads frames from a file, decodes them, and writes decoded\n"
"video frames to a rawvideo file named video_output_file, and decoded\n"
"audio frames to a rawaudio file named audio_output_file.\n\n"
"If the -refcount option is specified, the program use the\n"
"reference counting frame system which allows keeping a copy of\n"
"the data for longer than one decode call.\n"
"\n", argv[0]);
exit(1);
}
if (argc == 4 && !strcmp(argv[1], "-refcount")) {
refcount = 1;
argv++;
}
src_filename = argv[1];
dst_pathname = argv[2];
sprintf(video_dst_filename,"%s\\mp4.vr",argv[2]);
sprintf(audio_dst_filename,"%s\\mp4.ar",argv[2]);
/* register all formats and codecs */
av_register_all();
/* open input file, and allocate format context */
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
fprintf(stderr, "Could not open source file %s\n", src_filename);
exit(1);
}
/* retrieve stream information */
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
fprintf(stderr, "Could not find stream information\n");
exit(1);
}
if (open_codec_context(&video_stream_idx, &video_dec_ctx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
video_stream = fmt_ctx->streams[video_stream_idx];
video_dst_file = fopen(video_dst_filename, "wb");
if (!video_dst_file) {
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
ret = 1;
goto end;
}
/* allocate image where the decoded image will be put */
width = video_dec_ctx->width;
height = video_dec_ctx->height;
pix_fmt = video_dec_ctx->pix_fmt;
ret = av_image_alloc(video_dst_data, video_dst_linesize,
width, height, pix_fmt, 1);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw video buffer\n");
goto end;
}
video_dst_bufsize = ret;
}
if (open_codec_context(&audio_stream_idx, &audio_dec_ctx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
audio_stream = fmt_ctx->streams[audio_stream_idx];
audio_dst_file = fopen(audio_dst_filename, "wb");
if (!audio_dst_file) {
fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
ret = 1;
goto end;
}
}
/* dump input information to stderr */
av_dump_format(fmt_ctx, 0, src_filename, 0);
if (!audio_stream && !video_stream) {
fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
ret = 1;
goto end;
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate frame\n");
ret = AVERROR(ENOMEM);
goto end;
}
/* initialize packet, set data to NULL, let the demuxer fill it */
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
if (video_stream)
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
if (audio_stream)
printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
/* find the MJPEG encoder used to save key frames as JPEG images */
pCodec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
if (!pCodec)
{
printf("Call avcodec_find_encoder function failed!\n");
return 0;
}
pCodecCtx = avcodec_alloc_context3(pCodec);
if (!pCodecCtx)
{
printf("Call avcodec_alloc_context3 function failed!\n");
return 0;
}
pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
pCodecCtx->pix_fmt = AV_PIX_FMT_YUVJ420P;
//To change the image size, scale the frame first (e.g. with libswscale)
pCodecCtx->width = video_dec_ctx->width;
pCodecCtx->height = video_dec_ctx->height;
pCodecCtx->time_base.num = 1;
pCodecCtx->time_base.den = 15;
//Output some information
if (avcodec_open2(pCodecCtx, pCodec,NULL) < 0)
{
printf("Could not open codec.\n");
return 0;
}
av_init_packet(&pkt_out);
/* read frames from the file */
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
AVPacket orig_pkt = pkt;
do {
ret = decode_packet(&got_frame, 0);
if (ret < 0)
break;
pkt.data += ret;
pkt.size -= ret;
} while (pkt.size > 0);
av_packet_unref(&orig_pkt);
}
/* flush cached frames */
pkt.data = NULL;
pkt.size = 0;
do {
decode_packet(&got_frame, 1);
} while (got_frame);
printf("Demuxing succeeded.\n");
if (video_stream) {
printf("Play the output video file with the command:\n"
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
av_get_pix_fmt_name(pix_fmt), width, height,
video_dst_filename);
}
if (audio_stream) {
enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
int n_channels = audio_dec_ctx->channels;
const char *fmt;
if (av_sample_fmt_is_planar(sfmt)) {
const char *packed = av_get_sample_fmt_name(sfmt);
printf("Warning: the sample format the decoder produced is planar "
"(%s). This example will output the first channel only.\n",
packed ? packed : "?");
sfmt = av_get_packed_sample_fmt(sfmt);
n_channels = 1;
}
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
goto end;
printf("Play the output audio file with the command:\n"
"ffplay -f %s -ac %d -ar %d %s\n",
fmt, n_channels, audio_dec_ctx->sample_rate,
audio_dst_filename);
}
end:
avcodec_free_context(&video_dec_ctx);
avcodec_free_context(&audio_dec_ctx);
avcodec_free_context(&pCodecCtx);
avformat_close_input(&fmt_ctx);
if (video_dst_file)
fclose(video_dst_file);
if (audio_dst_file)
fclose(audio_dst_file);
av_frame_free(&frame);
av_free(video_dst_data[0]);
return ret < 0;
}
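As the comment in decode_packet points out, the raw-audio writer only keeps the first channel when the decoder produces planar samples. A minimal sketch of the libswresample conversion it suggests might look like the following (the helper name and its error handling are illustrative, not part of the program above):

#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>

/* Convert one decoded planar audio frame to packed (interleaved) samples.
 * On success *out points to a buffer the caller frees with av_freep(out),
 * and the return value is the number of converted samples per channel. */
static int planar_to_packed(const AVFrame *in, uint8_t **out)
{
    enum AVSampleFormat packed_fmt =
        av_get_packed_sample_fmt((enum AVSampleFormat)in->format);
    int nb_channels = av_get_channel_layout_nb_channels(in->channel_layout);
    int ret;
    SwrContext *swr = swr_alloc_set_opts(NULL,
        in->channel_layout, packed_fmt, in->sample_rate,                      /* output */
        in->channel_layout, (enum AVSampleFormat)in->format, in->sample_rate, /* input */
        0, NULL);
    if (!swr || swr_init(swr) < 0) {
        swr_free(&swr);
        return AVERROR(EINVAL);
    }
    ret = av_samples_alloc(out, NULL, nb_channels, in->nb_samples, packed_fmt, 0);
    if (ret >= 0) {
        ret = swr_convert(swr, out, in->nb_samples,
                          (const uint8_t **)in->extended_data, in->nb_samples);
        if (ret < 0)
            av_freep(out); /* drop the buffer if conversion failed */
    }
    swr_free(&swr);
    return ret;
}

With this, the fwrite in the audio branch could write all channels interleaved instead of only frame->extended_data[0].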