【音视频】FFmpeg 安装+Api使用+Android+笔记( 一)
FFmpeg开发Api文档
2.1 FFmpeg 打印音/视频信息(Meta)
AVFormatContext 上下文结构体,封装文件的信息
av_register_all() 只有调用了该函数,才能使用复用器,编码器等, 注册所有的格式。包括解封装格式和加封装格式
avformat_open_input()。打开一个文件并解析。可解析的内容包括:视频流、音频流、视频流参数、音频流参数、视频帧索引
avformat_find_stream_info(),查找格式和索引。有些早期格式它的索引并没有放到头当中,需要你到后面探测,就会用到函数
avformat_close_input () 关闭打开的解析文件,释放资源
代码:
#include <stdlib.h>
#include <libavutil/log.h>
#include <libavformat/avformat.h>
/*
 * Print the meta information of a media file.
 * Usage: ./test <media file>
 */
int main(int argc, char *argv[])
{
    av_log_set_level(AV_LOG_DEBUG);
    int err_code;
    char errors[1024];
    AVFormatContext *fmt_ctx = NULL;

    /* read the input file path from the command line */
    if (argc < 2) {
        av_log(NULL, AV_LOG_ERROR, "[Usage]you should input media file!\n");
        /* BUG FIX: the original fell through here and later dereferenced
         * argv[1], which does not exist when no argument is given. */
        return -1;
    }
    char *src_filename = argv[1];

    /* register all muxers/demuxers; required before other FFmpeg 4.x calls */
    av_register_all();

    /* open the file to be parsed and read its header */
    if ((err_code = avformat_open_input(&fmt_ctx, src_filename, NULL, NULL)) < 0) {
        av_strerror(err_code, errors, 1024);
        av_log(NULL, AV_LOG_DEBUG, "Could not open source file %s", errors);
        exit(1);
    }

    /* probe the streams; some formats keep no index in the header and must
     * be scanned to discover stream parameters */
    if ((err_code = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_strerror(err_code, errors, 1024);
        /* BUG FIX: original logged "Could not open source file" here too */
        av_log(NULL, AV_LOG_DEBUG, "Could not find stream info %s", errors);
        exit(1);
    }

    /* dump the meta information to the log */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    /* close input file and release resources */
    avformat_close_input(&fmt_ctx);
    return 0;
}
编译:gcc -g -o test test.c -I/home/FFmpeg/ffmpeg/ffmpeg-4.1.3 -lavutil -lavformat
执行: ./test test.wmv [test.wmv 为当前目录下的测试的视频文件]
2.2 FFmpeg 抽取音频数据
av_init_packet(&pkt); 初始化流中读取的数据包
av_find_best_stream(), 当视频被解封装出来后,需要分开处理音频和视频,需要找到最优的一条音频流和视频流
【 AVMEDIA_TYPE_AUDIO 音频 AVMEDIA_TYPE_VIDEO 视频 】
av_read_frame(AVFormatContext *s, AVPacket *pkt) 读取音视频的帧数据
av_packet_unref(&pkt) 手动对 AVPacket的计数引用-1 ,防止内存泄露。
代码:
#include <stdio.h>
#include <libavutil/log.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
/* ADTS header length in bytes (fixed header, no CRC).
 * BUG FIX: the original macro ended in a stray ';' which expanded to
 * "char buf[7;];" at its use site and failed to compile. */
#define ADTS_HEADER_LEN 7

/*
 * Write a fixed 7-byte ADTS header (protection_absent = 1, no CRC) into
 * szAdtsHeader for one AAC frame whose payload is dataLen bytes.
 *
 * Hard-coded stream parameters: AAC-LC (audio_object_type 2), sampling
 * frequency index 7 (22050 Hz in the ADTS table), 2 channels.
 * NOTE(review): these must match the real audio stream being extracted --
 * confirm against the source file's codec parameters.
 */
void adts_header(char *szAdtsHeader, int dataLen){
    int audio_object_type = 2;            /* AAC-LC */
    int sampling_frequency_index = 7;     /* 22050 Hz per ADTS table */
    int channel_config = 2;               /* stereo */

    int adtsLen = dataLen + 7;            /* frame length includes this header */

    szAdtsHeader[0] = 0xff;               /* syncword 0xfff, high 8 bits */
    szAdtsHeader[1] = 0xf0;               /* syncword 0xfff, low 4 bits */
    szAdtsHeader[1] |= (0 << 3);          /* MPEG version: 0 = MPEG-4 (1 bit) */
    szAdtsHeader[1] |= (0 << 1);          /* layer: always 0 (2 bits) */
    szAdtsHeader[1] |= 1;                 /* protection absent: 1 = no CRC (1 bit) */
    szAdtsHeader[2] = (audio_object_type - 1)<<6;              /* profile (2 bits) */
    szAdtsHeader[2] |= (sampling_frequency_index & 0x0f)<<2;   /* sampling index (4 bits) */
    szAdtsHeader[2] |= (0 << 1);                               /* private bit (1 bit) */
    szAdtsHeader[2] |= (channel_config & 0x04)>>2;             /* channel config, high bit */
    szAdtsHeader[3] = (channel_config & 0x03)<<6;              /* channel config, low 2 bits */
    szAdtsHeader[3] |= (0 << 5);                               /* original/copy (1 bit) */
    szAdtsHeader[3] |= (0 << 4);                               /* home (1 bit) */
    szAdtsHeader[3] |= (0 << 3);                               /* copyright id bit (1 bit) */
    szAdtsHeader[3] |= (0 << 2);                               /* copyright id start (1 bit) */
    szAdtsHeader[3] |= ((adtsLen & 0x1800) >> 11);             /* frame length, high 2 bits */
    szAdtsHeader[4] = (uint8_t)((adtsLen & 0x7f8) >> 3);       /* frame length, middle 8 bits */
    szAdtsHeader[5] = (uint8_t)((adtsLen & 0x7) << 5);         /* frame length, low 3 bits */
    szAdtsHeader[5] |= 0x1f;                                   /* buffer fullness 0x7ff, high 5 bits */
    szAdtsHeader[6] = 0xfc;                                    /* fullness low 6 bits + frame count 0 */
}
/*
 * Extract the audio stream of a media file into a raw AAC (ADTS) file.
 * Usage: ./test <src media file> <dst aac file>
 *
 * Every demuxed AAC packet is prefixed with a 7-byte ADTS header so the
 * output can be played directly (e.g. with ffplay).
 */
int main(int argc, char *argv[])
{
    av_log_set_level(AV_LOG_DEBUG);

    char errors[1024];
    int err_code;
    char *src_filename = NULL;
    char *dst_filename = NULL;
    FILE *dst_fd = NULL;
    int audio_stream_index = -1;
    int len;
    AVFormatContext *fmt_ctx = NULL;
    AVPacket pkt;
    /* NOTE: the original also allocated an AVFrame and several output
     * contexts that were never used (and the frame was leaked); removed. */

    /* arguments: 1 = source file to demux, 2 = destination file */
    if (argc < 3) {
        av_log(NULL, AV_LOG_ERROR, "[Usage]you should input media file!\n");
        return -1;
    }
    src_filename = argv[1];
    dst_filename = argv[2];
    if (src_filename == NULL || dst_filename == NULL) {
        av_log(NULL, AV_LOG_DEBUG, "src or dts file is null, plz check them!\n");
        return -1;
    }

    av_register_all();

    dst_fd = fopen(dst_filename, "wb");
    if (!dst_fd) {
        av_log(NULL, AV_LOG_DEBUG, "Could not open destination file %s\n", dst_filename);
        return -1;
    }

    /* open input media file, and allocate format context */
    if ((err_code = avformat_open_input(&fmt_ctx, src_filename, NULL, NULL)) < 0) {
        av_strerror(err_code, errors, 1024);
        av_log(NULL, AV_LOG_DEBUG, "Could not open source file: %s, %d(%s)\n",
               src_filename, err_code, errors);
        fclose(dst_fd);                 /* do not leak the output file */
        return -1;
    }

    /* retrieve stream information */
    if ((err_code = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
        av_strerror(err_code, errors, 1024);
        av_log(NULL, AV_LOG_DEBUG, "failed to find stream information: %s, %d(%s)\n",
               src_filename, err_code, errors);
        avformat_close_input(&fmt_ctx);
        fclose(dst_fd);
        return -1;
    }
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    /* initialize the packet that receives demuxed data */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /* find the best audio stream */
    audio_stream_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
    if (audio_stream_index < 0) {
        av_log(NULL, AV_LOG_DEBUG, "Could not find %s stream in input file %s\n",
               av_get_media_type_string(AVMEDIA_TYPE_AUDIO), src_filename);
        avformat_close_input(&fmt_ctx);
        fclose(dst_fd);
        return AVERROR(EINVAL);
    }

    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        if (pkt.stream_index == audio_stream_index) {
            /* each AAC frame needs its own ADTS header */
            char adts_header_buf[7];   /* BUG FIX: original macro "7;" broke this declaration */
            adts_header(adts_header_buf, pkt.size);
            fwrite(adts_header_buf, 1, 7, dst_fd);
            len = fwrite(pkt.data, 1, pkt.size, dst_fd);
            if (len != pkt.size) {
                /* BUG FIX: the original log statement had an unterminated
                 * string literal and did not compile */
                av_log(NULL, AV_LOG_DEBUG,
                       "warning, length of written data isn't equal pkt.size(%d, %d)\n",
                       len, pkt.size);
            }
        }
        /* drop our reference so the packet buffer is not leaked */
        av_packet_unref(&pkt);
    }

    /* close input file */
    avformat_close_input(&fmt_ctx);
    if (dst_fd) {
        fclose(dst_fd);
    }
    return 0;
}
编译: gcc -g -o test test.c -I/home/FFmpeg/ffmpeg/ffmpeg-4.1.3 -lavutil -lavformat -lavcodec
执行 : ./test test.wmv myaudio.acc 最后会把 test.wmv文件中的音频分离出来 并写入了 myaudio.acc
执行:ffplay myaudio.acc 可以发现有音频声音
2.3 FFmpeg 抽取H264视频数据
使用ffmpeg读取H264并不能直接得到NALU单元,必须从读取出来的AVPacket与AVFormatContext->streams[video_index]->codec->extradata提取出来。
extradata 存放着NALU,读取完之后识别并读取SPS/PPS,在其之前加上4个字节的 00 00 00 01
(H264 startcode),来达到一帧帧正常播放
关于 NALU的介绍,引用网上的一篇博客
https://www.cnblogs.com/shakin/p/3714860.html
提取流程
1、打开MP4文件,循环读取
2、从AVPacket获取IDR(nalu type==5),从AVFormatContext->streams[video_index]->codec->extradata获取SPS与PPS。
3、添加start code,给SPS与PPS前加上00 00 00 01 4个字节,(非SPS与PPS)IDR加上00 00 01 3个字节
4、按照下图的数据结构写入文件中
5、继续提取NALU,添加start code(00 00 01),写入文件中
注:
1、SPS与PPS存在于AVFormatContext->streams[video_index]->codec->extradata
2、其他的NALU存在于读取出来的AVPacket中
3、AVPacket->data前四个字节表示当前NALU的大小,根据这一条件可以获取NALU
4、AVFormatContext->streams[video_index]->codec->extradata + 5,之后两个字节表示SPS的个数,
5、SPS/PPS也是 NALU,在 IDR帧之前就要 SPS/PPS,只要它分析出 00 00 00 01 或 00 00 01它就知道这是一个NALU
代码:
#include <stdio.h>
#include <limits.h>
#include <libavutil/log.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
/* Write a 32-bit value to p in big-endian byte order; used below to emit
 * the 4-byte Annex-B start code 0x00000001. */
#ifndef AV_WB32
# define AV_WB32(p, val) do { \
uint32_t d = (val); \
((uint8_t*)(p))[3] = (d); \
((uint8_t*)(p))[2] = (d)>>8; \
((uint8_t*)(p))[1] = (d)>>16; \
((uint8_t*)(p))[0] = (d)>>24; \
} while(0)
#endif
/* Read a 16-bit big-endian value from x; AVCC stores the SPS/PPS unit
 * sizes as 16-bit big-endian fields. */
#ifndef AV_RB16
# define AV_RB16(x) \
((((const uint8_t*)(x))[0] << 8) | \
((const uint8_t*)(x))[1])
#endif
/*
 * Append one NALU to `out`, prefixed with an Annex-B start code (and, when
 * given, an already Annex-B formatted SPS/PPS block before it).
 *
 * out          accumulating output packet; grown in place with av_grow_packet
 * sps_pps      optional SPS/PPS bytes copied in front of the NALU (may be NULL)
 * sps_pps_size size of sps_pps in bytes (0 when sps_pps is NULL)
 * in           raw NALU payload (no AVCC length prefix, no start code)
 * in_size      size of `in` in bytes
 *
 * The first NALU written into an empty packet gets a 4-byte start code
 * (00 00 00 01); every later NALU gets the 3-byte form (00 00 01).
 * Resulting layout: [previous data][sps_pps][start code][in].
 * Returns 0 on success or a negative AVERROR from av_grow_packet().
 */
static int alloc_and_copy(AVPacket *out,
const uint8_t *sps_pps, uint32_t sps_pps_size,
const uint8_t *in, uint32_t in_size)
{
uint32_t offset = out->size;
uint8_t nal_header_size = offset ? 3 : 4; /* 4-byte start code only at packet start */
int err;
/* grow the packet: sps/pps + start code + payload */
err = av_grow_packet(out, sps_pps_size + in_size + nal_header_size);
if (err < 0)
return err;
if (sps_pps)
memcpy(out->data + offset, sps_pps, sps_pps_size);
memcpy(out->data + sps_pps_size + nal_header_size + offset, in, in_size);
if (!offset) {
AV_WB32(out->data + sps_pps_size, 1); /* 00 00 00 01 */
} else {
(out->data + offset + sps_pps_size)[0] =
(out->data + offset + sps_pps_size)[1] = 0;
(out->data + offset + sps_pps_size)[2] = 1; /* 00 00 01 */
}
return 0;
}
/*
 * Convert H.264 extradata from AVCC ("avcC" box) layout to Annex-B:
 * every SPS and PPS found in codec_extradata is copied into a newly
 * allocated buffer, each prefixed with a 00 00 00 01 start code.
 *
 * On success, out_extradata->data/size receive the new buffer (plus
 * `padding` trailing zero bytes); ownership passes to the caller, who must
 * av_free() the data. Returns the AVCC NALU length-field size (1..4),
 * or a negative AVERROR on malformed extradata.
 *
 * NOTE(review): control flow is deliberately peculiar -- `goto pps` jumps
 * into the middle of the while loop so the PPS count is read even when
 * there are no SPS units. Taken from FFmpeg's h264_mp4toannexb BSF.
 */
int h264_extradata_to_annexb(const uint8_t *codec_extradata, const int codec_extradata_size, AVPacket *out_extradata, int padding)
{
uint16_t unit_size;
uint64_t total_size = 0;
uint8_t *out = NULL, unit_nb, sps_done = 0,
sps_seen = 0, pps_seen = 0, sps_offset = 0, pps_offset = 0;
/* skip avcC prefix: version, profile, compatibility, level (4 bytes) */
const uint8_t *extradata = codec_extradata + 4;
static const uint8_t nalu_header[4] = { 0, 0, 0, 1 };
int length_size = (*extradata++ & 0x3) + 1; /* number of bytes used to encode each NALU length */
sps_offset = pps_offset = -1;
/* retrieve sps and pps unit(s) */
unit_nb = *extradata++ & 0x1f; /* number of sps unit(s) */
if (!unit_nb) {
goto pps; /* no SPS at all: jump straight to reading the PPS count */
}else {
sps_offset = 0;
sps_seen = 1;
}
while (unit_nb--) {
int err;
unit_size = AV_RB16(extradata); /* 16-bit big-endian unit size */
total_size += unit_size + 4;    /* +4 for the start code */
if (total_size > INT_MAX - padding) {
av_log(NULL, AV_LOG_ERROR,
"Too big extradata size, corrupted stream or invalid MP4/AVCC bitstream\n");
av_free(out);
return AVERROR(EINVAL);
}
/* the unit must lie entirely inside the extradata buffer */
if (extradata + 2 + unit_size > codec_extradata + codec_extradata_size) {
av_log(NULL, AV_LOG_ERROR, "Packet header is not contained in global extradata, "
"corrupted stream or invalid MP4/AVCC bitstream\n");
av_free(out);
return AVERROR(EINVAL);
}
if ((err = av_reallocp(&out, total_size + padding)) < 0)
return err;
/* start code, then the unit payload (skipping its 2-byte size field) */
memcpy(out + total_size - unit_size - 4, nalu_header, 4);
memcpy(out + total_size - unit_size, extradata + 2, unit_size);
extradata += 2 + unit_size;
pps:
/* after the last SPS (or immediately, via the goto) read the PPS count once */
if (!unit_nb && !sps_done++) {
unit_nb = *extradata++; /* number of pps unit(s) */
if (unit_nb) {
pps_offset = total_size;
pps_seen = 1;
}
}
}
if (out)
memset(out + total_size, 0, padding);
if (!sps_seen)
av_log(NULL, AV_LOG_WARNING,
"Warning: SPS NALU missing or invalid. "
"The resulting stream may not play.\n");
if (!pps_seen)
av_log(NULL, AV_LOG_WARNING,
"Warning: PPS NALU missing or invalid. "
"The resulting stream may not play.\n");
/* hand the buffer to the caller (caller owns and must free it) */
out_extradata->data = out;
out_extradata->size = total_size;
return length_size;
}
/*
 * Convert one AVCC (MP4-style) H.264 video packet to Annex-B byte-stream
 * format and write it to dst_fd.
 *
 * Each NALU in the packet carries a 4-byte big-endian length prefix; that
 * prefix is replaced by a start code. For IDR NALUs (type 5) the SPS/PPS
 * from the stream's codec extradata are prepended first, so the output is
 * independently decodable.
 *
 * Returns 0 on success or a negative AVERROR on malformed input.
 */
int h264_mp4toannexb(AVFormatContext *fmt_ctx, AVPacket *in, FILE *dst_fd)
{
    AVPacket *out = NULL;
    AVPacket spspps_pkt;
    int len;
    uint8_t unit_type;
    int32_t nal_size;
    uint32_t cumul_size = 0;
    const uint8_t *buf;
    const uint8_t *buf_end;
    int buf_size;
    int ret = 0, i;

    out = av_packet_alloc();
    if (!out)
        return AVERROR(ENOMEM);

    buf = in->data;
    buf_size = in->size;
    buf_end = in->data + in->size;

    do {
        ret = AVERROR(EINVAL);
        /* need at least the 4-byte NALU length prefix */
        if (buf + 4 > buf_end)
            goto fail;

        /* AVCC: the first 4 bytes hold the NALU size, big-endian */
        for (nal_size = 0, i = 0; i < 4; i++)
            nal_size = (nal_size << 8) | buf[i];
        buf += 4;
        unit_type = *buf & 0x1f;       /* NAL unit type: low 5 bits of the header */

        if (nal_size > buf_end - buf || nal_size < 0)
            goto fail;

        if (unit_type == 5) {
            /* IDR frame: prepend SPS/PPS taken from the codec extradata.
             * NOTE(review): ->codec is deprecated since FFmpeg 3.1; the
             * equivalent data is available via ->codecpar->extradata. */
            ret = h264_extradata_to_annexb(
                      fmt_ctx->streams[in->stream_index]->codec->extradata,
                      fmt_ctx->streams[in->stream_index]->codec->extradata_size,
                      &spspps_pkt,
                      AV_INPUT_BUFFER_PADDING_SIZE);
            /* BUG FIX: the original ignored this return value */
            if (ret < 0)
                goto fail;
            ret = alloc_and_copy(out, spspps_pkt.data, spspps_pkt.size, buf, nal_size);
            /* BUG FIX: the converted extradata buffer was leaked for every
             * IDR frame in the original code */
            av_free(spspps_pkt.data);
            if (ret < 0)
                goto fail;
        } else {
            /* any other NALU: just add a start code */
            if ((ret = alloc_and_copy(out, NULL, 0, buf, nal_size)) < 0)
                goto fail;
        }

        buf += nal_size;
        cumul_size += nal_size + 4;     /* payload + its 4-byte length prefix */
    } while (cumul_size < buf_size);

    /* BUG FIX: write the converted packet ONCE after all NALUs have been
     * appended. The original fwrite sat inside the loop while `out` kept
     * accumulating, so every earlier NALU of the packet was re-emitted on
     * each iteration, corrupting multi-NALU packets. */
    len = fwrite(out->data, 1, out->size, dst_fd);
    if (len != out->size) {
        av_log(NULL, AV_LOG_DEBUG, "warning, length of writed data isn't equal pkt.size(%d, %d)\n",
               len,
               out->size);
    }
    fflush(dst_fd);

fail:
    av_packet_free(&out);
    return ret;
}
/*
 * Extract the H.264 video stream of a media file into an Annex-B .h264
 * file, converting each AVCC packet via h264_mp4toannexb().
 * Usage: ./test <src media file> <dst h264 file>
 */
int main(int argc, char *argv[])
{
int err_code;
char errors[1024];
char *src_filename = NULL;
char *dst_filename = NULL;
FILE *dst_fd = NULL;
int video_stream_index = -1;
//AVFormatContext *ofmt_ctx = NULL;
//AVOutputFormat *output_fmt = NULL;
//AVStream *out_stream = NULL;
AVFormatContext *fmt_ctx = NULL;
AVPacket pkt;
//AVFrame *frame = NULL;
av_log_set_level(AV_LOG_DEBUG);
/* arguments: 1 = source file to demux, 2 = destination file */
if(argc < 3){
av_log(NULL, AV_LOG_DEBUG, "the count of parameters should be more than three!\n");
return -1;
}
src_filename = argv[1];
dst_filename = argv[2];
if(src_filename == NULL || dst_filename == NULL){
av_log(NULL, AV_LOG_ERROR, "src or dts file is null, plz check them!\n");
return -1;
}
/*register all formats and codec*/
av_register_all();
dst_fd = fopen(dst_filename, "wb");
if (!dst_fd) {
av_log(NULL, AV_LOG_DEBUG, "Could not open destination file %s\n", dst_filename);
return -1;
}
/*open input media file, and allocate format context*/
if((err_code = avformat_open_input(&fmt_ctx, src_filename, NULL, NULL)) < 0){
av_strerror(err_code, errors, 1024);
av_log(NULL, AV_LOG_DEBUG, "Could not open source file: %s, %d(%s)\n",
src_filename,
err_code,
errors);
return -1;
}
/*dump input information*/
av_dump_format(fmt_ctx, 0, src_filename, 0);
/*initialize packet*/
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
/*find best video stream*/
video_stream_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if(video_stream_index < 0){
av_log(NULL, AV_LOG_DEBUG, "Could not find %s stream in input file %s\n",
av_get_media_type_string(AVMEDIA_TYPE_VIDEO),
src_filename);
return AVERROR(EINVAL);
}
/*read frames from media file; only packets of the chosen video stream
 *are converted and written out*/
while(av_read_frame(fmt_ctx, &pkt) >=0 ){
if(pkt.stream_index == video_stream_index){
/*
pkt.stream_index = 0;
av_write_frame(ofmt_ctx, &pkt);
av_free_packet(&pkt);
*/
h264_mp4toannexb(fmt_ctx, &pkt, dst_fd);
}
//release pkt->data
av_packet_unref(&pkt);
}
//av_write_trailer(ofmt_ctx);
/*close input media file*/
avformat_close_input(&fmt_ctx);
if(dst_fd) {
fclose(dst_fd);
}
//avio_close(ofmt_ctx->pb);
return 0;
}
编译: gcc -g -o test test.c -I/home/FFmpeg/ffmpeg/ffmpeg-4.1.3 -lavutil -lavformat -lavcodec
执行 : ./test test.wmv myVideo.h264 最后会把 test.wmv文件中的视频分离出来 并写入了 myVideo.h264
执行:ffplay myVideo.h264 可以发现没有声音 只有画面
2.4 FFmpeg MP4转Flv
avformat_alloc_output_context2()
函数可以初始化一个用于输出的AVFormatContext结构体。它的声明位于 libavformat\avformat.h
avformat_new_stream 在 AVFormatContext 中创建 Stream 通道。
avformat_write_header() 用于写视频文件头。
av_write_trailer() 用于写视频文件尾。
av_interleaved_write_frame 将数据包写入媒体文件
代码:
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
tag,
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
pkt->stream_index);
}
/*
 * Remux a media file into another container (e.g. MP4 -> FLV) without
 * re-encoding. The output muxer is guessed from the output file extension.
 * Usage: ./test <input> <output>
 */
int main(int argc, char **argv)
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    const char *in_filename, *out_filename;
    int ret, i;
    int stream_index = 0;
    int *stream_mapping = NULL;      /* input stream index -> output stream index, -1 = drop */
    int stream_mapping_size = 0;

    if (argc < 3) {
        printf("usage: %s input output\n"
               "API example program to remux a media file with libavformat and libavcodec.\n"
               "The output format is guessed according to the file extension.\n"
               "\n", argv[0]);
        return 1;
    }
    in_filename = argv[1];
    out_filename = argv[2];

    av_register_all();

    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }
    av_dump_format(ifmt_ctx, 0, in_filename, 0);

    /* allocate the output context; the muxer is guessed from out_filename */
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    /* BUG FIX: assign ofmt as soon as ofmt_ctx exists. The original set it
     * only after the stream_mapping allocation, so a failed allocation
     * jumped to `end:` and dereferenced a NULL ofmt. */
    ofmt = ofmt_ctx->oformat;

    stream_mapping_size = ifmt_ctx->nb_streams;
    stream_mapping = av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping));
    if (!stream_mapping) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* walk all input streams and create a matching output stream for each
     * audio/video/subtitle stream; everything else is dropped */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *out_stream;
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVCodecParameters *in_codecpar = in_stream->codecpar;

        if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
            stream_mapping[i] = -1;
            continue;
        }
        stream_mapping[i] = stream_index++;

        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        /* copy the codec parameters into the output stream (no transcode) */
        ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy codec parameters\n");
            goto end;
        }
        out_stream->codecpar->codec_tag = 0;  /* let the muxer choose its own tag */
    }
    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    /* open the output file unless the muxer handles I/O itself */
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }

    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }

    while (1) {
        AVStream *in_stream, *out_stream;

        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;
        in_stream = ifmt_ctx->streams[pkt.stream_index];
        if (pkt.stream_index >= stream_mapping_size ||
            stream_mapping[pkt.stream_index] < 0) {
            av_packet_unref(&pkt);
            continue;
        }
        pkt.stream_index = stream_mapping[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
        log_packet(ifmt_ctx, &pkt, "in");

        /* rescale all timestamps from the input to the output time base */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(ofmt_ctx, &pkt, "out");

        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_packet_unref(&pkt);
    }
    av_write_trailer(ofmt_ctx);

end:
    avformat_close_input(&ifmt_ctx);
    /* close output (only if the muxer had us open a file) */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    av_freep(&stream_mapping);
    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }
    return 0;
}