1、打开输入文件
2、创建并打开一个空文件存储 flv 格式音视频数据
3、遍历输入文件的每一路流,每个输入流对应创建一个输出流,并将输入流中的编解码参数直接拷贝到输出流中。
4、写入新的多媒体文件的头
5、快进视频流到要截取的时间
6、循环遍历输入文件的每一帧,对每一个packet进行时间基的转换
7、将处理好的pkt写入输出文件
8、超过要结束的时间跳出循环
9、写入新的多媒体文件尾
10、释放相关资源
文件操作
相关API
源码:
#include "SeekVideo.h"
#include <libavformat/avformat.h>
#include <libavutil/timestamp.h>
#include <stdlib.h>
#include <string.h>
/*
 * Print one packet's timestamps (pts/dts/duration), both as raw ticks and
 * converted to seconds via the stream's time base, prefixed with `tag`
 * ("in" / "out") so input and output packets can be compared side by side.
 */
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
    AVRational tb = fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           tag,
           av_ts2str(pkt->pts),      av_ts2timestr(pkt->pts, &tb),
           av_ts2str(pkt->dts),      av_ts2timestr(pkt->dts, &tb),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &tb),
           pkt->stream_index);
}
int cutVideo(double start_secondtime, double end_secondtime, const char *in_filename, const char *out_filename) {
AVOutputFormat *outfmt = NULL; //输出格式
AVFormatContext *infmt_ctx = NULL, *outfmt_ctx = NULL; //输入/输出上下文
AVCodecContext *codec_ctx = NULL;//解码器上下文
AVPacket pkt;
int ret, i;
av_register_all();
//打开输入文件为 infmt_ctx 分配内存
ret = avformat_open_input(&infmt_ctx, in_filename, NULL, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open input file '%s'", in_filename);
goto end;
}
//检索输入文件的流信息
ret = avformat_find_stream_info(infmt_ctx, 0);
if (ret < 0) {
fprintf(stderr, "Failed to retrieve input stream information");
goto end;
}
//打印输入文件的相关信息
av_dump_format(infmt_ctx, 0, in_filename, 0);
//为输出上下文分配内存
avformat_alloc_output_context2(&outfmt_ctx, NULL, NULL, out_filename);
if (!outfmt_ctx) {
fprintf(stderr, "Could not create output context\n");
ret = AVERROR_UNKNOWN;
goto end;
}
outfmt = outfmt_ctx->oformat;
for (i = 0; i < infmt_ctx->nb_streams; i++) {
AVStream *in_stream = infmt_ctx->streams[i];
//创建一个输出流
AVStream *out_stream = avformat_new_stream(outfmt_ctx, NULL);
if (!out_stream) {
fprintf(stderr, "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
goto end;
}
//获取解码器
AVCodec *pAVCodec = avcodec_find_decoder(in_stream->codecpar->codec_id);
//为解码上下文分配内存
codec_ctx = avcodec_alloc_context3(pAVCodec);
//把avstream中的参数复制到codec中
ret = avcodec_parameters_to_context(codec_ctx, in_stream->codecpar);
if (ret < 0){
printf("Failed to copy in_stream codecpar to codec context\n");
goto end;
}
codec_ctx->codec_tag = 0;
if (outfmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
//把codec中的参数复制到avstream中
ret = avcodec_parameters_from_context(out_stream->codecpar, codec_ctx);
}
//打印输出文件的相关信息
av_dump_format(outfmt_ctx, 0, out_filename, 1);
if (!(outfmt->flags & AVFMT_NOFILE)) {
ret = avio_open2(&outfmt_ctx->pb, out_filename, AVIO_FLAG_WRITE, NULL, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open output file '%s'", out_filename);
goto end;
}
}
//写入新的多媒体文件的头
ret = avformat_write_header(outfmt_ctx, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file\n");
goto end;
}
//从start_secondtime秒开始截取
ret = av_seek_frame(infmt_ctx, -1, start_secondtime*AV_TIME_BASE, AVSEEK_FLAG_ANY);
if (ret < 0) {
fprintf(stderr, "Error seek\n");
goto end;
}
int64_t *dts_start_from = malloc(sizeof(int64_t) * infmt_ctx->nb_streams);
memset(dts_start_from, 0, sizeof(int64_t) * infmt_ctx->nb_streams);
int64_t *pts_start_from = malloc(sizeof(int64_t) * infmt_ctx->nb_streams);
memset(pts_start_from, 0, sizeof(int64_t) * infmt_ctx->nb_streams);
while (1) {
AVStream *in_stream, *out_stream;
//循环读取每一帧数据
ret = av_read_frame(infmt_ctx, &pkt);
if (ret < 0) {
break;
}
//获取与pkt对应的的输入流
in_stream = infmt_ctx->streams[pkt.stream_index];
//获取与pkt对应的的输出流
out_stream = outfmt_ctx->streams[pkt.stream_index];
log_packet(infmt_ctx, &pkt, "in");
// Convert an AVRational to a `double`.,截取到末尾时间时就跳出
if (av_q2d(in_stream->time_base) * pkt.pts > end_secondtime) {
av_packet_unref(&pkt);
break;
}
//将截取的开始时间的dts保存起来
if (dts_start_from[pkt.stream_index] == 0) {
dts_start_from[pkt.stream_index] = pkt.dts;
printf("dts_start_from: %s\n",av_ts2str(dts_start_from[pkt.stream_index]));
}
//将截取的开始时间的pts保存起来
if (pts_start_from[pkt.stream_index] == 0) {
pts_start_from[pkt.stream_index] = pkt.pts;
printf("pts_start_from: %s\n", av_ts2str(pts_start_from[pkt.stream_index]));
}
//对pts、dts、duration 进行时间基转换,减去
pkt.pts = av_rescale_q_rnd(pkt.pts - pts_start_from[pkt.stream_index], in_stream->time_base, out_stream->time_base, AV_ROUND_INF | AV_ROUND_PASS_MINMAX);
pkt.dts = av_rescale_q_rnd(pkt.dts - dts_start_from[pkt.stream_index], in_stream->time_base, out_stream->time_base, AV_ROUND_INF | AV_ROUND_PASS_MINMAX);
if (pkt.pts < 0) {
pkt.pts = 0;
}
if (pkt.dts < 0) {
pkt.dts = 0;
}
pkt.duration = (int)av_rescale_q((int64_t)pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
log_packet(outfmt_ctx, &pkt, "out");
printf("\n");
//将处理好的pkt写入输出文件
ret = av_interleaved_write_frame(outfmt_ctx, &pkt);
if (ret < 0) {
fprintf(stderr, "Error muxing packet\n");
break;
}
av_packet_unref(&pkt);
}
free(dts_start_from);
free(pts_start_from);
//写入新的多媒体文件尾
av_write_trailer(outfmt_ctx);
end:
avformat_close_input(&infmt_ctx);
if (outfmt_ctx && !(outfmt->flags & AVFMT_NOFILE)) {
avio_closep(&outfmt_ctx->pb);
}
avformat_free_context(outfmt_ctx);
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred:%s\n", av_err2str(ret));
return -1;
}
return 0;
}