int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags);
Parameters:
s: the input format context;
stream_index: index of the elementary stream the seek applies to, e.g. the video stream or the audio stream;
timestamp: the target position, in units of that stream's time_base (or in AV_TIME_BASE units when stream_index is -1);
flags: seek flags. They can select seeking by byte offset (AVSEEK_FLAG_BYTE), landing on the keyframe before rather than after the target when seeking by time (AVSEEK_FLAG_BACKWARD), or seeking to any frame rather than only keyframes (AVSEEK_FLAG_ANY); see FFmpeg's avformat.h for details. Almost all trick-mode playback built on FFmpeg is implemented, directly or indirectly, with this function.
To jump to the first I-frame at the 100-second mark (100 000 ms) of the video (or, if there is none exactly there, to the nearest one before it):
av_seek_frame(pFormatCtx, vid_index, 100000*vid_time_scale/time_base, AVSEEK_FLAG_BACKWARD);
To jump to the frame (sample) at the 80-second mark (80 000 ms) of the audio:
av_seek_frame(pFormatCtx, aud_index, 80000*aud_time_scale/time_base, AVSEEK_FLAG_BACKWARD);
To jump back to the beginning of the file and play from the start:
av_seek_frame(pFormatCtx, vid_index, 0, AVSEEK_FLAG_BACKWARD);
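The snippets above rely on ad-hoc scale factors (vid_time_scale, time_base). A more portable way to convert wall-clock seconds into a stream's time base is av_rescale_q(). Below is a minimal sketch of that approach; seek_stream_to_seconds is a hypothetical helper, and fmt_ctx/stream_index are assumed to come from a file already opened with avformat_open_input():

// Hypothetical helper: seek one stream to a position given in seconds.
static int seek_stream_to_seconds(AVFormatContext *fmt_ctx, int stream_index, double seconds)
{
    AVStream *st = fmt_ctx->streams[stream_index];
    // Convert seconds into the stream's time_base units.
    // AV_TIME_BASE_Q is the rational {1, AV_TIME_BASE}.
    int64_t ts = av_rescale_q((int64_t)(seconds * AV_TIME_BASE), AV_TIME_BASE_Q, st->time_base);
    // AVSEEK_FLAG_BACKWARD lands on the nearest keyframe at or before ts.
    return av_seek_frame(fmt_ctx, stream_index, ts, AVSEEK_FLAG_BACKWARD);
}

The complete cutting example below uses the same function with stream_index = -1, in which case the timestamp is interpreted directly in AV_TIME_BASE units.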
#include "rtmp_source.hpp"
#include "libavutil/timestamp.h"
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
    // Look up the time base of the stream this packet belongs to.
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
    printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           tag,
           // Render the raw timestamp as a string.
           av_ts2str(pkt->pts),
           // Render the same timestamp as seconds in the stream's time base.
           av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts),
           av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration),
           av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}
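Note: av_ts2str() and av_ts2timestr() are macros built on C99 compound literals, which some C++ compilers reject. If that happens, one workaround is to call the underlying av_ts_make_string()/av_ts_make_time_string() helpers through small wrappers like the (hypothetical) ones below:

#include <string> // for the wrappers below
// Hypothetical C++-friendly replacements for the av_ts2str/av_ts2timestr macros.
static std::string ts2str(int64_t ts)
{
    char buf[AV_TS_MAX_STRING_SIZE] = {0};
    return av_ts_make_string(buf, ts);
}
static std::string ts2timestr(int64_t ts, AVRational *tb)
{
    char buf[AV_TS_MAX_STRING_SIZE] = {0};
    return av_ts_make_time_string(buf, ts, tb);
}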
int cut_video(double from_seconds, double end_seconds, const char *in_filename, const char *out_filename)
{
    // const: ofmt_ctx->oformat is const in newer FFmpeg releases.
    const AVOutputFormat *ofmt = nullptr;
    AVFormatContext *ifmt_ctx = nullptr;
    AVFormatContext *ofmt_ctx = nullptr;
    // Declared up front so the error gotos below do not jump over their
    // initialization (a compile error in C++).
    int64_t *dts_start_from = nullptr;
    int64_t *pts_start_from = nullptr;
    AVPacket pkt;
    int ret;
    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0)
    {
        fprintf(stderr, "Error opening input file %s\n", in_filename);
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0)
    {
        fprintf(stderr, "can't find stream info\n");
        goto end;
    }
    // Dump the input streams' information.
    av_dump_format(ifmt_ctx, 0, in_filename, 0);
    // Allocate the output context, guessing the format from the file name.
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx)
    {
        fprintf(stderr, "could not allocate output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    ofmt = ofmt_ctx->oformat;
    for (int i = 0; i < (int)ifmt_ctx->nb_streams; i++)
    {
        AVStream *in_stream = ifmt_ctx->streams[i];
        // Create one output stream per input stream and copy its codec parameters.
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream)
        {
            fprintf(stderr, "Error: failed to allocate output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
        if (ret < 0)
        {
            fprintf(stderr, "failed to copy parameters\n");
            goto end;
        }
        // Let the muxer pick an appropriate codec tag for the output container.
        out_stream->codecpar->codec_tag = 0;
    }
    // Dump the output streams' information.
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
    if (!(ofmt->flags & AVFMT_NOFILE))
    {
        // Open the output file for writing.
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0)
        {
            goto end;
        }
    }
    ret = avformat_write_header(ofmt_ctx, nullptr);
    if (ret < 0)
    {
        fprintf(stderr, "Error writing header\n");
        goto end;
    }
    // Seek to the requested start position (stream_index -1, so the
    // timestamp is in AV_TIME_BASE units). Note that AVSEEK_FLAG_ANY may
    // land on a non-keyframe; AVSEEK_FLAG_BACKWARD is the safer choice
    // when stream-copying.
    ret = av_seek_frame(ifmt_ctx, -1, from_seconds * AV_TIME_BASE, AVSEEK_FLAG_ANY);
    if (ret < 0)
    {
        fprintf(stderr, "Error seeking\n");
        goto end;
    }
    // Record the first dts/pts seen on each stream, so that the output's
    // timestamps can be shifted to start from (roughly) zero.
    dts_start_from = (int64_t *)calloc(ifmt_ctx->nb_streams, sizeof(int64_t));
    pts_start_from = (int64_t *)calloc(ifmt_ctx->nb_streams, sizeof(int64_t));
    while (1)
    {
        AVStream *in_stream;
        AVStream *out_stream;
        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
        {
            break;
        }
        in_stream = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
        log_packet(ifmt_ctx, &pkt, "in");
        // Stop once we have passed the requested end time.
        if (av_q2d(in_stream->time_base) * pkt.pts > end_seconds)
        {
            av_packet_unref(&pkt);
            break;
        }
        // Remember the first pts/dts of each stream; every later timestamp
        // is shifted by this amount.
        if (pts_start_from[pkt.stream_index] == 0)
        {
            pts_start_from[pkt.stream_index] = pkt.pts;
        }
        if (dts_start_from[pkt.stream_index] == 0)
        {
            dts_start_from[pkt.stream_index] = pkt.dts;
        }
        // Shift the timestamps and rescale them from the input stream's
        // time base to the output stream's.
        pkt.dts = av_rescale_q_rnd(pkt.dts - dts_start_from[pkt.stream_index], in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
        pkt.pts = av_rescale_q_rnd(pkt.pts - pts_start_from[pkt.stream_index], in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0)
        {
            break;
        }
        // pkt lives on the stack, so unref it rather than av_packet_free(),
        // which expects a heap-allocated packet.
        av_packet_unref(&pkt);
    }
    free(dts_start_from);
    free(pts_start_from);
    // Write the trailer to finalize the output file.
    av_write_trailer(ofmt_ctx);
end:
    avformat_close_input(&ifmt_ctx);
    // Close the output file, if the muxer opened one.
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
    {
        avio_closep(&ofmt_ctx->pb);
    }
    avformat_free_context(ofmt_ctx);
    // av_read_frame() returning AVERROR_EOF is the normal end of input.
    if (ret < 0 && ret != AVERROR_EOF)
    {
        fprintf(stderr, "Error occurred\n");
        return 1;
    }
    return 0;
}
int main(int argc, char const *argv[])
{
    if (argc < 5)
    {
        fprintf(stderr, "usage: %s <start_seconds> <end_seconds> <input> <output>\n", argv[0]);
        return -1;
    }
    // atof, not atoi: the start/end positions are fractional seconds.
    double start = atof(argv[1]);
    double end = atof(argv[2]);
    return cut_video(start, end, argv[3], argv[4]);
}
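Assuming the listing is saved as cut_video.cpp (the file name is arbitrary), it can be built and run along these lines; the pkg-config module names may vary by platform:

g++ cut_video.cpp -o cut_video $(pkg-config --cflags --libs libavformat libavutil)
./cut_video 10 30 input.mp4 output.mp4

This copies the 10 s to 30 s range of input.mp4 into output.mp4 without re-encoding.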