apple 的 HLS 方案,采用 ffmpeg 转码的 TS 流,播放时会渐渐变得音画不同步。sohu 源的处理办法是每隔 5 分钟加一次 EXT-X-DISCONTINUITY 标签,但是这个标签会导致原生播放器重启,表现得有点卡。针对这种情况,改造播放器,不让其重启,直接读取下一个流是比较好的办法。但是 ffmpeg 处理 HLS 播放还存在其它一些问题:1、每遇到 EXT-X-DISCONTINUITY 标签时,显示的播放时长会清零;2、只能在第一个 EXT-X-DISCONTINUITY 标签之前进行拖动(seek)。本文针对这些问题对 ffmpeg 进行改造,使其更优雅地适配 HLS 源。
重点修改hls.c文件,可以用ffplay做前后对比,以便验证
1、添加函数
/**
 * Compute the cumulative duration of all segments that precede the
 * segment with index seq_no in the playlist.
 *
 * @param pls       playlist whose segment table is summed
 * @param timestamp receives the sum of segments[0..seq_no-1]->duration
 *                  (0 when seq_no <= 0); presumably in AV_TIME_BASE
 *                  units, matching its use against seek_timestamp in
 *                  hls_read_packet -- TODO confirm against the parser
 * @param seq_no    index of the segment whose start offset is wanted
 * @return          always 0
 */
static int find_timestamp_in_seq_no(struct playlist *pls, int64_t *timestamp, int seq_no)
{
    int64_t total = 0;
    int idx;

    for (idx = 0; idx < seq_no; idx++)
        total += pls->segments[idx]->duration;

    *timestamp = total;
    return 0;
}
2、修改结构体 segment(新增 first_dts 字段)和函数 hls_read_packet:
/* One media segment of an HLS variant playlist. */
struct segment {
int64_t duration;    /* segment duration; presumably AV_TIME_BASE units (it is
                      * summed and compared against seek_timestamp) -- confirm */
int64_t url_offset;  /* byte offset into url for EXT-X-BYTERANGE segments */
int64_t size;        /* byte length of the segment (or -1/0 when unknown) --
                      * NOTE(review): not shown in this chunk, verify */
char *url;           /* segment URL (owned by this struct) */
char *key;           /* decryption key URL, when encrypted */
enum KeyType key_type;
uint8_t iv[16];      /* AES-128 initialization vector */
int64_t first_dts;   /* added by this patch: dts of the first packet read from
                      * this segment, AV_NOPTS_VALUE until seen; used as the
                      * seek reference when jumping across a discontinuity */
};
/**
 * Demuxer read_packet callback: return the buffered packet with the lowest
 * dts across all needed playlists, refilling each playlist's one-packet
 * buffer as required.
 *
 * Patched behavior: timestamps are rebased across EXT-X-DISCONTINUITY
 * boundaries so the output dts/pts keep increasing monotonically instead of
 * resetting, and seeks are translated via the per-segment first_dts.
 *
 * @return 0 and a valid packet in *pkt, AVERROR_EOF when all playlists are
 *         drained, or a negative error from av_read_frame.
 */
static int hls_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    HLSContext *c = s->priv_data;
    int ret, i, minplaylist = -1;
    int64_t timestamp = AV_NOPTS_VALUE;
    /* NOTE(review): this static state is shared by every HLS demuxer in the
     * process and never reset between inputs -- it should live in HLSContext.
     * The arrays are indexed by pkt.stream_index, assumed to be 0 (video) or
     * 1 (audio); a stream index >= 2 would access out of bounds -- TODO
     * confirm the stream layout this patch targets.
     * Fix vs. original patch: {AV_NOPTS_VALUE} only initialized element 0
     * (element 1 silently became 0); initialize both explicitly. */
    static int64_t lastdts[2]   = { AV_NOPTS_VALUE, AV_NOPTS_VALUE }; /* last in-order dts per stream */
    static int64_t interdts[2]  = { AV_NOPTS_VALUE, AV_NOPTS_VALUE }; /* one-time a/v dts spacing */
    static int64_t whole_dts[2] = { AV_NOPTS_VALUE, AV_NOPTS_VALUE }; /* accumulated dts offset across discontinuities */

    recheck_discard_flags(s, c->first_packet);

    for (i = 0; i < c->n_playlists; i++) {
        struct playlist *pls = c->playlists[i];
        /* Make sure we've got one buffered packet from each open playlist
         * stream */
        AVRational tb;
        tb = get_timebase(pls);
        /* Sum of the durations of all segments before the current one; used
         * below to translate the global seek target into this segment.
         * (Fixes the garbled "×tamp" in the transcription: the argument is
         * the address of timestamp.) */
        find_timestamp_in_seq_no(pls, &timestamp, pls->cur_seq_no);
        if (pls->needed && !pls->pkt.data) {
            while (1) {
                int64_t ts_diff;
                ret = av_read_frame(pls->ctx, &pls->pkt);
                if (ret < 0) {
                    if (!url_feof(&pls->pb) && ret != AVERROR_EOF)
                        return ret;
                    reset_packet(&pls->pkt);
                    break;
                } else {
                    /* Remember the first timestamp seen in each segment; it
                     * is the reference when seeking into a later
                     * discontinuity (does not need to be frame-accurate). */
                    if (pls->segments[pls->cur_seq_no]->first_dts == AV_NOPTS_VALUE) {
                        pls->segments[pls->cur_seq_no]->first_dts = pls->pkt.dts;
                    }
                    /* stream_index check prevents matching picture attachments etc. */
                    if (pls->is_id3_timestamped && pls->pkt.stream_index == 0) {
                        /* audio elementary streams are id3 timestamped */
                        fill_timing_for_id3_timestamped_stream(pls);
                    }
                    if (c->first_timestamp == AV_NOPTS_VALUE &&
                        pls->pkt.dts != AV_NOPTS_VALUE)
                        c->first_timestamp = av_rescale_q(pls->pkt.dts,
                                                          get_timebase(pls), AV_TIME_BASE_Q);
                }
                if (pls->seek_timestamp == AV_NOPTS_VALUE) {
                    /* Timestamps still increasing: just remember the newest one. */
                    if (pls->pkt.pts > lastdts[pls->pkt.stream_index]) {
                        /* The last frame's pts equals its dts, so one value can
                         * serve as the base for the next accumulation. */
                        lastdts[pls->pkt.stream_index] = pls->pkt.dts;
                    } else {
                        /* Timestamp jumped backwards: a discontinuity was
                         * crossed. Fold the previous run's last dts into the
                         * running offset. Fix vs. original patch: assign on
                         * first use -- AV_NOPTS_VALUE is INT64_MIN and adding
                         * to it overflows. */
                        if (whole_dts[pls->pkt.stream_index] == AV_NOPTS_VALUE)
                            whole_dts[pls->pkt.stream_index] = lastdts[pls->pkt.stream_index];
                        else
                            whole_dts[pls->pkt.stream_index] += lastdts[pls->pkt.stream_index];
                        lastdts[pls->pkt.stream_index] = pls->pkt.dts;
                        av_log(s, AV_LOG_ERROR, "dts=%lld,index=%d\n",
                               whole_dts[pls->pkt.stream_index], pls->pkt.stream_index);
                    }
                    /* Once an offset exists, shift the packet so the dts/pts
                     * handed to the caller keep increasing monotonically. */
                    if (whole_dts[pls->pkt.stream_index] != AV_NOPTS_VALUE) {
                        pls->pkt.dts += interdts[pls->pkt.stream_index] + whole_dts[pls->pkt.stream_index];
                        pls->pkt.pts += interdts[pls->pkt.stream_index] + whole_dts[pls->pkt.stream_index];
                    }
                    /* Capture the per-stream dts spacing once, measured on dts. */
                    if (interdts[pls->pkt.stream_index] == AV_NOPTS_VALUE &&
                        lastdts[pls->pkt.stream_index] != AV_NOPTS_VALUE) {
                        interdts[pls->pkt.stream_index] = pls->pkt.dts - lastdts[pls->pkt.stream_index];
                        /* Fix vs. original patch: log the element, not the
                         * array (passing int64_t* to %lld is undefined). */
                        av_log(s, AV_LOG_ERROR, "inter dts=%lld,index=%d\n",
                               interdts[pls->pkt.stream_index], pls->pkt.stream_index);
                    }
                    break;
                } else {
                    /* Seeking: nothing to accumulate here; whole_dts is
                     * rebased below once the target packet is found. */
                }
                if (pls->seek_stream_index < 0 ||
                    pls->seek_stream_index == pls->pkt.stream_index) {
                    if (pls->pkt.dts == AV_NOPTS_VALUE) {
                        pls->seek_timestamp = AV_NOPTS_VALUE;
                        break;
                    }
                    /* Position of this packet on the global (cross-discontinuity)
                     * timeline, relative to the seek target: segment start offset
                     * plus the packet's distance from the segment's first dts. */
                    ts_diff = timestamp +
                              av_rescale_rnd(pls->pkt.dts - pls->segments[pls->cur_seq_no]->first_dts,
                                             AV_TIME_BASE, tb.den, AV_ROUND_DOWN) -
                              pls->seek_timestamp;
                    if (ts_diff >= 0 && (pls->seek_flags & AVSEEK_FLAG_ANY ||
                                         pls->pkt.flags & AV_PKT_FLAG_KEY)) {
                        /* Rebase the accumulated offset so playback resumes at
                         * the seek target with monotonic output timestamps. */
                        whole_dts[0] = (pls->seek_timestamp + ts_diff) * tb.den / (tb.num * AV_TIME_BASE) - pls->pkt.dts;
                        whole_dts[1] = (pls->seek_timestamp + ts_diff) * tb.den / (tb.num * AV_TIME_BASE) - pls->pkt.dts;
                        av_log(s, AV_LOG_ERROR, "whole dts=%lld,dts=%lld\n",
                               whole_dts[0], pls->pkt.dts);
                        pls->seek_timestamp = AV_NOPTS_VALUE;
                        break;
                    }
                }
                av_free_packet(&pls->pkt);
                reset_packet(&pls->pkt);
            }
        }
        /* Check if this stream has the packet with the lowest dts */
        if (pls->pkt.data) {
            struct playlist *minpls = minplaylist < 0 ?
                                      NULL : c->playlists[minplaylist];
            if (minplaylist < 0) {
                minplaylist = i;
            } else {
                int64_t dts    = pls->pkt.dts;
                int64_t mindts = minpls->pkt.dts;
                if (dts == AV_NOPTS_VALUE ||
                    (mindts != AV_NOPTS_VALUE && compare_ts_with_wrapdetect(dts, pls, mindts, minpls) < 0))
                    minplaylist = i;
            }
        }
    }
    /* If we got a packet, return it */
    if (minplaylist >= 0) {
        struct playlist *pls = c->playlists[minplaylist];
        *pkt = pls->pkt;
        pkt->stream_index += pls->stream_offset;
        reset_packet(&c->playlists[minplaylist]->pkt);
        if (pkt->dts != AV_NOPTS_VALUE)
            c->cur_timestamp = av_rescale_q(pkt->dts,
                                            pls->ctx->streams[pls->pkt.stream_index]->time_base,
                                            AV_TIME_BASE_Q);
        return 0;
    }
    return AVERROR_EOF;
}