音视频同步有三种方式
AVPlayer 修改视频的播放速度
属性:
/* indicates the current rate of playback; 0.0 means "stopped", 1.0 means "play at the natural rate of the current item" */
@property (nonatomic) float rate;
实例代码:
/**
 * Cache the desired playback rate and, when possible, apply it at once.
 *
 * The new rate is always remembered in _playbackRate. It is pushed to the
 * AVPlayer immediately only when a player exists and is currently playing
 * (its rate is non-zero); otherwise the cached value is presumably applied
 * later when playback resumes — NOTE(review): confirm against the caller.
 */
- (void)setPlaybackRate:(float)playbackRate
{
    _playbackRate = playbackRate;

    // Guard clause: nothing to apply while there is no player or it is stopped.
    if (_player == nil || isFloatZero(_player.rate)) {
        return;
    }

    _player.rate = _playbackRate;
}
AudioQueue修改音频的播放速度
实例代码:
/**
 * Apply a playback rate to the audio queue.
 *
 * When the requested rate is (within a small epsilon) exactly 1.0, the
 * time/pitch processor is bypassed (TimePitchBypass = 1) and the play rate
 * is pinned to 1.0f; for any other rate the processor is enabled
 * (TimePitchBypass = 0) and the requested rate is forwarded as-is.
 */
- (void)setPlaybackRate:(float)playbackRate
{
    const BOOL normalSpeed = (fabsf(playbackRate - 1.0f) <= 0.000001);

    // Bypass the time/pitch unit only at normal speed.
    UInt32 bypass = normalSpeed ? 1 : 0;
    AudioQueueSetProperty(_audioQueueRef, kAudioQueueProperty_TimePitchBypass,
                          &bypass, sizeof(bypass));
    AudioQueueSetParameter(_audioQueueRef, kAudioQueueParam_PlayRate,
                           normalSpeed ? 1.0f : playbackRate);
}
/* Fragment from the video decoder loop (ijkplayer/ffplay style): decode one
 * packet, then choose the frame's presentation timestamp according to the
 * decoder_reorder_pts policy:
 *   -1 -> let FFmpeg guess via av_frame_get_best_effort_timestamp()
 *    1 -> trust the packet's pts (decoder output order == presentation order)
 *    0 -> fall back to the packet's dts
 * NOTE(review): `d`, `ffp`, `frame`, `got_frame` come from the enclosing
 * function, which is not visible here. */
ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
if (got_frame) {
// Update the "video decodes per second" statistic sampler.
ffp->stat.vdps = SDL_SpeedSamplerAdd(&ffp->vdps_sampler, FFP_SHOW_VDPS_AVCODEC, "vdps[avcodec]");
if (ffp->decoder_reorder_pts == -1) {
frame->pts = av_frame_get_best_effort_timestamp(frame);
} else if (ffp->decoder_reorder_pts) {
frame->pts = frame->pkt_pts;
} else {
frame->pts = frame->pkt_dts;
}
}
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame);
函数实例:
/* Fragment from the audio decoder loop: decode one packet, then rescale the
 * frame's timestamp into a {1, sample_rate} time base. Preference order:
 * frame->pts, then frame->pkt_pts (in the packet time base), then a running
 * prediction d->next_pts. The prediction is advanced by nb_samples, which is
 * exactly one tick per sample in the {1, sample_rate} time base.
 * NOTE(review): `d`, `frame`, `got_frame` come from the enclosing function. */
ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
if (got_frame) {
// Time base where one tick == one audio sample.
AVRational tb = (AVRational){1, frame->sample_rate};
if (frame->pts != AV_NOPTS_VALUE)
frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
else if (frame->pkt_pts != AV_NOPTS_VALUE)
frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
else if (d->next_pts != AV_NOPTS_VALUE)
frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
if (frame->pts != AV_NOPTS_VALUE) {
// Predict the next frame's pts for streams with missing timestamps.
d->next_pts = frame->pts + frame->nb_samples;
d->next_pts_tb = tb;
}
}
音频解码时一个 AVPacket 通常只解析出一个 AVFrame，因此 pkt_pts 可以当做真实的 pts
获取某个音频或视频流的 packet 时间基（以 AVRational 表示），用于时间戳换算
AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx);
/* Ring buffer of decoded frames, shared between the decoder (writer) and the
 * renderer (reader); mutex/cond protect the indices and size. */
typedef struct FrameQueue {
Frame queue[FRAME_QUEUE_SIZE]; // fixed-capacity ring storage
int rindex; // read index
int windex; // write index
int size; // number of frames currently stored
int max_size; // capacity actually in use (<= FRAME_QUEUE_SIZE)
int keep_last; // keep the last shown frame instead of freeing it
int rindex_shown; // whether the frame at rindex has already been shown (0 or 1)
SDL_mutex *mutex; // guards all fields above
SDL_cond *cond; // signaled when frames are pushed/popped
PacketQueue *pktq; // the packet queue feeding this frame queue
} FrameQueue;
/*
 * Legacy ffplay buffer callbacks (pre-refcounted-frames FFmpeg): attach the
 * PTS of the packet being decoded to the picture via AVFrame.opaque, so the
 * display code can recover the true presentation timestamp after reordering.
 * NOTE(review): avcodec_default_get_buffer/release_buffer were removed in
 * modern FFmpeg (replaced by get_buffer2 / refcounted frames); kept here as
 * a historical reference only.
 * Fix: the original snippet was missing the closing brace of
 * my_release_buffer.
 */
static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic)
{
    int ret = avcodec_default_get_buffer(c, pic);
    /* Stash the global pts captured just before avcodec_decode_video().
     * NOTE(review): av_malloc result is not checked, as in the original. */
    uint64_t *pts = av_malloc(sizeof(uint64_t));
    *pts = global_video_pkt_pts;
    pic->opaque = pts;
    return ret;
}

static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic)
{
    if (pic)
        av_freep(&pic->opaque); /* free our side-channel pts */
    avcodec_default_release_buffer(c, pic);
}
/* Fragment from the old ffplay video thread: recover the display pts of a
 * decoded picture. global_video_pkt_pts is latched before decoding so that
 * my_get_buffer() (above in these notes) can attach it to the frame's opaque
 * field; after decoding, that opaque pts is preferred when reordering is in
 * effect, falling back to the packet dts, then 0.
 * NOTE(review): `is`, `pkt`, `frame`, `step`, `cur_stream` etc. belong to the
 * enclosing function, which is not visible here. */
/* NOTE: ipts is the PTS of the _first_ picture beginning in this packet, if any */
global_video_pkt_pts = pkt->pts;
len1 = avcodec_decode_video(is->video_st->codec, frame, &got_picture, pkt->data, pkt->size);
if ((decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
    && frame->opaque && *(uint64_t *)frame->opaque != AV_NOPTS_VALUE)
    pts = *(uint64_t *)frame->opaque; /* pts smuggled through my_get_buffer() */
else if (pkt->dts != AV_NOPTS_VALUE)
    pts = pkt->dts; /* decode order == display order fallback */
else
    pts = 0;
/* Convert from stream time-base ticks to seconds. */
pts *= av_q2d(is->video_st->time_base);
// if (len1 < 0)
// break;
if (got_picture) {
    if (output_picture2(is, frame, pts) < 0)
        goto the_end;
}
av_free_packet(pkt);
/* Frame-step mode: pause again after showing exactly one picture. */
if (step)
    if (cur_stream)
        stream_pause(cur_stream);