Understanding the VSYNC Signal in Android
Android App Performance Optimization video series (bilingual subtitles), by Google
An Analysis of Android Project Butter
Android Choreographer Source Code Analysis
How the Android Choreographer Mechanism Is Implemented
android_view_DisplayEventReceiver
DisplayEventDispatcher.h
DisplayEventDispatcher.cpp
looper.cpp
Related Concepts

Next, let's look at the source code to see how the conclusions above are arrived at, starting from ViewRootImpl.requestLayout():
public void requestLayout() {
    if (!mHandlingLayoutInLayoutRequest) {
        // 1. Thread check: only the thread that created the view hierarchy may drive its drawing.
        checkThread();
        // 2. Mark that a layout pass has been requested.
        mLayoutRequested = true;
        // 3. Schedule the three traversal phases: measure, layout, draw.
        scheduleTraversals();
    }
}
scheduleTraversals() is what schedules the measure, layout and draw passes:

void scheduleTraversals() {
    /**
     * 1. mTraversalScheduled defaults to false. It is set to true the first time
     *    scheduleTraversals() is called, and reset to false in unscheduleTraversals()
     *    and doTraversal().
     */
    if (!mTraversalScheduled) {
        mTraversalScheduled = true;
        /**
         * 2. Post a sync barrier so that pending synchronous messages on the UI thread are
         *    not dispatched ahead of the traversal. This speeds up the response to vsync:
         *    without the barrier, queued synchronous messages could delay the UI-update
         *    task when the vsync arrives.
         */
        mTraversalBarrier = mHandler.getLooper().getQueue().postSyncBarrier();
        // 3. Post the callback that requests the next vsync. Note that mTraversalRunnable
        //    is passed in here; it is what CallbackRecord.run() will eventually invoke.
        mChoreographer.postCallback(
                Choreographer.CALLBACK_TRAVERSAL, mTraversalRunnable, null);
        if (!mUnbufferedInputDispatch) {
            scheduleConsumeBatchedInput();
        }
        notifyRendererOfFramePending();
        pokeDrawLockIfNeeded();
    }
}
MessageQueue.postSyncBarrier() simply enqueues a barrier token, a Message whose target stays null:

private int postSyncBarrier(long when) {
    // Enqueue a new sync barrier token.
    // We don't need to wake the queue because the purpose of a barrier is to stall it.
    synchronized (this) {
        final int token = mNextBarrierToken++;
        final Message msg = Message.obtain();
        msg.markInUse();
        msg.when = when;
        msg.arg1 = token;

        Message prev = null;
        Message p = mMessages;
        if (when != 0) {
            while (p != null && p.when <= when) {
                prev = p;
                p = p.next;
            }
        }
        if (prev != null) { // invariant: p == prev.next
            msg.next = p;
            prev.next = msg;
        } else {
            msg.next = p;
            mMessages = msg;
        }
        return token;
    }
}
Correspondingly, in MessageQueue.next() a barrier (a message whose target is null) makes the loop skip over synchronous messages and return only asynchronous ones until the barrier is removed:

Message next() {
    for (;;) {
        synchronized (this) {
            // Try to retrieve the next message.  Return if found.
            final long now = SystemClock.uptimeMillis();
            Message prevMsg = null;
            Message msg = mMessages;
            if (msg != null && msg.target == null) {
                /**
                 * 1. target == null means the head of the queue is a barrier: walk the list
                 *    and keep skipping nodes until an asynchronous Message is found.
                 */
                do {
                    prevMsg = msg;
                    msg = msg.next;
                } while (msg != null && !msg.isAsynchronous());
            }
            // ... (rest of next() omitted)
        }
    }
}
Note that messages sent with Handler.post or Handler.sendMessage are also synchronous by default, so they too are held back by the barrier; that code is not shown here.
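To make the distinction concrete, here is a minimal app-level sketch (my own illustration, not framework code; Handler.createAsync requires API 28, and the hidden postSyncBarrier() used by ViewRootImpl is not available to apps):

import android.os.Handler;
import android.os.Looper;
import android.os.Message;

public class AsyncMessageDemo {
    public static void post() {
        Looper looper = Looper.getMainLooper();

        // An ordinary Handler sends synchronous messages: while a sync barrier
        // sits in the queue, these are skipped over by MessageQueue.next().
        Handler syncHandler = new Handler(looper);
        syncHandler.post(() -> { /* stalled while a barrier is present */ });

        // Handler.createAsync() marks every message it sends as asynchronous,
        // so they are still delivered behind a barrier -- the same trick the
        // Choreographer applies to its own messages, as shown below.
        Handler asyncHandler = Handler.createAsync(looper);
        asyncHandler.post(() -> { /* still runs behind a barrier */ });

        // Equivalent manual form for a single message:
        Message msg = Message.obtain(syncHandler, () -> { });
        msg.setAsynchronous(true);
        syncHandler.sendMessage(msg);
    }
}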
Back in Choreographer, postCallback() ends up in postCallbackDelayedInternal():

private void postCallbackDelayedInternal(int callbackType,
        Object action, Object token, long delayMillis) {
    synchronized (mLock) {
        final long now = SystemClock.uptimeMillis();
        final long dueTime = now + delayMillis;
        mCallbackQueues[callbackType].addCallbackLocked(dueTime, action, token);
        if (dueTime <= now) {
            scheduleFrameLocked(now);
        }
        // (The delayed case, which posts MSG_DO_SCHEDULE_CALLBACK, is omitted here.)
    }
}
private void scheduleFrameLocked(long now) {
    // 1. mFrameScheduled guarantees that no new vsync is requested until the one already
    //    requested has arrived.
    if (!mFrameScheduled) {
        mFrameScheduled = true;
        if (USE_VSYNC) {
            // 2. If we are already on the Looper thread, request the vsync right away.
            if (isRunningOnLooperThreadLocked()) {
                scheduleVsyncLocked();
            } else {
                // 3. Otherwise, post an asynchronous message to the front of the queue so
                //    that the Looper thread requests the vsync as soon as possible.
                Message msg = mHandler.obtainMessage(MSG_DO_SCHEDULE_VSYNC);
                msg.setAsynchronous(true);
                mHandler.sendMessageAtFrontOfQueue(msg);
            }
        }
    }
}
Choreographer.scheduleVsyncLocked()
---mDisplayEventReceiver.scheduleVsync()
---nativeScheduleVsync()

// Native method declared in DisplayEventReceiver:
private static native void nativeScheduleVsync(long receiverPtr);
The role of Choreographer: it receives the VSYNC (pulse) signal and drives the UI thread through the drawing of each frame.
When a VSYNC arrives, it invokes the callbacks that users registered via postCallback. Several callback types are defined; in the version of the code shown below they are CALLBACK_INPUT, CALLBACK_ANIMATION, CALLBACK_INSETS_ANIMATION, CALLBACK_TRAVERSAL and CALLBACK_COMMIT.
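Before diving into the internals, here is a small app-level sketch of my own (illustrative, not framework code) that uses the public postFrameCallback() API. It rides on the same vsync-driven machinery described below and is a common way to measure frame intervals; internally such callbacks are posted with the FRAME_CALLBACK_TOKEN, which is how CallbackRecord.run() (shown later) tells them apart from plain Runnables.

import android.util.Log;
import android.view.Choreographer;

public class FrameMonitor implements Choreographer.FrameCallback {
    private long mLastFrameTimeNanos;

    public void start() {
        // Must be called on a Looper thread (normally the main thread),
        // because Choreographer.getInstance() is per-thread.
        Choreographer.getInstance().postFrameCallback(this);
    }

    @Override
    public void doFrame(long frameTimeNanos) {
        if (mLastFrameTimeNanos != 0) {
            // ~16 ms on a 60 Hz display; noticeably larger values mean frames were skipped.
            long intervalMs = (frameTimeNanos - mLastFrameTimeNanos) / 1_000_000;
            Log.d("FrameMonitor", "frame interval = " + intervalMs + " ms");
        }
        mLastFrameTimeNanos = frameTimeNanos;
        // Frame callbacks are one-shot, so re-register for the next frame.
        Choreographer.getInstance().postFrameCallback(this);
    }
}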
ViewRootImpl obtains its Choreographer in its constructor:

public ViewRootImpl(Context context, Display display) {
    mChoreographer = Choreographer.getInstance();
}

public static Choreographer getInstance() {
    // sThreadInstance is a ThreadLocal, so each Looper thread gets its own Choreographer.
    return sThreadInstance.get();
}
private Choreographer(Looper looper, int vsyncSource) {
    mLooper = looper;
    // 1. Handler used for message handling on this thread.
    mHandler = new FrameHandler(looper);
    // 2. If the system uses the vsync mechanism, register a FrameDisplayEventReceiver.
    mDisplayEventReceiver = USE_VSYNC
            ? new FrameDisplayEventReceiver(looper, vsyncSource)
            : null;
    // 3. Timestamp of the last rendered frame.
    mLastFrameTimeNanos = Long.MIN_VALUE;
    // 4. Frame interval in nanoseconds: ~16.7 ms per frame on a 60 Hz display.
    mFrameIntervalNanos = (long)(1000000000 / getRefreshRate());
    // 5. Create the callback queues ...
    mCallbackQueues = new CallbackQueue[CALLBACK_LAST + 1];
    // 6. ... one CallbackQueue per callback type; they are drained when the next frame is rendered.
    for (int i = 0; i <= CALLBACK_LAST; i++) {
        mCallbackQueues[i] = new CallbackQueue();
    }
    // b/68769804: For low FPS experiments.
    setFPSDivisor(SystemProperties.getInt(ThreadedRenderer.DEBUG_FPS_DIVISOR, 1));
}
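As a quick check of step 4, the frame interval works out as follows (a standalone sketch of my own, not framework code):

public class FrameIntervalMath {
    public static void main(String[] args) {
        // mFrameIntervalNanos = 1e9 / refreshRate
        for (float refreshRate : new float[] {60f, 90f, 120f}) {
            long frameIntervalNanos = (long) (1_000_000_000 / refreshRate);
            // 60 Hz -> ~16.67 ms, 90 Hz -> ~11.11 ms, 120 Hz -> ~8.33 ms
            System.out.printf("%.0f Hz -> %.2f ms%n",
                    refreshRate, frameIntervalNanos / 1_000_000.0);
        }
    }
}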
During initialization the Choreographer creates a FrameHandler:
private final class FrameHandler extends Handler {
    @Override
    public void handleMessage(Message msg) {
        switch (msg.what) {
            case MSG_DO_FRAME:             // start rendering the next frame
                doFrame(System.nanoTime(), 0);
                break;
            case MSG_DO_SCHEDULE_VSYNC:    // request the next vsync signal
                doScheduleVsync();
                break;
            case MSG_DO_SCHEDULE_CALLBACK: // run the pending callbacks
                doScheduleCallback(msg.arg1);
                break;
        }
    }
}
MSG_DO_FRAME: start rendering the next frame
MSG_DO_SCHEDULE_VSYNC: request the next vsync signal
MSG_DO_SCHEDULE_CALLBACK: run the pending callbacks
The vsync signal is produced and periodically delivered by SurfaceFlinger; FrameDisplayEventReceiver is the class that receives these vsync pulses.
public DisplayEventReceiver(Looper looper, int vsyncSource) {
    // 1. Message queue of the current (Looper) thread.
    mMessageQueue = looper.getQueue();
    // 2. Holds a pointer to the native NativeDisplayEventReceiver.
    mReceiverPtr = nativeInit(new WeakReference<DisplayEventReceiver>(this), mMessageQueue,
            vsyncSource);
    mCloseGuard.open("dispose");
}
nativeInit crosses into native code. For now we only skim the native parts, which is also a chance to get a feel for the C/C++ side.
/**
 * Parameters:
 * 1. receiverWeak    -> the Java-layer DisplayEventReceiver
 * 2. messageQueueObj -> the Java-layer MessageQueue of the current thread
 */
static jlong nativeInit(JNIEnv* env, jclass clazz, jobject receiverWeak,
        jobject messageQueueObj, jint vsyncSource) {
    sp<MessageQueue> messageQueue = android_os_MessageQueue_getMessageQueue(env, messageQueueObj);
    if (messageQueue == NULL) {
        jniThrowRuntimeException(env, "MessageQueue is not initialized.");
        return 0;
    }
    // 1. Create the native NativeDisplayEventReceiver, which holds a reference back to the
    //    Java-layer FrameDisplayEventReceiver.
    // 2. NativeDisplayEventReceiver ultimately derives from LooperCallback.
    sp<NativeDisplayEventReceiver> receiver = new NativeDisplayEventReceiver(env,
            receiverWeak, messageQueue, vsyncSource);
    // 3. Start listening on the file descriptor held by mReceiver; whenever data arrives,
    //    the handleEvent() override from LooperCallback is invoked.
    status_t status = receiver->initialize();
    receiver->incStrong(gDisplayEventReceiverClassInfo.clazz); // retain a reference for the object
    return reinterpret_cast<jlong>(receiver.get());
}
receiver->initialize() is where Looper::addFd() comes in:
status_t DisplayEventDispatcher::initialize() {
    status_t result = mReceiver.initCheck();
    // 1. Register the receiver's file descriptor with the Looper; the third argument is
    //    Looper::EVENT_INPUT, and `this` is the callback to invoke when data arrives.
    int rc = mLooper->addFd(mReceiver.getFd(), 0, Looper::EVENT_INPUT, this, NULL);
    if (rc < 0) {
        return UNKNOWN_ERROR;
    }
    return OK;
}
The vsync signal is emitted periodically by SurfaceFlinger: it wakes the DispSyncThread, which in turn wakes the EventThread, and the event is then delivered over a BitTube straight to the target thread of the target process, where handleEvent() runs. (This description is quoted from the reference articles listed above.)
A large amount of C++ code is omitted here; only the call flow is shown:
android_view_DisplayEventReceiver.handleEvent
---android_view_DisplayEventReceiver.processPendingEvents
---android_view_DisplayEventReceiver.dispatchVsync
---FrameDisplayEventReceiver.dispatchVsync
So the flow eventually calls back into the Java layer, where dispatchVsync() is invoked on the FrameDisplayEventReceiver:
private void dispatchVsync(long timestampNanos, long physicalDisplayId, int frame) {
    onVsync(timestampNanos, physicalDisplayId, frame);
}
In other words, every time a vsync signal fires, FrameDisplayEventReceiver.onVsync() is invoked:
@Override
public void onVsync(long timestampNanos, long physicalDisplayId, int frame) {
    /**
     * 1. The vsync event is posted into the MessageQueue through the Handler. If no queued
     *    message has a timestamp earlier than the frame time, the vsync event is handled
     *    immediately; otherwise the earlier messages are processed first.
     * 2. In short: if some other message is still running when the vsync arrives, it runs to
     *    completion first. A long-running non-UI message therefore delays handling of the
     *    vsync, which the user perceives as dropped frames.
     */
    long now = System.nanoTime();
    if (mHavePendingVsync) {
        // (The full source logs a warning here: there should only be one pending vsync at a time.)
    } else {
        mHavePendingVsync = true;
    }
    mTimestampNanos = timestampNanos;
    mFrame = frame;
    // 2. `this` is passed as the Runnable callback, so run() below will be invoked.
    Message msg = Message.obtain(mHandler, this);
    msg.setAsynchronous(true);
    // 3. mHandler is the FrameHandler created in the constructor.
    mHandler.sendMessageAtTime(msg, timestampNanos / TimeUtils.NANOS_PER_MS);
}
When that message is dispatched, run() clears the pending flag and calls doFrame():

public void run() {
    mHavePendingVsync = false;
    doFrame(mTimestampNanos, mFrame);
}
// 1. frameTimeNanos: timestamp at which the underlying vsync signal arrived
void doFrame(long frameTimeNanos, int frame) {
    final long startNanos;
    synchronized (mLock) {
        if (!mFrameScheduled) {
            return; // no work to do
        }
        long intendedFrameTimeNanos = frameTimeNanos;
        // 2. Timestamp (ns) at which this frame actually starts executing.
        startNanos = System.nanoTime();
        /**
         * 3. jitterNanos: delay between the VSYNC being received by FrameDisplayEventReceiver
         *    and this code running. Ideally it is close to 0. If the start of drawing has
         *    been pushed back by more than one frame interval (jitterNanos > mFrameIntervalNanos),
         *    the end of drawing will also slip past the interval, so the frame in progress is
         *    not finished when the next vsync arrives and a frame is dropped. Left uncorrected,
         *    the recorded frame times would drift further and further out of step with vsync.
         * 3.1 Therefore, after logging the dropped frames, the frame timestamp is shifted so
         *     that it stays aligned with the vsync cadence.
         */
        final long jitterNanos = startNanos - frameTimeNanos;
        // 4. If the delay exceeds one frame interval (~16.7 ms at 60 Hz), something took too long.
        if (jitterNanos >= mFrameIntervalNanos) {
            // 5. total delay / frame interval = number of skipped frames
            final long skippedFrames = jitterNanos / mFrameIntervalNanos;
            // 6. If more frames were skipped than the warning threshold, log it.
            if (skippedFrames >= SKIPPED_FRAME_WARNING_LIMIT) {
                Log.i(TAG, "Skipped " + skippedFrames + " frames! "
                        + "The application may be doing too much work on its main thread.");
            }
            // 7. remainder = delay % frame interval: offset past the most recent vsync boundary
            final long lastFrameOffset = jitterNanos % mFrameIntervalNanos;
            // 8. Debug logging of the offset and skipped frames.
            if (DEBUG_JANK) {
                Log.d(TAG, "Missed vsync by " + (jitterNanos * 0.000001f) + " ms "
                        + "which is more than the frame interval of "
                        + (mFrameIntervalNanos * 0.000001f) + " ms! "
                        + "Skipping " + skippedFrames + " frames and setting frame "
                        + "time to " + (lastFrameOffset * 0.000001f) + " ms in the past.");
            }
            // 9. Rewind the frame time to the most recent vsync boundary so later work stays in sync.
            frameTimeNanos = startNanos - lastFrameOffset;
        }
        // 10. If time appears to have gone backwards, do nothing and wait for the next vsync.
        if (frameTimeNanos < mLastFrameTimeNanos) {
            if (DEBUG_JANK) {
                Log.d(TAG, "Frame time appears to be going backwards. May be due to a "
                        + "previously skipped frame. Waiting for next vsync.");
            }
            // 11. Request the next vsync.
            scheduleVsyncLocked();
            return;
        }
        // 12. Record the timing of the current frame.
        mFrameInfo.setVsync(intendedFrameTimeNanos, frameTimeNanos);
        // 13. Clear the flag. It is set when a vsync is requested and cleared here once the
        //     callback runs, so no new vsync is requested (i.e. the CPU/GPU are not asked to
        //     prepare another frame) before the current frame has actually started drawing.
        mFrameScheduled = false;
        mLastFrameTimeNanos = frameTimeNanos;
    }
    try {
        // 14. Run the registered callbacks, one type at a time.
        AnimationUtils.lockAnimationClock(frameTimeNanos / TimeUtils.NANOS_PER_MS);
        // 14.1 input callbacks
        doCallbacks(Choreographer.CALLBACK_INPUT, frameTimeNanos);
        // 14.2 animation callbacks
        doCallbacks(Choreographer.CALLBACK_ANIMATION, frameTimeNanos);
        doCallbacks(Choreographer.CALLBACK_INSETS_ANIMATION, frameTimeNanos);
        // 14.3 traversal (measure/layout/draw) callbacks
        doCallbacks(Choreographer.CALLBACK_TRAVERSAL, frameTimeNanos);
        doCallbacks(Choreographer.CALLBACK_COMMIT, frameTimeNanos);
    } finally {
        AnimationUtils.unlockAnimationClock();
    }
}
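To make the time correction in steps 3-9 concrete, here is a small worked example of my own with illustrative numbers, assuming a 60 Hz display:

public class JankMathDemo {
    public static void main(String[] args) {
        long frameIntervalNanos = 16_666_666L;   // ~16.67 ms per frame at 60 Hz
        long frameTimeNanos     = 0L;            // vsync timestamp (relative origin)
        long startNanos         = 40_000_000L;   // doFrame only starts 40 ms later

        long jitterNanos     = startNanos - frameTimeNanos;        // 40 ms late
        long skippedFrames   = jitterNanos / frameIntervalNanos;   // = 2 whole frames
        long lastFrameOffset = jitterNanos % frameIntervalNanos;   // ~6.67 ms into the third

        // doFrame() rewinds the frame time to the most recent vsync boundary,
        // so the callbacks run as if they belonged to that vsync period.
        long correctedFrameTimeNanos = startNanos - lastFrameOffset; // ~33.33 ms
        System.out.println(skippedFrames + " frame(s) skipped, corrected frame time = "
                + correctedFrameTimeNanos / 1_000_000.0 + " ms");
    }
}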
Time correction before the callbacks run:
- If jitterNanos < mFrameIntervalNanos, the vsync timestamp is used as-is for the callbacks.
- If jitterNanos >= mFrameIntervalNanos, the frame time is shifted to the start of the most recent vsync period (startNanos - lastFrameOffset).
This correction keeps doFrame() in step with the vsync cadence, and it also keeps the recorded frame time close to the real start time, which makes synchronization and analysis easier.

doCallbacks() is then invoked once per callback type, in order: CALLBACK_INPUT, CALLBACK_ANIMATION, CALLBACK_INSETS_ANIMATION, CALLBACK_TRAVERSAL, CALLBACK_COMMIT.
void doCallbacks(int callbackType, long frameTimeNanos) {
    CallbackRecord callbacks;
    synchronized (mLock) {
        final long now = System.nanoTime();
        // Extract the callbacks of this type that are due by now.
        callbacks = mCallbackQueues[callbackType].extractDueCallbacksLocked(
                now / TimeUtils.NANOS_PER_MS);
        if (callbacks == null) {
            return;
        }
        mCallbacksRunning = true;
        if (callbackType == Choreographer.CALLBACK_COMMIT) {
            // If more than two frame intervals have passed since the frame began, back-date
            // the commit time so it stays within one interval of now; this keeps animations
            // that key off the commit time from jumping too far ahead.
            final long jitterNanos = now - frameTimeNanos;
            if (jitterNanos >= 2 * mFrameIntervalNanos) {
                final long lastFrameOffset = jitterNanos % mFrameIntervalNanos
                        + mFrameIntervalNanos;
                frameTimeNanos = now - lastFrameOffset;
                mLastFrameTimeNanos = frameTimeNanos;
            }
        }
    }
    try {
        for (CallbackRecord c = callbacks; c != null; c = c.next) {
            // Invoke CallbackRecord.run() for each due callback.
            c.run(frameTimeNanos);
        }
    } finally {
        synchronized (mLock) {
            mCallbacksRunning = false;
            do {
                final CallbackRecord next = callbacks.next;
                recycleCallbackLocked(callbacks);
                callbacks = next;
            } while (callbacks != null);
        }
    }
}
Each CallbackRecord then runs:

public void run(long frameTimeNanos) {
    if (token == FRAME_CALLBACK_TOKEN) {
        // Posted via postFrameCallback(): the action is a FrameCallback.
        ((FrameCallback) action).doFrame(frameTimeNanos);
    } else {
        // Posted via postCallback(): the action is a plain Runnable.
        ((Runnable) action).run();
    }
}
2.4 Choreographer.postCallback
For the traversal callback posted in scheduleTraversals(), the token is null and the action is ViewRootImpl.mTraversalRunnable, so run() falls into the else branch and invokes mTraversalRunnable.run(): TraversalRunnable.run() -> ViewRootImpl.doTraversal()
void doTraversal() {
    if (mTraversalScheduled) {
        mTraversalScheduled = false;
        // 1. Remove the sync barrier that was posted in scheduleTraversals().
        mHandler.getLooper().getQueue().removeSyncBarrier(mTraversalBarrier);
        // 2. Start the actual traversal: measure, layout and draw.
        performTraversals();
    }
}