This article follows on from the previous one on prepare and walks through the play flow.
The earlier steps are omitted; we start directly from AwesomePlayer::play().
status_t AwesomePlayer::play() {
    ……
    return play_l();
}

status_t AwesomePlayer::play_l() {
    ……
    if (mAudioSource != NULL) {
        if (mAudioPlayer == NULL) {
            createAudioPlayer_l();
        }

        if (mVideoSource == NULL) {
            status_t err = startAudioPlayer_l(
                    false /* sendErrorNotification */);
            ……
        }
    }

    if (mVideoSource != NULL) {
        // Kick off video playback
        postVideoEvent_l();

        if (mAudioSource != NULL && mVideoSource != NULL) {
            postVideoLagEvent_l();
        }
    }
    ……
}
When playing a video file such as AVI or MP4, both mAudioSource and mVideoSource are non-NULL; when playing an MP3 file, mAudioSource is non-NULL while mVideoSource is NULL. The analysis below follows the video-file playback path.
Let's look at createAudioPlayer_l() first:

void AwesomePlayer::createAudioPlayer_l() {
    ……
    mAudioPlayer = new AudioPlayer(mAudioSink, flags, this);
    mAudioPlayer->setSource(mAudioSource);
    ……
}
As we can see, an AudioPlayer is created and mAudioSource is handed to the newly created mAudioPlayer.
Next, postVideoEvent_l():

void AwesomePlayer::postVideoEvent_l(int64_t delayUs) {
    ATRACE_CALL();

    if (mVideoEventPending) {
        return;
    }

    mVideoEventPending = true;
    mQueue.postEventWithDelay(mVideoEvent, delayUs < 0 ? 10000 : delayUs);
}
TimedEventQueue itself was covered in the previous article on TimedEventQueue. mVideoEvent is created in AwesomePlayer's constructor as follows:

mVideoEvent = new AwesomeEvent(this, &AwesomePlayer::onVideoEvent);
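AwesomeEvent is just a thin TimedEventQueue::Event wrapper that stores a pointer-to-member-function of AwesomePlayer and invokes it when the queue fires the event. A simplified sketch of the wrapper (details trimmed; see AwesomePlayer.cpp for the real thing):

struct AwesomeEvent : public TimedEventQueue::Event {
    AwesomeEvent(AwesomePlayer *player, void (AwesomePlayer::*method)())
        : mPlayer(player),
          mMethod(method) {
    }

protected:
    // Called on the TimedEventQueue thread once the delay has elapsed.
    virtual void fire(TimedEventQueue *queue, int64_t /* now_us */) {
        (mPlayer->*mMethod)();
    }

private:
    AwesomePlayer *mPlayer;
    void (AwesomePlayer::*mMethod)();
};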
So, after mQueue.postEventWithDelay(), execution eventually reaches onVideoEvent:

void AwesomePlayer::onVideoEvent() {
    ……
    if (!mVideoBuffer) {
        ……
        for (;;) {
            status_t err = mVideoSource->read(&mVideoBuffer, &options);
            ……
            if (mVideoBuffer->range_length() == 0) {
                mVideoBuffer->release();
                mVideoBuffer = NULL;
                continue;
            }
            break;
        }

        {
            Mutex::Autolock autoLock(mStatsLock);
            ++mStats.mNumVideoFramesDecoded;
        }
    }
    ……
    if (mAudioPlayer != NULL && !(mFlags & (AUDIO_RUNNING | SEEK_PREVIEW))) {
        status_t err = startAudioPlayer_l();
    }

    if ((mFlags & TEXTPLAYER_INITIALIZED)
            && !(mFlags & (TEXT_RUNNING | SEEK_PREVIEW))) {
        mTextDriver->start();
        modifyFlags(TEXT_RUNNING, SET);
    }
    ……
    if ((mNativeWindow != NULL)
            && (mVideoRendererIsPreview || mVideoRenderer == NULL)) {
        mVideoRendererIsPreview = false;

        initRenderer_l();
    }

    if (mVideoRenderer != NULL) {
        mSinceLastDropped++;
        mVideoRenderer->render(mVideoBuffer);
        if (!mVideoRenderingStarted) {
            mVideoRenderingStarted = true;
            notifyListener_l(MEDIA_INFO, MEDIA_INFO_RENDERING_START);
        }

        if (mFlags & PLAYING) {
            notifyIfMediaStarted_l();
        }
    }
    ……
    postVideoEvent_l();
}
Now let's go through it step by step.
Step 1: status_t err = mVideoSource->read(&mVideoBuffer, &options);

This call reads data from the source, feeds it to the decoder, and finally returns the decoded data through the output parameter buffer, to be used by the renderer later (mVideoSource here is the OMXCodec created during prepare).

status_t OMXCodec::read(
        MediaBuffer **buffer, const ReadOptions *options) {
    ……
    if (mInitialBufferSubmit) {
        // Runs only once; afterwards the decoder's registered callbacks
        // trigger drainInputBuffers() again to fetch more raw video data.
        mInitialBufferSubmit = false;
        ……
        drainInputBuffers();   // read data from the video track and feed it to the decoder

        if (mState == EXECUTING) {
            // Otherwise mState == RECONFIGURING and this code will trigger
            // after the output port is reenabled.
            fillOutputBuffers();   // ask the decoder for output data
        }
    }
    ……
    // wait until the decoder has produced decoded output
    while (mState != ERROR && !mNoMoreOutputData && mFilledBuffers.empty()) {
        if ((err = waitForBufferFilled_l()) != OK) {
            return err;
        }
    }
    ……
    *buffer = info->mMediaBuffer;   // hand the decoded buffer to AwesomePlayer
    return OK;
}
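In the elided part just before the return, read() essentially takes the first index out of mFilledBuffers (the queue of output buffers the decoder has filled), marks that buffer as owned by the client, and hands its MediaBuffer back to AwesomePlayer. An approximate reconstruction:

// Approximate sketch of the elided code ahead of read()'s return
size_t index = *mFilledBuffers.begin();
mFilledBuffers.erase(mFilledBuffers.begin());

BufferInfo *info = &mPortBuffers[kPortIndexOutput].editItemAt(index);
info->mStatus = OWNED_BY_CLIENT;   // AwesomePlayer now owns this buffer
info->mMediaBuffer->add_ref();

*buffer = info->mMediaBuffer;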
drainInputBuffers() and fillOutputBuffers() will be analyzed later. For now, continue with onVideoEvent's Step 2: status_t err = startAudioPlayer_l();
status_t AwesomePlayer::startAudioPlayer_l(bool sendErrorNotification) {
    ……
    err = mAudioPlayer->start(
            true /* sourceAlreadyStarted */);
    ……
    return err;
}

status_t AudioPlayer::start(bool sourceAlreadyStarted) {
    ……
    mAudioTrack = new AudioTrack(AUDIO_STREAM_MUSIC,
            mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
            0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);
    ……
    // After start(), mAudioTrack begins playback and pulls audio data
    // through the AudioCallback callback.
    mAudioTrack->start();
    ……
    return OK;
}
void AudioPlayer::AudioCallback(int event, void *info) {
    switch (event) {
        case AudioTrack::EVENT_MORE_DATA:
        {
            AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
            // fillBuffer() reads data via mSource->read(&mInputBuffer, &options);
            // mSource is the mAudioSource that was set earlier.
            size_t numBytesWritten = fillBuffer(buffer->raw, buffer->size);
            buffer->size = numBytesWritten;
        }
        break;

        case AudioTrack::EVENT_STREAM_END:
            mReachedEOS = true;
            notifyAudioEOS();
            break;
    }
}
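fillBuffer() is the audio-side pull loop: it keeps reading decoded PCM buffers from mSource and copies them into the buffer that AudioTrack asked to have filled. A much simplified sketch of that loop (seeking, timestamp bookkeeping, and error handling omitted):

size_t AudioPlayer::fillBuffer(void *data, size_t size) {
    size_t sizeDone = 0;

    while (sizeDone < size) {
        if (mInputBuffer == NULL) {
            // Pull the next decoded buffer from the audio decoder.
            if (mSource->read(&mInputBuffer, NULL /* options */) != OK) {
                mReachedEOS = true;
                break;
            }
        }

        // Copy as much of the decoded data as fits into AudioTrack's buffer.
        size_t copy = size - sizeDone;
        if (copy > mInputBuffer->range_length()) {
            copy = mInputBuffer->range_length();
        }
        memcpy((char *)data + sizeDone,
               (const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
               copy);

        mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
                                mInputBuffer->range_length() - copy);
        if (mInputBuffer->range_length() == 0) {
            mInputBuffer->release();
            mInputBuffer = NULL;   // fetch a new buffer on the next pass
        }

        sizeDone += copy;
    }

    return sizeDone;
}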
Step 3: initRenderer_l() — create the renderer.
void AwesomePlayer::initRenderer_l() {
    ……
    if (USE_SURFACE_ALLOC
            && !strncmp(component, "OMX.", 4)
            && strncmp(component, "OMX.google.", 11)) {
        // Hardware decoders avoid the CPU color conversion by decoding
        // directly to ANativeBuffers, so we must use a renderer that
        // just pushes those buffers to the ANativeWindow.
        // (hardware-accelerated renderer)
        mVideoRenderer =
            new AwesomeNativeWindowRenderer(mNativeWindow, rotationDegrees);
    } else {
        // Other decoders are instantiated locally and as a consequence
        // allocate their buffers in local address space. This renderer
        // then performs a color conversion and copy to get the data
        // into the ANativeBuffer.
        // (software renderer)
        mVideoRenderer = new AwesomeLocalRenderer(mNativeWindow, meta);
    }
}
Step 4: mVideoRenderer->render(mVideoBuffer);
This simply puts the content of mVideoBuffer onto the screen.
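For the hardware path, AwesomeNativeWindowRenderer::render() does little more than time-stamp the ANativeWindowBuffer the decoder produced and queue it to the native window. A rough sketch (the exact queueBuffer signature varies across Android versions):

// Rough sketch of AwesomeNativeWindowRenderer::render()
virtual void render(MediaBuffer *buffer) {
    int64_t timeUs;
    CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));

    // Tell SurfaceFlinger when this frame should be presented,
    // then hand the decoder's buffer straight to the window.
    native_window_set_buffers_timestamp(mNativeWindow.get(), timeUs * 1000);
    status_t err = mNativeWindow->queueBuffer(
            mNativeWindow.get(), buffer->graphicBuffer().get(), -1 /* no fence */);
    if (err != 0) {
        ALOGE("queueBuffer failed with error %s (%d)", strerror(-err), -err);
        return;
    }

    buffer->meta_data()->setInt32(kKeyRendered, 1);
}

The software path (AwesomeLocalRenderer) instead color-converts and copies the decoded frame into a window buffer before queuing it.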
At this point the onVideoEvent flow is complete. Next, let's look at drainInputBuffers and fillOutputBuffers in detail; both communicate with the decoder through OMX (the plural variants simply loop over the per-buffer functions shown below).

bool OMXCodec::drainInputBuffer(BufferInfo *info) {
    ……
    // mSource is mVideoTrack; read() fetches the raw (un-decoded) video data
    err = mSource->read(&srcBuffer);
    ……
    // feed the data to the decoder via OMX's emptyBuffer
    err = mOMX->emptyBuffer(
            mNode, info->mBuffer, 0, offset,
            flags, timestampUs);

    info->mStatus = OWNED_BY_COMPONENT;

    return true;
}
void OMXCodec::fillOutputBuffer(BufferInfo *info) {
    // ask the decoder for a decoded frame via OMX's fillBuffer
    status_t err = mOMX->fillBuffer(mNode, info->mBuffer);

    info->mStatus = OWNED_BY_COMPONENT;
}
This looks simple, but the path into the decoder is actually fairly involved. Let's trace it using emptyBuffer as the example.
First, mOMX is the Bp (proxy) side of OMX; its Bn (native) side is implemented in OMX.cpp:

status_t OMX::emptyBuffer(
        node_id node,
        buffer_id buffer,
        OMX_U32 range_offset, OMX_U32 range_length,
        OMX_U32 flags, OMX_TICKS timestamp) {
    return findInstance(node)->emptyBuffer(
            buffer, range_offset, range_length, flags, timestamp);
}
The node value itself was obtained from OMX::allocateNode during prepare, so the instance returned here is exactly the OMXNodeInstance created at that time. Its emptyBuffer() is defined as follows:
status_t OMXNodeInstance::emptyBuffer(
        OMX::buffer_id buffer,
        OMX_U32 rangeOffset, OMX_U32 rangeLength,
        OMX_U32 flags, OMX_TICKS timestamp) {
    Mutex::Autolock autoLock(mLock);

    OMX_BUFFERHEADERTYPE *header = (OMX_BUFFERHEADERTYPE *)buffer;
    header->nFilledLen = rangeLength;
    header->nOffset = rangeOffset;
    header->nFlags = flags;
    header->nTimeStamp = timestamp;

    BufferMeta *buffer_meta =
        static_cast<BufferMeta *>(header->pAppPrivate);
    buffer_meta->CopyToOMX(header);

    OMX_ERRORTYPE err = OMX_EmptyThisBuffer(mHandle, header);

    return StatusFromOMXError(err);
}
#define OMX_EmptyThisBuffer(                        \
        hComponent,                                 \
        pBuffer)                                    \
    ((OMX_COMPONENTTYPE*)hComponent)->EmptyThisBuffer( \
        hComponent,                                 \
        pBuffer)                    /* Macro End */
From the code we can see that this actually calls mHandle's EmptyThisBuffer method. So what is mHandle? During prepare we called OMX::allocateNode, and inside that method its value was set through OMXNodeInstance::setHandle. Let's review that implementation:
status_t OMX::allocateNode(
        const char *name, const sp<IOMXObserver> &observer, node_id *node) {
    Mutex::Autolock autoLock(mLock);

    *node = 0;

    // create the OMXNodeInstance
    OMXNodeInstance *instance = new OMXNodeInstance(this, observer);

    // create the decoder component and store it in handle
    OMX_COMPONENTTYPE *handle;
    OMX_ERRORTYPE err = mMaster->makeComponentInstance(
            name, &OMXNodeInstance::kCallbacks,
            instance, &handle);

    if (err != OMX_ErrorNone) {
        ALOGV("FAILED to allocate omx component '%s'", name);

        instance->onGetHandleFailed();

        return UNKNOWN_ERROR;
    }

    *node = makeNodeID(instance);
    mDispatchers.add(*node, new CallbackDispatcher(instance));

    instance->setHandle(*node, handle);

    mLiveNodes.add(observer->asBinder(), instance);
    observer->asBinder()->linkToDeath(this);

    return OK;
}
void OMXNodeInstance::setHandle(OMX::node_id node_id, OMX_HANDLETYPE handle) {
    CHECK(mHandle == NULL);
    mNodeID = node_id;
    mHandle = handle;
}
OK, so the whole path for sending data into the decoder is now clear. But as noted above, in OMXCodec::read() drainInputBuffers() runs only once; how does data keep flowing to the decoder after that? Through callbacks. Let's see how this callback path is wired up.
In OMX::allocateNode:
OMX_COMPONENTTYPE *handle;
OMX_ERRORTYPE err = mMaster->makeComponentInstance(
        name, &OMXNodeInstance::kCallbacks,
        instance, &handle);
the callback functions are registered at the moment the decoder component is created:
OMX_CALLBACKTYPE OMXNodeInstance::kCallbacks = {
    &OnEvent, &OnEmptyBufferDone, &OnFillBufferDone
};
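These three entries fill in the OMX_CALLBACKTYPE structure defined by the OpenMAX IL header (OMX_Core.h), which looks roughly like this:

typedef struct OMX_CALLBACKTYPE {
    // State changes, errors, port-settings changes, etc.
    OMX_ERRORTYPE (*EventHandler)(
            OMX_HANDLETYPE hComponent, OMX_PTR pAppData,
            OMX_EVENTTYPE eEvent, OMX_U32 nData1, OMX_U32 nData2, OMX_PTR pEventData);

    // The component has consumed an input buffer.
    OMX_ERRORTYPE (*EmptyBufferDone)(
            OMX_HANDLETYPE hComponent, OMX_PTR pAppData,
            OMX_BUFFERHEADERTYPE *pBuffer);

    // The component has filled an output buffer.
    OMX_ERRORTYPE (*FillBufferDone)(
            OMX_HANDLETYPE hComponent, OMX_PTR pAppData,
            OMX_BUFFERHEADERTYPE *pBuffer);
} OMX_CALLBACKTYPE;

Note that the instance pointer passed to makeComponentInstance above comes back as pAppData in these callbacks, which is how OnEmptyBufferDone below recovers its OMXNodeInstance.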
When the decoder has finished consuming an input buffer of raw data, it invokes OMXNodeInstance's OnEmptyBufferDone:
OMX_ERRORTYPE OMXNodeInstance::OnEmptyBufferDone(
        OMX_IN OMX_HANDLETYPE hComponent,
        OMX_IN OMX_PTR pAppData,
        OMX_IN OMX_BUFFERHEADERTYPE* pBuffer) {
    OMXNodeInstance *instance = static_cast<OMXNodeInstance *>(pAppData);
    if (instance->mDying) {
        return OMX_ErrorNone;
    }
    return instance->owner()->OnEmptyBufferDone(instance->nodeID(), pBuffer);
}
instance->owner() is simply the OMX object, so next look at OMX::OnEmptyBufferDone:
OMX_ERRORTYPE OMX::OnEmptyBufferDone(
        node_id node, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer) {
    ALOGV("OnEmptyBufferDone buffer=%p", pBuffer);

    omx_message msg;
    msg.type = omx_message::EMPTY_BUFFER_DONE;
    msg.node = node;
    msg.u.buffer_data.buffer = pBuffer;

    findDispatcher(node)->post(msg);

    return OMX_ErrorNone;
}
findDispatcher() returns a CallbackDispatcher object, an inner class of OMX that was also created in OMX::allocateNode. When constructed, CallbackDispatcher starts a thread that keeps pulling messages off a queue and handles each one via dispatch() (see the sketch after the dispatch() code below). Here is dispatch():
void OMX::CallbackDispatcher::dispatch(const omx_message &msg) {
    if (mOwner == NULL) {
        ALOGV("Would have dispatched a message to a node that's already gone.");
        return;
    }
    mOwner->onMessage(msg);
}
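The post()/dispatch() pair sits on top of an ordinary mutex-and-condition message queue served by the dispatcher's own thread. A rough sketch of that machinery:

// Rough sketch of OMX::CallbackDispatcher's queue and thread loop
void OMX::CallbackDispatcher::post(const omx_message &msg) {
    Mutex::Autolock autoLock(mLock);
    mQueue.push_back(msg);
    mQueueChanged.signal();            // wake up the dispatcher thread
}

void OMX::CallbackDispatcher::loop() {
    for (;;) {
        omx_message msg;
        {
            Mutex::Autolock autoLock(mLock);
            while (!mDone && mQueue.empty()) {
                mQueueChanged.wait(mLock);
            }
            if (mDone) {
                break;
            }
            msg = *mQueue.begin();
            mQueue.erase(mQueue.begin());
        }
        dispatch(msg);                 // ends up in OMXNodeInstance::onMessage
    }
}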
mOwner is the OMXNodeInstance object, so next is OMXNodeInstance::onMessage:
void OMXNodeInstance::onMessage(const omx_message &msg) {
    ……
    mObserver->onMessage(msg);
}
mObserver is an OMXCodecObserver object, created in OMXCodec::Create during prepare. Its onMessage is implemented as follows:
virtual void onMessage(const omx_message &msg) {
    sp<OMXCodec> codec = mTarget.promote();

    if (codec.get() != NULL) {
        Mutex::Autolock autoLock(codec->mLock);
        codec->on_message(msg);
        codec.clear();
    }
}
codec is the OMXCodec object; here is its on_message implementation:
void OMXCodec::on_message(const omx_message &msg) {
    ……
    switch (msg.type) {
        ……
        case omx_message::EMPTY_BUFFER_DONE:
        {
            ……
            if (mPortStatus[kPortIndexInput] == DISABLING) {
                ……
            } else if (mState != ERROR
                    && mPortStatus[kPortIndexInput] != SHUTTING_DOWN) {
                CHECK_EQ((int)mPortStatus[kPortIndexInput], (int)ENABLED);

                if (mFlags & kUseSecureInputBuffers) {
                    drainAnyInputBuffer();
                } else {
                    // see: once this input buffer is consumed, new data is requested right away
                    drainInputBuffer(&buffers->editItemAt(i));
                }
            }
            break;
        }

        case omx_message::FILL_BUFFER_DONE:
        {
            // The FILL_BUFFER_DONE callback travels the same path as EMPTY_BUFFER_DONE.
            // Its main job is to notify AwesomePlayer that the decoder has produced
            // a decoded frame that is ready to be rendered.
            ……
        }
    }
}
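In the elided FILL_BUFFER_DONE handling, the key steps are roughly: mark the returned output buffer as ours again, attach the decoded data to its MediaBuffer, push the buffer's index onto mFilledBuffers, and signal the condition that OMXCodec::read() is waiting on, which is what finally unblocks the while loop we saw in read(). Approximately:

// Approximate core of the FILL_BUFFER_DONE case in OMXCodec::on_message()
info->mStatus = OWNED_BY_US;          // the component has handed the buffer back
// ... attach the decoded range/timestamp to info->mMediaBuffer ...
mFilledBuffers.push_back(i);          // i is the index of this output buffer
mBufferFilled.signal();               // wakes waitForBufferFilled_l() in read()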
The end.