Android OMX
Android Audio Playback
How Media Stream Buffers Are Passed Around in AwesomePlayer
In the earlier analysis of the Stagefright playback call flow, play() starts a TimedEventQueue thread that runs events in a loop: onVideoEvent is executed, and postEventWithDelay(mVideoEvent, 0) puts the event back on the queue so that it runs again.
Inside onVideoEvent the following call is made:
status_t err = mVideoSource->read(&mVideoBuffer, &options);
mVideoSource is the OMXCodec returned by OMXCodec::Create, which selects the matching OMXCodec component according to the media format.
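For reference, mVideoSource is set up in AwesomePlayer::initVideoDecoder roughly as follows (a simplified sketch based on the AOSP sources; error handling and the exact flag values are omitted):

// AwesomePlayer::initVideoDecoder, simplified
sp<IOMX> omx = mClient.interface();   // IOMX proxy obtained via OMXClient

mVideoSource = OMXCodec::Create(
        omx,
        mVideoTrack->getFormat(),     // MetaData describing the compressed format
        false,                        // createEncoder = false: we want a decoder
        mVideoTrack,                  // the demuxed video track is the data source
        NULL,                         // no specific component name requested
        flags,                        // OMXCodec flags
        mNativeWindow);               // decoded frames are rendered here

status_t err = mVideoSource->start();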
OMXCodec::read is shown below. Simplified, the main flow is:
1. drainInputBuffers() — fill the input buffers with source data and submit ("empty") them to the component;
2. fillOutputBuffers() — ask the component to fill the output buffers;
3. waitForBufferFilled_l() — wait until an output buffer has been filled (sketched right after the read code);
4. check mFilledBuffers.empty() to see whether a filled output buffer is available;
5. pop an index from mFilledBuffers, look up the entry in mPortBuffers[kPortIndexOutput] and return its mMediaBuffer through *buffer.
status_t OMXCodec::read(
        MediaBuffer **buffer, const ReadOptions *options) {
    status_t err = OK;
    *buffer = NULL;

    if (mInitialBufferSubmit) {
        // First call: hand all input buffers to the component and
        // ask it to start filling the output buffers.
        drainInputBuffers();
        if (mState == EXECUTING) {
            fillOutputBuffers();
        }
    }

    // Block until the component has filled at least one output buffer.
    while (mState != ERROR && !mNoMoreOutputData && mFilledBuffers.empty()) {
        if ((err = waitForBufferFilled_l()) != OK) {
            return err;
        }
    }

    if (mFilledBuffers.empty()) {
        return mSignalledEOS ? mFinalStatus : ERROR_END_OF_STREAM;
    }

    // Pop the index of the first filled output buffer and return its MediaBuffer.
    size_t index = *mFilledBuffers.begin();
    mFilledBuffers.erase(mFilledBuffers.begin());

    BufferInfo *info = &mPortBuffers[kPortIndexOutput].editItemAt(index);
    *buffer = info->mMediaBuffer;

    return OK;
}
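waitForBufferFilled_l simply blocks on the mBufferFilled condition variable that the OMX callback path signals once an output buffer is ready; roughly (simplified from the AOSP source):

status_t OMXCodec::waitForBufferFilled_l() {
    if (mIsEncoder) {
        // Encoders (e.g. time-lapse recording) may not produce output for a
        // long time, so wait without a timeout.
        return mBufferFilled.wait(mLock);
    }

    // Decoders: wait with a timeout so a stalled component is detected.
    status_t err = mBufferFilled.waitRelative(mLock, kBufferFilledEventTimeOutNs);
    if (err != OK) {
        CODEC_LOGE("Timed out waiting for output buffers");
    }
    return err;
}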
Two containers are involved here: mPortBuffers, an array of two Vector<BufferInfo>, and mFilledBuffers, a List<size_t> of output-buffer indices. mPortBuffers[0] holds the BufferInfos of the input port and mPortBuffers[1] those of the output port. Who pushes indices into mFilledBuffers is sketched after the definitions below.
enum {
kPortIndexInput = 0,
kPortIndexOutput = 1
};
enum PortStatus {
ENABLED,
DISABLING,
DISABLED,
ENABLING,
SHUTTING_DOWN,
};
enum BufferStatus {
OWNED_BY_US,
OWNED_BY_COMPONENT,
OWNED_BY_NATIVE_WINDOW,
OWNED_BY_CLIENT,
};
struct BufferInfo {
    IOMX::buffer_id mBuffer;
    BufferStatus mStatus;
    sp<IMemory> mMem;
    size_t mSize;
    void *mData;
    MediaBuffer *mMediaBuffer;
};
Vector<BufferInfo> mPortBuffers[2];
List<size_t> mFilledBuffers;
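The indices in mFilledBuffers are pushed by the OMX callback path: when the component reports FILL_BUFFER_DONE, OMXCodec::on_message finds the matching output BufferInfo, marks the valid range on its MediaBuffer, and queues the index. A trimmed sketch of that case (based on the AOSP OMXCodec::on_message, with error, flush and EOS handling omitted):

case omx_message::FILL_BUFFER_DONE:
{
    IOMX::buffer_id buffer = msg.u.extended_buffer_data.buffer;

    // Find which output BufferInfo this buffer_id belongs to.
    Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexOutput];
    size_t i = 0;
    while (i < buffers->size() && (*buffers)[i].mBuffer != buffer) {
        ++i;
    }

    BufferInfo *info = &buffers->editItemAt(i);
    info->mStatus = OWNED_BY_US;

    // Expose the decoded range through the wrapping MediaBuffer.
    MediaBuffer *buf = info->mMediaBuffer;
    buf->set_range(
            msg.u.extended_buffer_data.range_offset,
            msg.u.extended_buffer_data.range_length);

    // Make the buffer visible to read() and wake it up.
    mFilledBuffers.push_back(i);
    mBufferFilled.signal();
    break;
}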
drainInputBuffers iterates over mPortBuffers[kPortIndexInput] and calls drainInputBuffer(BufferInfo *info) on each entry, so that every input buffer is filled with source data and handed to the component via emptyBuffer. Inside drainInputBuffer(BufferInfo *info): how the buffer memory is shared can be seen from the BufferInfo initialization in OMXCodec::allocateBuffersOnPort(OMX_U32 portIndex) further below; in the end the decoder always works on the memory pointed to by header->pBuffer. How an input buffer is returned to us and resubmitted is sketched after the code.
void OMXCodec::drainInputBuffers() {
    CHECK(mState == EXECUTING || mState == RECONFIGURING);

    Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexInput];
    for (size_t i = 0; i < buffers->size(); ++i) {
        BufferInfo *info = &buffers->editItemAt(i);
        if (!drainInputBuffer(info)) {
            break;
        }
    }
}

bool OMXCodec::drainInputBuffer(BufferInfo *info) {
    for (;;) {
        MediaBuffer *srcBuffer;
        if (mSeekTimeUs >= 0) {
            err = mSource->read(&srcBuffer, &options);
        } else {
            err = mSource->read(&srcBuffer);
        }

        bool releaseBuffer = true;
        if (mIsEncoder && (mQuirks & kAvoidMemcopyInputRecordingFrames)) {
            // Avoid the memcpy: point the OMX buffer header directly at the
            // source data.
            CHECK(mOMXLivesLocally && offset == 0);

            OMX_BUFFERHEADERTYPE *header =
                (OMX_BUFFERHEADERTYPE *)info->mBuffer;
            CHECK(header->pBuffer == info->mData);

            header->pBuffer =
                (OMX_U8 *)srcBuffer->data() + srcBuffer->range_offset();

            releaseBuffer = false;
            info->mMediaBuffer = srcBuffer;
        } else {
            if (mFlags & kStoreMetaDataInVideoBuffers) {
                releaseBuffer = false;
                info->mMediaBuffer = srcBuffer;
            }

            if (mFlags & kUseSecureInputBuffers) {
                // Data in "info" is already provided at this time.
                releaseBuffer = false;
                CHECK(info->mMediaBuffer == NULL);
                info->mMediaBuffer = srcBuffer;
            } else {
                // Common case: copy the compressed frame into the OMX input buffer.
                CHECK(srcBuffer->data() != NULL);
                memcpy((uint8_t *)info->mData + offset,
                       (const uint8_t *)srcBuffer->data()
                           + srcBuffer->range_offset(),
                       srcBuffer->range_length());
            }
        }
        // ... (release of srcBuffer and the multi-frame packing / break logic
        //      are omitted in this excerpt)
    }

    // Hand the filled input buffer to the component.
    err = mOMX->emptyBuffer(
            mNode, info->mBuffer, 0, offset,
            flags, timestampUs);

    return true;
}
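When the component has consumed an input buffer it sends EMPTY_BUFFER_DONE back, and OMXCodec::on_message returns the buffer to our ownership and immediately refills it. A trimmed sketch of that case (based on the AOSP source, with the secure-buffer, flush and error paths omitted):

case omx_message::EMPTY_BUFFER_DONE:
{
    IOMX::buffer_id buffer = msg.u.buffer_data.buffer;

    // Find which input BufferInfo this buffer_id belongs to.
    Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexInput];
    size_t i = 0;
    while (i < buffers->size() && (*buffers)[i].mBuffer != buffer) {
        ++i;
    }

    BufferInfo *info = &buffers->editItemAt(i);
    info->mStatus = OWNED_BY_US;

    // Release the MediaBuffer that was wrapped/copied into this input buffer,
    // so it can go back to its MediaBufferGroup.
    if (info->mMediaBuffer != NULL) {
        info->mMediaBuffer->release();
        info->mMediaBuffer = NULL;
    }

    if (mPortStatus[kPortIndexInput] == ENABLED) {
        // Read the next frame from the source and resubmit the buffer.
        drainInputBuffer(info);
    }
    break;
}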
Take MPEG4 as an example and look at mSource->read. mSource here is the video track, in this case an MPEG4Source. The essential steps are:
1. err = mGroup->acquire_buffer(&mBuffer); — get a free MediaBuffer from the group;
2. ssize_t num_bytes_read = mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size); — read the sample out of the container (how offset and size are obtained is sketched after the code);
3. *out = mBuffer; — return the buffer to the caller.
==In short, read extracts one video frame from the overall stream, puts it into a buffer and returns it.==
status_t MPEG4Source::read(MediaBuffer **out, const ReadOptions *options) {
*out = NULL;
bool newBuffer = false;
if (mBuffer == NULL) {
newBuffer = true;
err = mGroup->acquire_buffer(&mBuffer);
}
if (!mIsAVC || mWantsNALFragments) {
if (newBuffer) {
ssize_t num_bytes_read = mDataSource->readAt(offset, (uint8_t *)mBuffer->data(), size);
}
if (!mIsAVC) {
*out = mBuffer;
mBuffer = NULL;
return OK;
}
MediaBuffer *clone = mBuffer->clone();
*out = clone;
return OK;
} else {
// Whole NAL units are returned but each fragment is prefixed by
// the start code (0x00 00 00 01).
ssize_t num_bytes_read = 0;
int32_t drm = 0;
bool usesDRM = (mFormat->findInt32(kKeyIsDRM, &drm) && drm != 0);
if (usesDRM) {
num_bytes_read =
mDataSource->readAt(offset, (uint8_t*)mBuffer->data(), size);
} else {
num_bytes_read = mDataSource->readAt(offset, mSrcBuffer, size);
}
*out = mBuffer;
mBuffer = NULL;
return OK;
}
}
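The offset and size used by readAt come from the sample table of the MP4 file; just before acquiring the buffer, read() looks them up roughly like this (a trimmed excerpt based on the AOSP MPEG4Extractor; variable names as in that source):

off64_t offset;
size_t size;
uint32_t cts;
bool isSyncSample;

// Map the current sample index to its byte range and composition time
// using the 'stbl' tables parsed from the container.
status_t err = mSampleTable->getMetaDataForSample(
        mCurrentSampleIndex, &offset, &size, &cts, &isSyncSample);

if (err != OK) {
    return err;
}

// The timestamp is attached to the MediaBuffer's metadata.
mBuffer->meta_data()->setInt64(
        kKeyTime, ((int64_t)cts * 1000000) / mTimescale);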
Video stream data is carried in MediaBuffer objects. MediaBuffer has four constructors: one wrapping externally supplied memory (mData), one that mallocs its own data, one wrapping an externally allocated GraphicBuffer, and one wrapping an externally allocated ABuffer. The accessors used above (data(), range_offset(), range_length()) are sketched after the constructors.
class MediaBuffer {
private:
    friend class MediaBufferGroup;
    friend class OMXDecoder;

    MediaBufferObserver *mObserver;
    MediaBuffer *mNextBuffer;
    int mRefCount;
    void *mData;
    size_t mSize, mRangeOffset, mRangeLength;
    sp<GraphicBuffer> mGraphicBuffer;
    sp<ABuffer> mBuffer;
    bool mOwnsData;
    sp<MetaData> mMetaData;
    MediaBuffer *mOriginal;
};
MediaBuffer::MediaBuffer(void *data, size_t size)
: mObserver(NULL),
mNextBuffer(NULL),
mRefCount(0),
mData(data),
mSize(size),
mRangeOffset(0),
mRangeLength(size),
mOwnsData(false),
mMetaData(new MetaData),
mOriginal(NULL) {
}
MediaBuffer::MediaBuffer(size_t size)
: mObserver(NULL),
mNextBuffer(NULL),
mRefCount(0),
mData(malloc(size)),
mSize(size),
mRangeOffset(0),
mRangeLength(size),
mOwnsData(true),
mMetaData(new MetaData),
mOriginal(NULL) {
}
MediaBuffer::MediaBuffer(const sp<GraphicBuffer>& graphicBuffer)
: mObserver(NULL),
mNextBuffer(NULL),
mRefCount(0),
mData(NULL),
mSize(1),
mRangeOffset(0),
mRangeLength(mSize),
mGraphicBuffer(graphicBuffer),
mOwnsData(false),
mMetaData(new MetaData),
mOriginal(NULL) {
}
MediaBuffer::MediaBuffer(const sp<ABuffer> &buffer)
: mObserver(NULL),
mNextBuffer(NULL),
mRefCount(0),
mData(buffer->data()),
mSize(buffer->size()),
mRangeOffset(0),
mRangeLength(mSize),
mBuffer(buffer),
mOwnsData(false),
mMetaData(new MetaData),
mOriginal(NULL) {
}
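The accessors used throughout the code above are thin wrappers over these members; roughly (simplified from the AOSP MediaBuffer implementation):

void *MediaBuffer::data() const {
    CHECK(mGraphicBuffer == NULL);
    return mData;
}

size_t MediaBuffer::size() const {
    CHECK(mGraphicBuffer == NULL);
    return mSize;
}

size_t MediaBuffer::range_offset() const {
    return mRangeOffset;
}

size_t MediaBuffer::range_length() const {
    return mRangeLength;
}

// set_range is what the FILL_BUFFER_DONE handler uses to mark the valid
// decoded region inside the buffer.
void MediaBuffer::set_range(size_t offset, size_t length) {
    CHECK(offset + length <= mSize);
    mRangeOffset = offset;
    mRangeLength = length;
}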
A MediaBuffer is obtained with err = mGroup->acquire_buffer(&mBuffer). mGroup is a MediaBufferGroup, which keeps its MediaBuffers in an internal linked list; acquire_buffer walks that list looking for a free (refcount 0) buffer. The MediaBuffer itself is created with new in MPEG4Source::start(MetaData *params). So in practice MediaBuffers are managed and recycled through a MediaBufferGroup; the release path that returns a buffer to the group is sketched after MPEG4Source::start below.
class MediaBufferGroup : public MediaBufferObserver {
public:
MediaBufferGroup();
~MediaBufferGroup();
void add_buffer(MediaBuffer *buffer);
// Blocks until a buffer is available and returns it to the caller,
// the returned buffer will have a reference count of 1.
status_t acquire_buffer(MediaBuffer **buffer);
protected:
virtual void signalBufferReturned(MediaBuffer *buffer);
private:
friend class MediaBuffer;
Mutex mLock;
Condition mCondition;
MediaBuffer *mFirstBuffer, *mLastBuffer;
MediaBufferGroup(const MediaBufferGroup &);
MediaBufferGroup &operator=(const MediaBufferGroup &);
};
MediaBufferGroup::MediaBufferGroup()
: mFirstBuffer(NULL),
mLastBuffer(NULL) {
}
MediaBufferGroup::~MediaBufferGroup() {
MediaBuffer *next;
for (MediaBuffer *buffer = mFirstBuffer; buffer != NULL;
buffer = next) {
next = buffer->nextBuffer();
CHECK_EQ(buffer->refcount(), 0);
buffer->setObserver(NULL);
buffer->release();
}
}
void MediaBufferGroup::add_buffer(MediaBuffer *buffer) {
Mutex::Autolock autoLock(mLock);
buffer->setObserver(this);
if (mLastBuffer) {
mLastBuffer->setNextBuffer(buffer);
} else {
mFirstBuffer = buffer;
}
mLastBuffer = buffer;
}
status_t MediaBufferGroup::acquire_buffer(MediaBuffer **out) {
Mutex::Autolock autoLock(mLock);
for (;;) {
for (MediaBuffer *buffer = mFirstBuffer;
buffer != NULL; buffer = buffer->nextBuffer()) {
if (buffer->refcount() == 0) {
buffer->add_ref();
buffer->reset();
*out = buffer;
goto exit;
}
}
// All buffers are in use. Block until one of them is returned to us.
mCondition.wait(mLock);
}
exit:
return OK;
}
void MediaBufferGroup::signalBufferReturned(MediaBuffer *) {
Mutex::Autolock autoLock(mLock);
mCondition.signal();
}
status_t MPEG4Source::start(MetaData *params) {
Mutex::Autolock autoLock(mLock);
mGroup = new MediaBufferGroup;
int32_t max_size;
CHECK(mFormat->findInt32(kKeyMaxInputSize, &max_size));
mGroup->add_buffer(new MediaBuffer(max_size));
mSrcBuffer = new uint8_t[max_size];
mStarted = true;
return OK;
}
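The other half of the cycle is release: when OMXCodec is done with the srcBuffer it calls release(), the reference count drops to zero, and the buffer's observer (the MediaBufferGroup) is notified, which signals the condition that acquire_buffer is waiting on. A rough sketch of that path (simplified from the AOSP MediaBuffer; the real implementation decrements the count atomically):

void MediaBuffer::release() {
    if (mObserver == NULL) {
        // Stand-alone buffer not owned by a group: just delete it.
        CHECK_EQ(mRefCount, 0);
        delete this;
        return;
    }

    int prevCount = mRefCount--;          // atomic in the real implementation
    if (prevCount == 1) {
        // Last reference gone: hand the buffer back to its MediaBufferGroup,
        // whose signalBufferReturned() wakes up acquire_buffer().
        mObserver->signalBufferReturned(this);
    }
}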
mPortBuffers is initialized when OMXCodec sets up its OMX node, by calling OMXCodec::allocateBuffersOnPort for each port. It first queries the component for the port definition to learn how many buffers are needed and how large each one must be. The useBuffer path, which makes header->pBuffer point into shared memory, is sketched after OMXNodeInstance::allocateBuffer below.
status_t OMXCodec::allocateBuffersOnPort(OMX_U32 portIndex) {
if (mNativeWindow != NULL && portIndex == kPortIndexOutput) {
return allocateOutputBuffersFromNativeWindow();
}
OMX_PARAM_PORTDEFINITIONTYPE def;
InitOMXParams(&def);
def.nPortIndex = portIndex;
err = mOMX->getParameter(
mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
size_t totalSize = def.nBufferCountActual * def.nBufferSize;
mDealer[portIndex] = new MemoryDealer(totalSize, "OMXCodec");
for (OMX_U32 i = 0; i < def.nBufferCountActual; ++i) {
sp<IMemory> mem = mDealer[portIndex]->allocate(def.nBufferSize);
CHECK(mem.get() != NULL);
BufferInfo info;
info.mData = NULL;
info.mSize = def.nBufferSize;
IOMX::buffer_id buffer;
if (portIndex == kPortIndexInput
&& ((mQuirks & kRequiresAllocateBufferOnInputPorts)
|| (mFlags & kUseSecureInputBuffers))) {
if (mOMXLivesLocally) {
mem.clear();
err = mOMX->allocateBuffer(
mNode, portIndex, def.nBufferSize, &buffer,
&info.mData);
} else {
err = mOMX->allocateBufferWithBackup(
mNode, portIndex, mem, &buffer);
}
} else if (portIndex == kPortIndexOutput
&& (mQuirks & kRequiresAllocateBufferOnOutputPorts)) {
if (mOMXLivesLocally) {
mem.clear();
err = mOMX->allocateBuffer(
mNode, portIndex, def.nBufferSize, &buffer,
&info.mData);
} else {
err = mOMX->allocateBufferWithBackup(
mNode, portIndex, mem, &buffer);
}
} else {
err = mOMX->useBuffer(mNode, portIndex, mem, &buffer);
}
if (mem != NULL) {
info.mData = mem->pointer();
}
info.mBuffer = buffer;
info.mStatus = OWNED_BY_US;
info.mMem = mem;
info.mMediaBuffer = NULL;
mPortBuffers[portIndex].push(info);
}
if (portIndex == kPortIndexInput && (mFlags & kUseSecureInputBuffers)) {
Vector<MediaBuffer *> buffers;
for (size_t i = 0; i < def.nBufferCountActual; ++i) {
const BufferInfo &info = mPortBuffers[kPortIndexInput].itemAt(i);
MediaBuffer *mbuf = new MediaBuffer(info.mData, info.mSize);
buffers.push(mbuf);
}
status_t err = mSource->setBuffers(buffers);
}
return OK;
}
status_t OMXNodeInstance::allocateBuffer(
OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
void **buffer_data) {
Mutex::Autolock autoLock(mLock);
BufferMeta *buffer_meta = new BufferMeta(size);
OMX_BUFFERHEADERTYPE *header;
OMX_ERRORTYPE err = OMX_AllocateBuffer(
mHandle, &header, portIndex, buffer_meta, size);
CHECK_EQ(header->pAppPrivate, buffer_meta);
*buffer = header;
*buffer_data = header->pBuffer;
return OK;
}
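For comparison, the useBuffer path (taken when none of the quirks above force allocateBuffer) wraps the shared IMemory handed out by the MemoryDealer, so header->pBuffer ends up pointing straight into that shared region; roughly (simplified from the AOSP OMXNodeInstance):

status_t OMXNodeInstance::useBuffer(
        OMX_U32 portIndex, const sp<IMemory> &params,
        OMX::buffer_id *buffer) {
    Mutex::Autolock autoLock(mLock);

    BufferMeta *buffer_meta = new BufferMeta(params);

    OMX_BUFFERHEADERTYPE *header;

    // pBuffer points into the IMemory shared with OMXCodec
    // (the same memory as info.mData / mem->pointer()).
    OMX_ERRORTYPE err = OMX_UseBuffer(
            mHandle, &header, portIndex, buffer_meta,
            params->size(),
            static_cast<OMX_U8 *>(params->pointer()));

    CHECK_EQ(header->pAppPrivate, buffer_meta);

    *buffer = header;
    return OK;
}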
fillOutputBuffers is the counterpart of the emptyBuffer path; its source is shown below.
void OMXCodec::fillOutputBuffers() {
CHECK_EQ((int)mState, (int)EXECUTING);
// This is a workaround for some decoders not properly reporting
// end-of-output-stream. If we own all input buffers and also own
// all output buffers and we already signalled end-of-input-stream,
// the end-of-output-stream is implied.
if (mSignalledEOS
&& countBuffersWeOwn(mPortBuffers[kPortIndexInput])
== mPortBuffers[kPortIndexInput].size()
&& countBuffersWeOwn(mPortBuffers[kPortIndexOutput])
== mPortBuffers[kPortIndexOutput].size()) {
mNoMoreOutputData = true;
mBufferFilled.signal();
return;
}
Vector<BufferInfo> *buffers = &mPortBuffers[kPortIndexOutput];
for (size_t i = 0; i < buffers->size(); ++i) {
BufferInfo *info = &buffers->editItemAt(i);
if (info->mStatus == OWNED_BY_US) {
fillOutputBuffer(&buffers->editItemAt(i));
}
}
}
void OMXCodec::fillOutputBuffer(BufferInfo *info) {
if (info->mMediaBuffer != NULL) {
sp<GraphicBuffer> graphicBuffer = info->mMediaBuffer->graphicBuffer();
if (graphicBuffer != 0) {
// When using a native buffer we need to lock the buffer before
// giving it to OMX.
CODEC_LOGV("Calling lockBuffer on %p", info->mBuffer);
int err = mNativeWindow->lockBuffer(mNativeWindow.get(),
graphicBuffer.get());
}
}
CODEC_LOGV("Calling fillBuffer on buffer %p", info->mBuffer);
status_t err = mOMX->fillBuffer(mNode, info->mBuffer);
info->mStatus = OWNED_BY_COMPONENT;
}