//下面是一个典型的播放序列:
MediaPlayer* player = new MediaPlayer();
player->setDataSource(url, header);
player->prepare();
player->start();
...
在 start(即 play)阶段会创建 AudioPlayer 对象,同时传入 AudioSink(实际上是 AudioOutput 对象)作为输出,
并启动该 AudioOutput 对象。
// Called with mLock held. Transitions the player into the PLAYING state:
// lazily prepares if needed, creates and starts the AudioPlayer (with
// mAudioSink as its output), kicks off video playback events, and reports
// DRM playback status and battery/decoder usage.
// Returns OK on success, or the error from prepare_l()/startAudioPlayer_l().
status_t AwesomePlayer::play_l() {
modifyFlags(SEEK_PREVIEW, CLEAR);
if (mFlags & PLAYING) {
// Already playing -- nothing to do.
return OK;
}
if (!(mFlags & PREPARED)) {
// Lazily run the prepare phase if the client never called prepare().
status_t err = prepare_l();
if (err != OK) {
return err;
}
}
modifyFlags(PLAYING, SET);
modifyFlags(FIRST_FRAME, SET);
if (mDecryptHandle != NULL) {
// Tell the DRM agent playback has (re)started at the current position.
int64_t position;
getPosition(&position);
mDrmManagerClient->setPlaybackStatus(mDecryptHandle,
Playback::START, position / 1000);
}
if (mAudioSource != NULL) {
if (mAudioPlayer == NULL) {
if (mAudioSink != NULL) {
bool allowDeepBuffering;
int64_t cachedDurationUs;
bool eos;
// Deep buffering is only allowed for audio-only content that is
// long enough (or sufficiently cached) to benefit from it.
if (mVideoSource == NULL
&& (mDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US ||
(getCachedDuration_l(&cachedDurationUs, &eos) &&
cachedDurationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US))) {
allowDeepBuffering = true;
} else {
allowDeepBuffering = false;
}
// Create the AudioPlayer, wiring mAudioSink (an AudioOutput) as output.
mAudioPlayer = new AudioPlayer(mAudioSink, allowDeepBuffering, this);
mAudioPlayer->setSource(mAudioSource);
// Audio drives the clock used for A/V sync.
mTimeSource = mAudioPlayer;
// If there was a seek request before we ever started,
// honor the request now.
// Make sure to do this before starting the audio player
// to avoid a race condition.
seekAudioIfNecessary_l();
}
}
CHECK(!(mFlags & AUDIO_RUNNING));
if (mVideoSource == NULL) {
// We don't want to post an error notification at this point,
// the error returned from MediaPlayer::start() will suffice.
// Audio-only content: start the AudioPlayer immediately.
status_t err = startAudioPlayer_l(
false /* sendErrorNotification */);
if (err != OK) {
// Roll back: discard the audio player and clear playback state.
delete mAudioPlayer;
mAudioPlayer = NULL;
modifyFlags((PLAYING | FIRST_FRAME), CLEAR);
if (mDecryptHandle != NULL) {
mDrmManagerClient->setPlaybackStatus(
mDecryptHandle, Playback::STOP, 0);
}
return err;
}
}
}
if (mTimeSource == NULL && mAudioPlayer == NULL) {
// No audio clock available; fall back to the system clock.
mTimeSource = &mSystemTimeSource;
}
if (mVideoSource != NULL) {
// Kick off video playback
postVideoEvent_l();
if (mAudioSource != NULL && mVideoSource != NULL) {
postVideoLagEvent_l();
}
}
if (mFlags & AT_EOS) {
// Legacy behaviour, if a stream finishes playing and then
// is started again, we play from the start...
seekTo_l(0);
}
// Record which decoders are active for battery usage accounting.
uint32_t params = IMediaPlayerService::kBatteryDataCodecStarted
| IMediaPlayerService::kBatteryDataTrackDecoder;
if ((mAudioSource != NULL) && (mAudioSource != mAudioTrack)) {
params |= IMediaPlayerService::kBatteryDataTrackAudio;
}
if (mVideoSource != NULL) {
params |= IMediaPlayerService::kBatteryDataTrackVideo;
}
addBatteryData(params);
return OK;
}
AudioOutput 对象是在 setDataSource 阶段创建的,它会被赋值给 AwesomePlayer 对象的
mAudioSink 变量以及 MediaPlayerService::Client 对象的 mAudioOutput 变量。
// Called during the setDataSource phase: creates the player of the
// requested type and, unless the player manages its own hardware output,
// attaches a newly created AudioOutput as its audio sink (also kept in
// mAudioOutput for later volume/parameter control).
// NOTE(review): the sp<>/static_cast<> template arguments below had been
// stripped by text extraction; restored to match the AOSP
// MediaPlayerService implementation -- confirm against the full source.
sp<MediaPlayerBase> MediaPlayerService::Client::setDataSource_pre(
player_type playerType)
{
ALOGV("player type = %d", playerType);
// create the right type of player
sp<MediaPlayerBase> p = createPlayer(playerType);
if (p == NULL) {
return p;
}
if (!p->hardwareOutput()) {
// Software output path: route audio through an AudioOutput instance.
mAudioOutput = new AudioOutput(mAudioSessionId);
static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
}
return p;
}
在 play 阶段构造完 AudioPlayer 之后,接着启动该 AudioPlayer 对象:
// Called with mLock held, after play_l() has constructed mAudioPlayer.
// Starts the AudioPlayer on first use, or resumes it after a pause.
// Returns OK (a no-op when there is no audio track/player) or the error
// reported by AudioPlayer::start(), optionally notifying the listener.
status_t AwesomePlayer::startAudioPlayer_l(bool sendErrorNotification) {
CHECK(!(mFlags & AUDIO_RUNNING));
if (mAudioSource == NULL || mAudioPlayer == NULL) {
return OK;
}
if (!(mFlags & AUDIOPLAYER_STARTED)) {
// First start: remember whether a seek was pending so we can report
// its completion once start() has consumed it.
bool wasSeeking = mAudioPlayer->isSeeking();
// We've already started the MediaSource in order to enable
// the prefetcher to read its data.
status_t err = mAudioPlayer->start(
true /* sourceAlreadyStarted */);
if (err != OK) {
if (sendErrorNotification) {
notifyListener_l(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
}
return err;
}
modifyFlags(AUDIOPLAYER_STARTED, SET);
if (wasSeeking) {
CHECK(!mAudioPlayer->isSeeking());
// We will have finished the seek while starting the audio player.
postAudioSeekComplete();
}
} else {
mAudioPlayer->resume();
}
modifyFlags(AUDIO_RUNNING, SET);
mWatchForAudioEOS = true;
return OK;
}
// Starts audio playback. If 'sourceAlreadyStarted' is false the decoder
// source is started first. Performs a priming read (which may legally
// report INFO_FORMAT_CHANGED), verifies the source delivers raw PCM, then
// either opens and starts the AudioSink (AudioOutput) with
// AudioSinkCallback registered, or -- when no sink is set -- creates and
// starts a raw AudioTrack. On failure the priming buffer is released and
// the source stopped (if we started it) before the error is returned.
// NOTE(review): the template argument on 'format' had been stripped by
// text extraction; restored to sp<MetaData> per MediaSource::getFormat().
status_t AudioPlayer::start(bool sourceAlreadyStarted) {
CHECK(!mStarted);
CHECK(mSource != NULL);
status_t err;
if (!sourceAlreadyStarted) {
err = mSource->start();
if (err != OK) {
return err;
}
}
// We allow an optional INFO_FORMAT_CHANGED at the very beginning
// of playback, if there is one, getFormat below will retrieve the
// updated format, if there isn't, we'll stash away the valid buffer
// of data to be used on the first audio callback.
CHECK(mFirstBuffer == NULL);
MediaSource::ReadOptions options;
if (mSeeking) {
options.setSeekTo(mSeekTimeUs);
mSeeking = false;
}
mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
ALOGV("INFO_FORMAT_CHANGED!!!");
CHECK(mFirstBuffer == NULL);
mFirstBufferResult = OK;
mIsFirstBuffer = false;
} else {
mIsFirstBuffer = true;
}
sp<MetaData> format = mSource->getFormat();
const char *mime;
bool success = format->findCString(kKeyMIMEType, &mime);
CHECK(success);
CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW));
success = format->findInt32(kKeySampleRate, &mSampleRate);
CHECK(success);
int32_t numChannels, channelMask;
success = format->findInt32(kKeyChannelCount, &numChannels);
CHECK(success);
if(!format->findInt32(kKeyChannelMask, &channelMask)) {
// log only when there's a risk of ambiguity of channel mask selection
ALOGI_IF(numChannels > 2,
"source format didn't specify channel mask, using (%d) channel order", numChannels);
channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
}
if (mAudioSink.get() != NULL) {
// Start the AudioOutput and register AudioSinkCallback; whenever the
// sink drains its buffer it calls back into AudioPlayer for more data.
status_t err = mAudioSink->open(
mSampleRate, numChannels, channelMask, AUDIO_FORMAT_PCM_16_BIT,
DEFAULT_AUDIOSINK_BUFFERCOUNT,
&AudioPlayer::AudioSinkCallback,
this,
(mAllowDeepBuffering ?
AUDIO_OUTPUT_FLAG_DEEP_BUFFER :
AUDIO_OUTPUT_FLAG_NONE));
if (err != OK) {
// Clean up the priming buffer and, if we started it, the source.
if (mFirstBuffer != NULL) {
mFirstBuffer->release();
mFirstBuffer = NULL;
}
if (!sourceAlreadyStarted) {
mSource->stop();
}
return err;
}
mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
mFrameSize = mAudioSink->frameSize();
mAudioSink->start();
} else {
// playing to an AudioTrack, set up mask if necessary
audio_channel_mask_t audioMask = channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER ?
audio_channel_out_mask_from_count(numChannels) : channelMask;
if (0 == audioMask) {
return BAD_VALUE;
}
mAudioTrack = new AudioTrack(
AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);
if ((err = mAudioTrack->initCheck()) != OK) {
delete mAudioTrack;
mAudioTrack = NULL;
if (mFirstBuffer != NULL) {
mFirstBuffer->release();
mFirstBuffer = NULL;
}
if (!sourceAlreadyStarted) {
mSource->stop();
}
return err;
}
mLatencyUs = (int64_t)mAudioTrack->latency() * 1000;
mFrameSize = mAudioTrack->frameSize();
// In the sink path the equivalent is AudioOutput::start(), which in
// turn calls AudioTrack::start() to begin pulling data.
mAudioTrack->start();
}
mStarted = true;
mPinnedTimeUs = -1ll;
return OK;
}
// Trampoline invoked by the AudioSink when it needs more PCM data.
// 'cookie' carries the owning AudioPlayer registered in start(); returns
// the number of bytes actually written into 'buffer' (may be less than
// 'size', e.g. at end of stream).
// Fixed: use static_cast instead of a C-style cast on the cookie.
size_t AudioPlayer::AudioSinkCallback(
MediaPlayerBase::AudioSink *audioSink,
void *buffer, size_t size, void *cookie) {
AudioPlayer *me = static_cast<AudioPlayer *>(cookie);
return me->fillBuffer(buffer, size);
}
对于 fillBuffer 函数,需要同时关注数据的输出与输入:通过分析可以发现,数据的输出就是 AudioOutput 对象,
而数据的输入则是 mSource 对象,该对象的由来将在下文继续分析。
有数据到来时,callback 函数会被循环调用,进而调用 fillBuffer 函数填充数据。
此函数的返回值 size_done 代表已经处理的数据总大小,与传入的缓冲区大小 size(第二个参数)不一定相同。
// Copies up to 'size' bytes of decoded PCM from mSource into 'data'.
// Invoked repeatedly from the audio sink/track callback. Returns the
// number of bytes actually written (size_done), which may be less than
// 'size' on EOS or error. Also maintains the position timestamps used
// for A/V sync and posts EOS / seek-complete notifications.
size_t AudioPlayer::fillBuffer(void *data, size_t size) {
if (mNumFramesPlayed == 0) {
ALOGV("AudioCallback");
}
if (mReachedEOS) {
return 0;
}
// Notifications are posted at the bottom, after mLock is released.
bool postSeekComplete = false;
bool postEOS = false;
int64_t postEOSDelayUs = 0;
size_t size_done = 0;
size_t size_remaining = size;
while (size_remaining > 0) {
MediaSource::ReadOptions options;
{
Mutex::Autolock autoLock(mLock);
if (mSeeking) {
// A seek arrived: drop the stashed first buffer and the current
// input buffer, then ask the source to seek on the next read.
if (mIsFirstBuffer) {
if (mFirstBuffer != NULL) {
mFirstBuffer->release();
mFirstBuffer = NULL;
}
mIsFirstBuffer = false;
}
options.setSeekTo(mSeekTimeUs);
if (mInputBuffer != NULL) {
mInputBuffer->release();
mInputBuffer = NULL;
}
mSeeking = false;
if (mObserver) {
postSeekComplete = true;
}
}
}
if (mInputBuffer == NULL) {
status_t err;
if (mIsFirstBuffer) {
// Consume the buffer stashed during start().
mInputBuffer = mFirstBuffer;
mFirstBuffer = NULL;
err = mFirstBufferResult;
mIsFirstBuffer = false;
} else {
// Read the next decoded buffer from the source (the audio decoder).
err = mSource->read(&mInputBuffer, &options);
}
CHECK((err == OK && mInputBuffer != NULL)
|| (err != OK && mInputBuffer == NULL));
Mutex::Autolock autoLock(mLock);
if (err != OK) {
if (mObserver && !mReachedEOS) {
// We don't want to post EOS right away but only
// after all frames have actually been played out.
// These are the number of frames submitted to the
// AudioTrack that you haven't heard yet.
uint32_t numFramesPendingPlayout =
getNumFramesPendingPlayout();
// These are the number of frames we're going to
// submit to the AudioTrack by returning from this
// callback.
uint32_t numAdditionalFrames = size_done / mFrameSize;
numFramesPendingPlayout += numAdditionalFrames;
int64_t timeToCompletionUs =
(1000000ll * numFramesPendingPlayout) / mSampleRate;
ALOGV("total number of frames played: %lld (%lld us)",
(mNumFramesPlayed + numAdditionalFrames),
1000000ll * (mNumFramesPlayed + numAdditionalFrames)
/ mSampleRate);
ALOGV("%d frames left to play, %lld us (%.2f secs)",
numFramesPendingPlayout,
timeToCompletionUs, timeToCompletionUs / 1E6);
postEOS = true;
if (mAudioSink->needsTrailingPadding()) {
postEOSDelayUs = timeToCompletionUs + mLatencyUs;
} else {
postEOSDelayUs = 0;
}
}
mReachedEOS = true;
mFinalStatus = err;
break;
}
if (mAudioSink != NULL) {
mLatencyUs = (int64_t)mAudioSink->latency() * 1000;
} else {
mLatencyUs = (int64_t)mAudioTrack->latency() * 1000;
}
// Media-time timestamp of this buffer as stored in the file.
CHECK(mInputBuffer->meta_data()->findInt64(
kKeyTime, &mPositionTimeMediaUs));
// Real playback-position timestamp of this buffer.
mPositionTimeRealUs =
((mNumFramesPlayed + size_done / mFrameSize) * 1000000)
/ mSampleRate;
// These two timestamps are the basis for the A/V sync computed in
// AwesomePlayer::onVideoEvent().
ALOGV("buffer->size() = %d, "
"mPositionTimeMediaUs=%.2f mPositionTimeRealUs=%.2f",
mInputBuffer->range_length(),
mPositionTimeMediaUs / 1E6, mPositionTimeRealUs / 1E6);
}
if (mInputBuffer->range_length() == 0) {
// Buffer exhausted -- release it and fetch another.
mInputBuffer->release();
mInputBuffer = NULL;
continue;
}
// Copy as much as fits from the current input buffer.
size_t copy = size_remaining;
if (copy > mInputBuffer->range_length()) {
copy = mInputBuffer->range_length();
}
memcpy((char *)data + size_done,
(const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
copy);
mInputBuffer->set_range(mInputBuffer->range_offset() + copy,
mInputBuffer->range_length() - copy);
size_done += copy;
size_remaining -= copy;
}
{
Mutex::Autolock autoLock(mLock);
mNumFramesPlayed += size_done / mFrameSize;
mNumFramesPlayedSysTimeUs = ALooper::GetNowUs();
if (mReachedEOS) {
mPinnedTimeUs = mNumFramesPlayedSysTimeUs;
} else {
mPinnedTimeUs = -1ll;
}
}
// Post notifications without holding mLock.
if (postEOS) {
mObserver->postAudioEOS(postEOSDelayUs);
}
if (postSeekComplete) {
mObserver->postAudioSeekComplete();
}
return size_done;
}
在 prepare 阶段可以发现,数据输入 mSource 提供的是解码之后的数据,该 Decoder 是通过 OMXCodec 构造出来的:
// Called with mLock held: flags the player as PREPARING and queues an
// asynchronous event that will run onPrepareAsyncEvent() on the event
// queue's thread. Always returns OK.
status_t AwesomePlayer::prepareAsync_l() {
modifyFlags(PREPARING, SET);
mAsyncPrepareEvent =
new AwesomeEvent(this, &AwesomePlayer::onPrepareAsyncEvent);
mQueue.postEvent(mAsyncPrepareEvent);
return OK;
}
// Runs on the event queue as posted by prepareAsync_l(). Instantiates the
// video and audio decoders for any tracks that do not yet have a source;
// on failure aborts the prepare phase with the decoder's error.
// NOTE(review): this excerpt appears to omit the tail of the AOSP
// implementation (finishing the async prepare) -- confirm against the
// full source file.
void AwesomePlayer::onPrepareAsyncEvent() {
Mutex::Autolock autoLock(mLock);
if (mVideoTrack != NULL && mVideoSource == NULL) {
status_t err = initVideoDecoder();
if (err != OK) {
abortPrepare(err);
return;
}
}
if (mAudioTrack != NULL && mAudioSource == NULL) {
status_t err = initAudioDecoder();
if (err != OK) {
abortPrepare(err);
return;
}
}
}
// Creates the audio decoder (mAudioSource) for the extracted audio track.
// Raw PCM tracks are used directly; everything else goes through an
// OMXCodec decoder built from the track's format. Also picks up the
// track duration and records the decoder component name for stats.
// Returns OK, the decoder's start() error, or UNKNOWN_ERROR if no decoder
// could be created (except for QCELP, which is tolerated).
// NOTE(review): the template argument on 'meta' had been stripped by text
// extraction; restored to sp<MetaData> per MediaSource::getFormat().
status_t AwesomePlayer::initAudioDecoder() {
ATRACE_CALL();
sp<MetaData> meta = mAudioTrack->getFormat();
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
// Already PCM -- no decoder needed, use the track directly.
mAudioSource = mAudioTrack;
} else {
// Build a decoder matching the extracted track's format.
mAudioSource = OMXCodec::Create(
mClient.interface(), mAudioTrack->getFormat(),
false, // createEncoder
mAudioTrack);
}
if (mAudioSource != NULL) {
int64_t durationUs;
if (mAudioTrack->getFormat()->findInt64(kKeyDuration, &durationUs)) {
Mutex::Autolock autoLock(mMiscStateLock);
if (mDurationUs < 0 || durationUs > mDurationUs) {
mDurationUs = durationUs;
}
}
status_t err = mAudioSource->start();
if (err != OK) {
mAudioSource.clear();
return err;
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_QCELP)) {
// For legacy reasons we're simply going to ignore the absence
// of an audio decoder for QCELP instead of aborting playback
// altogether.
return OK;
}
if (mAudioSource != NULL) {
// Record the selected decoder component for the stats dump.
Mutex::Autolock autoLock(mStatsLock);
TrackStat *stat = &mStats.mTracks.editItemAt(mStats.mAudioTrackIndex);
const char *component;
if (!mAudioSource->getFormat()
->findCString(kKeyDecoderComponent, &component)) {
component = "none";
}
stat->mDecoderName = component;
}
return mAudioSource != NULL ? OK : UNKNOWN_ERROR;
}
void AwesomePlayer::setAudioSource(sp source) {
CHECK(source != NULL);
mAudioTrack = source;
}
下面分析 mAudioTrack 的由来:它是在 AwesomePlayer 设置数据源时通过 extractor 解析出来的。
// Wires up the tracks found by the extractor: sums per-track bitrates
// into mBitrate (-1 if any track lacks one), selects the first video
// track, the first audio track and any 3GPP text tracks, records
// per-track stats, and enables auto-looping for vorbis ringtones.
// Fails if neither audio nor video is present.
// NOTE(review): sp<> template arguments below had been stripped by text
// extraction; restored to match the AOSP AwesomePlayer implementation.
status_t AwesomePlayer::setDataSource_l(const sp<MediaExtractor> &extractor) {
// Attempt to approximate overall stream bitrate by summing all
// tracks' individual bitrates, if not all of them advertise bitrate,
// we have to fail.
int64_t totalBitRate = 0;
mExtractor = extractor;
for (size_t i = 0; i < extractor->countTracks(); ++i) {
sp<MetaData> meta = extractor->getTrackMetaData(i);
int32_t bitrate;
if (!meta->findInt32(kKeyBitRate, &bitrate)) {
const char *mime;
CHECK(meta->findCString(kKeyMIMEType, &mime));
ALOGV("track of type '%s' does not publish bitrate", mime);
totalBitRate = -1;
break;
}
totalBitRate += bitrate;
}
mBitrate = totalBitRate;
ALOGV("mBitrate = %lld bits/sec", mBitrate);
{
Mutex::Autolock autoLock(mStatsLock);
mStats.mBitrate = mBitrate;
mStats.mTracks.clear();
mStats.mAudioTrackIndex = -1;
mStats.mVideoTrackIndex = -1;
}
bool haveAudio = false;
bool haveVideo = false;
for (size_t i = 0; i < extractor->countTracks(); ++i) {
sp<MetaData> meta = extractor->getTrackMetaData(i);
const char *_mime;
CHECK(meta->findCString(kKeyMIMEType, &_mime));
String8 mime = String8(_mime);
if (!haveVideo && !strncasecmp(mime.string(), "video/", 6)) {
setVideoSource(extractor->getTrack(i));
haveVideo = true;
// Set the presentation/display size
int32_t displayWidth, displayHeight;
bool success = meta->findInt32(kKeyDisplayWidth, &displayWidth);
if (success) {
success = meta->findInt32(kKeyDisplayHeight, &displayHeight);
}
if (success) {
mDisplayWidth = displayWidth;
mDisplayHeight = displayHeight;
}
{
Mutex::Autolock autoLock(mStatsLock);
mStats.mVideoTrackIndex = mStats.mTracks.size();
mStats.mTracks.push();
TrackStat *stat =
&mStats.mTracks.editItemAt(mStats.mVideoTrackIndex);
stat->mMIME = mime.string();
}
} else if (!haveAudio && !strncasecmp(mime.string(), "audio/", 6)) {
// Hand the demuxed audio track to mAudioTrack via setAudioSource().
setAudioSource(extractor->getTrack(i));
haveAudio = true;
mActiveAudioTrackIndex = i;
{
Mutex::Autolock autoLock(mStatsLock);
mStats.mAudioTrackIndex = mStats.mTracks.size();
mStats.mTracks.push();
TrackStat *stat =
&mStats.mTracks.editItemAt(mStats.mAudioTrackIndex);
stat->mMIME = mime.string();
}
if (!strcasecmp(mime.string(), MEDIA_MIMETYPE_AUDIO_VORBIS)) {
// Only do this for vorbis audio, none of the other audio
// formats even support this ringtone specific hack and
// retrieving the metadata on some extractors may turn out
// to be very expensive.
sp<MetaData> fileMeta = extractor->getMetaData();
int32_t loop;
if (fileMeta != NULL
&& fileMeta->findInt32(kKeyAutoLoop, &loop) && loop != 0) {
modifyFlags(AUTO_LOOPING, SET);
}
}
} else if (!strcasecmp(mime.string(), MEDIA_MIMETYPE_TEXT_3GPP)) {
addTextSource_l(i, extractor->getTrack(i));
}
}
if (!haveAudio && !haveVideo) {
if (mWVMExtractor != NULL) {
return mWVMExtractor->getError();
} else {
return UNKNOWN_ERROR;
}
}
mExtractorFlags = extractor->flags();
return OK;
}
// Builds a MediaExtractor matching the data source's container format and
// delegates to the extractor-based overload; checks DRM status first if
// the extractor flags the content as DRM-protected.
// NOTE(review): sp<> template arguments had been stripped by text
// extraction; restored to match the AOSP declaration.
status_t AwesomePlayer::setDataSource_l(
const sp<DataSource> &dataSource) {
sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource);
if (extractor == NULL) {
return UNKNOWN_ERROR;
}
if (extractor->getDrmFlag()) {
checkDrmStatus(dataSource);
}
return setDataSource_l(extractor);
}
// Factory: sniffs the container format of 'source' (when 'mime' is NULL)
// and instantiates the matching extractor. DRM-wrapped MIME types of the
// form "drm+type+original" are unwrapped first; container-based DRM marks
// the resulting extractor's DRM flag. Returns NULL for unrecognized
// content.
// NOTE(review): sp<> template arguments had been stripped by text
// extraction; restored to match the AOSP MediaExtractor implementation.
sp<MediaExtractor> MediaExtractor::Create(
const sp<DataSource> &source, const char *mime) {
sp<AMessage> meta;
String8 tmp;
if (mime == NULL) {
float confidence;
if (!source->sniff(&tmp, &confidence, &meta)) {
ALOGV("FAILED to autodetect media content.");
return NULL;
}
mime = tmp.string();
ALOGV("Autodetected media content as '%s' with confidence %.2f",
mime, confidence);
}
bool isDrm = false;
// DRM MIME type syntax is "drm+type+original" where
// type is "es_based" or "container_based" and
// original is the content's cleartext MIME type
if (!strncmp(mime, "drm+", 4)) {
const char *originalMime = strchr(mime+4, '+');
if (originalMime == NULL) {
// second + not found
return NULL;
}
++originalMime;
if (!strncmp(mime, "drm+es_based+", 13)) {
// DRMExtractor sets container metadata kKeyIsDRM to 1
return new DRMExtractor(source, originalMime);
} else if (!strncmp(mime, "drm+container_based+", 20)) {
mime = originalMime;
isDrm = true;
} else {
return NULL;
}
}
MediaExtractor *ret = NULL;
if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG4)
|| !strcasecmp(mime, "audio/mp4")) {
ret = new MPEG4Extractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
ret = new MP3Extractor(source, meta);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_NB)
|| !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AMR_WB)) {
ret = new AMRExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)) {
ret = new FLACExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_WAV)) {
ret = new WAVExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_OGG)) {
ret = new OggExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MATROSKA)) {
ret = new MatroskaExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2TS)) {
ret = new MPEG2TSExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_WVM)) {
// Return now. WVExtractor should not have the DrmFlag set in the block below.
return new WVMExtractor(source);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC_ADTS)) {
ret = new AACExtractor(source, meta);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2PS)) {
ret = new MPEG2PSExtractor(source);
}
if (ret != NULL) {
if (isDrm) {
ret->setDrmFlag(true);
} else {
ret->setDrmFlag(false);
}
}
return ret;
}
// Factory that instantiates a decoder (or encoder) MediaSource for the
// given format: enumerates matching codec components, optionally prefers
// a software encoder, then allocates and configures an OMX node for each
// candidate in turn. Returns the first component that configures
// successfully, or NULL if none does.
// NOTE(review): sp<>/Vector<> template arguments had been stripped by
// text extraction; restored to match the AOSP OMXCodec implementation --
// confirm against the full source.
sp<MediaSource> OMXCodec::Create(
const sp<IOMX> &omx,
const sp<MetaData> &meta, bool createEncoder,
const sp<MediaSource> &source,
const char *matchComponentName,
uint32_t flags,
const sp<ANativeWindow> &nativeWindow) {
int32_t requiresSecureBuffers;
if (source->getFormat()->findInt32(
kKeyRequiresSecureBuffers,
&requiresSecureBuffers)
&& requiresSecureBuffers) {
// Secure content: skip codec-specific data and use secure buffers.
flags |= kIgnoreCodecSpecificData;
flags |= kUseSecureInputBuffers;
}
const char *mime;
bool success = meta->findCString(kKeyMIMEType, &mime);
CHECK(success);
Vector<CodecNameAndQuirks> matchingCodecs;
findMatchingCodecs(
mime, createEncoder, matchComponentName, flags, &matchingCodecs);
if (matchingCodecs.isEmpty()) {
ALOGV("No matching codecs! (mime: %s, createEncoder: %s, "
"matchComponentName: %s, flags: 0x%x)",
mime, createEncoder ? "true" : "false", matchComponentName, flags);
return NULL;
}
sp<OMXCodecObserver> observer = new OMXCodecObserver;
IOMX::node_id node = 0;
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
const char *componentNameBase = matchingCodecs[i].mName.string();
uint32_t quirks = matchingCodecs[i].mQuirks;
const char *componentName = componentNameBase;
AString tmp;
if (flags & kUseSecureInputBuffers) {
tmp = componentNameBase;
tmp.append(".secure");
componentName = tmp.c_str();
}
if (createEncoder) {
// Prefer a software encoder implementation when one exists.
sp<MediaSource> softwareCodec =
InstantiateSoftwareEncoder(componentName, source, meta);
if (softwareCodec != NULL) {
ALOGV("Successfully allocated software codec '%s'", componentName);
return softwareCodec;
}
}
ALOGV("Attempting to allocate OMX node '%s'", componentName);
if (!createEncoder
&& (quirks & kOutputBuffersAreUnreadable)
&& (flags & kClientNeedsFramebuffer)) {
if (strncmp(componentName, "OMX.SEC.", 8)) {
// For OMX.SEC.* decoders we can enable a special mode that
// gives the client access to the framebuffer contents.
ALOGW("Component '%s' does not give the client access to "
"the framebuffer contents. Skipping.",
componentName);
continue;
}
}
status_t err = omx->allocateNode(componentName, observer, &node);
if (err == OK) {
ALOGV("Successfully allocated OMX node '%s'", componentName);
sp<OMXCodec> codec = new OMXCodec(
omx, node, quirks, flags,
createEncoder, mime, componentName,
source, nativeWindow);
observer->setCodec(codec);
err = codec->configureCodec(meta);
if (err == OK) {
if (!strcmp("OMX.Nvidia.mpeg2v.decode", componentName)) {
codec->mFlags |= kOnlySubmitOneInputBufferAtOneTime;
}
return codec;
}
ALOGV("Failed to configure codec '%s'", componentName);
}
}
return NULL;
}
// Fetches the next decoded output buffer for the client (e.g. AudioPlayer).
// Handles the initial input/output buffer submission, seek-triggered port
// flushes and output-format changes; blocks via waitForBufferFilled_l()
// until a filled output buffer, EOS or an error is available. The returned
// MediaBuffer is ref-counted and owned by the client until released.
status_t OMXCodec::read(
MediaBuffer **buffer, const ReadOptions *options) {
status_t err = OK;
*buffer = NULL;
Mutex::Autolock autoLock(mLock);
if (mState != EXECUTING && mState != RECONFIGURING) {
return UNKNOWN_ERROR;
}
bool seeking = false;
int64_t seekTimeUs;
ReadOptions::SeekMode seekMode;
if (options && options->getSeekTo(&seekTimeUs, &seekMode)) {
seeking = true;
}
if (mInitialBufferSubmit) {
// First read: prime the codec's input and output ports.
mInitialBufferSubmit = false;
if (seeking) {
CHECK(seekTimeUs >= 0);
mSeekTimeUs = seekTimeUs;
mSeekMode = seekMode;
// There's no reason to trigger the code below, there's
// nothing to flush yet.
seeking = false;
mPaused = false;
}
drainInputBuffers();
if (mState == EXECUTING) {
// Otherwise mState == RECONFIGURING and this code will trigger
// after the output port is reenabled.
fillOutputBuffers();
}
}
if (seeking) {
while (mState == RECONFIGURING) {
if ((err = waitForBufferFilled_l()) != OK) {
return err;
}
}
if (mState != EXECUTING) {
return UNKNOWN_ERROR;
}
CODEC_LOGV("seeking to %lld us (%.2f secs)", seekTimeUs, seekTimeUs / 1E6);
mSignalledEOS = false;
CHECK(seekTimeUs >= 0);
mSeekTimeUs = seekTimeUs;
mSeekMode = seekMode;
// Discard already-decoded output and flush both ports; emulate the
// flush-complete callbacks for ports that had nothing to flush.
mFilledBuffers.clear();
CHECK_EQ((int)mState, (int)EXECUTING);
bool emulateInputFlushCompletion = !flushPortAsync(kPortIndexInput);
bool emulateOutputFlushCompletion = !flushPortAsync(kPortIndexOutput);
if (emulateInputFlushCompletion) {
onCmdComplete(OMX_CommandFlush, kPortIndexInput);
}
if (emulateOutputFlushCompletion) {
onCmdComplete(OMX_CommandFlush, kPortIndexOutput);
}
while (mSeekTimeUs >= 0) {
// Wait until the pending seek has been consumed by the input path.
if ((err = waitForBufferFilled_l()) != OK) {
return err;
}
}
}
while (mState != ERROR && !mNoMoreOutputData && mFilledBuffers.empty()) {
if ((err = waitForBufferFilled_l()) != OK) {
return err;
}
}
if (mState == ERROR) {
return UNKNOWN_ERROR;
}
if (mFilledBuffers.empty()) {
// No more data: report EOS (or the codec's recorded final status).
return mSignalledEOS ? mFinalStatus : ERROR_END_OF_STREAM;
}
if (mOutputPortSettingsHaveChanged) {
mOutputPortSettingsHaveChanged = false;
return INFO_FORMAT_CHANGED;
}
// Hand the oldest filled output buffer over to the client.
size_t index = *mFilledBuffers.begin();
mFilledBuffers.erase(mFilledBuffers.begin());
BufferInfo *info = &mPortBuffers[kPortIndexOutput].editItemAt(index);
CHECK_EQ((int)info->mStatus, (int)OWNED_BY_US);
info->mStatus = OWNED_BY_CLIENT;
info->mMediaBuffer->add_ref();
if (mSkipCutBuffer != NULL) {
mSkipCutBuffer->submit(info->mMediaBuffer);
}
*buffer = info->mMediaBuffer;
return OK;
}