Android OMX
Android Audio Playback
How media-stream buffers travel through AwesomePlayer
[Flow diagram of the audio playback path](https://www.processon.com/chart_image/5821cd85e4b0ffc8195fdc44.png)
Video playback was analyzed earlier; now let's look at how audio is played. Decoded audio data is handed to AudioFlinger for processing, and AudioFlinger is the component that talks to the hardware.
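Before diving into the code, it helps to keep the data path in mind: AwesomePlayer owns an OMXCodec-backed audio source, AudioPlayer pulls decoded PCM from it, and the PCM is pushed through the AudioSink (an AudioOutput wrapping an AudioTrack) into AudioFlinger. Every stage that carries buffers exposes the same pull-style shape. Below is a rough mental model of that shape; PcmSource/PcmBuffer/SilenceSource are toy names for illustration, not the real Stagefright headers.

// Rough mental model of the pull interface the audio path is built on.
// The real roles are played by MediaSource/OMXCodec (producer) and
// MediaBuffer (the unit that travels between stages).
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

struct PcmBuffer {
    std::vector<uint8_t> data;                // decoded PCM samples
};

class PcmSource {
public:
    virtual ~PcmSource() {}
    virtual int start() = 0;                  // cf. mAudioSource->start()
    virtual bool read(PcmBuffer *out) = 0;    // cf. mSource->read(&mInputBuffer, ...)
    virtual int stop() = 0;
};

// Trivial implementation so the sketch runs: produces a few buffers of silence.
class SilenceSource : public PcmSource {
public:
    int start() override { mLeft = 3; return 0; }
    bool read(PcmBuffer *out) override {
        if (mLeft-- <= 0) return false;
        out->data.assign(1024, 0);
        return true;
    }
    int stop() override { return 0; }
private:
    int mLeft = 0;
};

int main() {
    SilenceSource src;
    src.start();
    PcmBuffer buf;
    while (src.read(&buf))                    // the consumer pulls on demand
        std::cout << "pulled " << buf.data.size() << " bytes\n";
    src.stop();
}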
status_t AwesomePlayer::prepareAsync_l() {
    mAsyncPrepareEvent = new AwesomeEvent(
            this, &AwesomePlayer::onPrepareAsyncEvent);
    mQueue.postEvent(mAsyncPrepareEvent);

    return OK;
}
void AwesomePlayer::onPrepareAsyncEvent() {
    Mutex::Autolock autoLock(mLock);

    if (mAudioTrack != NULL && mAudioSource == NULL) {
        status_t err = initAudioDecoder();

        if (err != OK) {
            abortPrepare(err);
            return;
        }
    }
}
status_t AwesomePlayer::initAudioDecoder() {
    if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW)) {
        mAudioSource = mAudioTrack;
    } else {
        mAudioSource = OMXCodec::Create(
                mClient.interface(), mAudioTrack->getFormat(),
                false, // createEncoder
                mAudioTrack);
    }

    if (mAudioSource != NULL) {
        status_t err = mAudioSource->start();

        if (err != OK) {
            mAudioSource.clear();
            return err;
        }
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_QCELP)) {
        // For legacy reasons we're simply going to ignore the absence
        // of an audio decoder for QCELP instead of aborting playback
        // altogether.
        return OK;
    }

    return mAudioSource != NULL ? OK : UNKNOWN_ERROR;
}
AwesomePlayer::play() calls play_l(). In play_l() a new AudioPlayer is created, which is different from the video flow. Note the mAudioSink member here as well.
status_t AwesomePlayer::play_l() {
    if (mAudioSource != NULL) {
        if (mAudioPlayer == NULL) {
            if (mAudioSink != NULL) {
                mAudioPlayer = new AudioPlayer(mAudioSink, allowDeepBuffering, this);
                mAudioPlayer->setSource(mAudioSource);
            }
        }

        if (mVideoSource == NULL) {
            status_t err = startAudioPlayer_l(false /* sendErrorNotification */);
        }
    }

    return OK;
}
What is AudioSink, and where does it come from? Start with where it comes from: after MediaPlayerService creates the player, it calls setAudioSink() based on the result of hardwareOutput(). hardwareOutput() is a virtual function that returns false by default, so an AudioOutput is created.
sp<MediaPlayerBase> MediaPlayerService::Client::setDataSource_pre(
        player_type playerType)
{
    sp<MediaPlayerBase> p = createPlayer(playerType);
    if (p == NULL) {
        return p;
    }

    if (!p->hardwareOutput()) {
        mAudioOutput = new AudioOutput(mAudioSessionId);
        static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
    }

    return p;
}
AudioOutput is defined in MediaPlayerService.h and inherits from MediaPlayerBase::AudioSink. AudioSink is an abstract class made up of pure virtual functions; the concrete implementation lives in AudioOutput. AudioOutput's code is fairly simple: most of it just delegates to mTrack, which is an AudioTrack. In other words, AudioOutput is a thin wrapper around AudioTrack.
class MediaPlayerService : public BnMediaPlayerService
{
    class AudioOutput : public MediaPlayerBase::AudioSink
    {
        class CallbackData {
        public:
            CallbackData(AudioOutput *cookie) {
                mData = cookie;
                mSwitching = false;
            }
        };
    }; // AudioOutput
};
ssize_t MediaPlayerService::AudioOutput::frameCount() const
{
    if (mTrack == 0) return NO_INIT;
    return mTrack->frameCount();
}

ssize_t MediaPlayerService::AudioOutput::channelCount() const
{
    if (mTrack == 0) return NO_INIT;
    return mTrack->channelCount();
}
AudioPlayer's main members are sp<AudioTrack> mAudioTrack, sp<MediaSource> mSource, and MediaBuffer *mInputBuffer; it is a wrapper around audio playback. Here mSource is the mAudioSource (the OMXCodec created in initAudioDecoder) that was handed in via setSource() in play_l().
startAudioPlayer_l() calls AudioPlayer's start(). This path goes through mAudioSink->open(), where the AudioTrack is created. Note the AudioPlayer::AudioSinkCallback callback passed into open(); after open() returns, start() is called to begin playback.
status_t AudioPlayer::start(bool sourceAlreadyStarted) {
    mFirstBufferResult = mSource->read(&mFirstBuffer, &options);
    if (mFirstBufferResult == INFO_FORMAT_CHANGED) {
        mFirstBufferResult = OK;
        mIsFirstBuffer = false;
    } else {
        mIsFirstBuffer = true;
    }

    if (mAudioSink.get() != NULL) {
        status_t err = mAudioSink->open(
                mSampleRate, numChannels, channelMask, AUDIO_FORMAT_PCM_16_BIT,
                DEFAULT_AUDIOSINK_BUFFERCOUNT,
                &AudioPlayer::AudioSinkCallback,
                this,
                (mAllowDeepBuffering ?
                        AUDIO_OUTPUT_FLAG_DEEP_BUFFER :
                        AUDIO_OUTPUT_FLAG_NONE));

        if (err != OK) {
            return err;
        }

        mAudioSink->start();
    } else {
        mAudioTrack = new AudioTrack(
                AUDIO_STREAM_MUSIC, mSampleRate, AUDIO_FORMAT_PCM_16_BIT, audioMask,
                0, AUDIO_OUTPUT_FLAG_NONE, &AudioCallback, this, 0);

        mAudioTrack->start();
    }

    return OK;
}
status_t MediaPlayerService::AudioOutput::open(
        uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
        audio_format_t format, int bufferCount,
        AudioCallback cb, void *cookie,
        audio_output_flags_t flags)
{
    mCallback = cb;

    if (mCallback != NULL) {
        newcbd = new CallbackData(this);
        t = new AudioTrack(
                mStreamType,
                sampleRate,
                format,
                channelMask,
                frameCount,
                flags,
                CallbackWrapper,
                newcbd,
                0,  // notification frames
                mSessionId);
    } else {
        t = new AudioTrack(
                mStreamType,
                sampleRate,
                format,
                channelMask,
                frameCount,
                flags,
                NULL,
                NULL,
                0,
                mSessionId);
    }
}
Video playback starts a thread that loops and keeps reading data, but no such logic shows up in AudioTrack::start().
Looking back at AudioTrack's constructor: it calls set(), which stores the callback we mentioned above and then creates an AudioTrackThread, so this is where the thread lives. The AudioTrackThread loop mainly does three things: obtain a buffer, invoke the callback to fill it, and release the buffer.
status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        int channelMask,
        int frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId)
{
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
    }

    // create the IAudioTrack
    status_t status = createTrack_l(streamType,
                                    sampleRate,
                                    format,
                                    (uint32_t)channelMask,
                                    frameCount,
                                    flags,
                                    sharedBuffer,
                                    output);

    return NO_ERROR;
}
bool AudioTrack::AudioTrackThread::threadLoop()
{
    {
        AutoMutex _l(mMyLock);
        if (mPaused) {
            mMyCond.wait(mMyLock);
            // caller will check for exitPending()
            return true;
        }
    }

    if (!mReceiver.processAudioBuffer(this)) {
        pause();
    }

    return true;
}
bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
{
    Buffer audioBuffer;

    do {
        status_t err = obtainBuffer(&audioBuffer, waitCount);
        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
        releaseBuffer(&audioBuffer);
    } while (frames);

    return true;
}
Given the arguments passed in, mCbf is CallbackWrapper, a static function. It invokes mCallback, the callback registered earlier in AudioOutput::open(), whose type is AudioPlayer::AudioSinkCallback. AudioSinkCallback calls the OMXCodec's read() to fetch decoded data and copies it into the buffer supplied by the callback, completing the data transfer.
// static
void MediaPlayerService::AudioOutput::CallbackWrapper(
        int event, void *cookie, void *info) {
    //ALOGV("callbackwrapper");
    if (event != AudioTrack::EVENT_MORE_DATA) {
        return;
    }

    CallbackData *data = (CallbackData*)cookie;
    data->lock();
    AudioOutput *me = data->getOutput();
    AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;

    size_t actualSize = (*me->mCallback)(
            me, buffer->raw, buffer->size, me->mCallbackCookie);
}
// static
size_t AudioPlayer::AudioSinkCallback(
        MediaPlayerBase::AudioSink *audioSink,
        void *buffer, size_t size, void *cookie) {
    AudioPlayer *me = (AudioPlayer *)cookie;

    return me->fillBuffer(buffer, size);
}
size_t AudioPlayer::fillBuffer(void *data, size_t size) {
    while (size_remaining > 0) {
        if (mInputBuffer == NULL) {
            if (mIsFirstBuffer) {
                mInputBuffer = mFirstBuffer;
                mFirstBuffer = NULL;
                err = mFirstBufferResult;
                mIsFirstBuffer = false;
            } else {
                err = mSource->read(&mInputBuffer, &options);
            }
        }

        memcpy((char *)data + size_done,
               (const char *)mInputBuffer->data() + mInputBuffer->range_offset(),
               copy);
    }

    return size_done;
}