SoftOMXComponent::SoftOMXComponent(
        const char *name,
        const OMX_CALLBACKTYPE *callbacks,
        OMX_PTR appData,
        OMX_COMPONENTTYPE **component)
    : mName(name),
      mCallbacks(callbacks),
      mComponent(new OMX_COMPONENTTYPE),
      mLibHandle(NULL) {
    mComponent->nSize = sizeof(*mComponent);
    mComponent->nVersion.s.nVersionMajor = 1;
    mComponent->nVersion.s.nVersionMinor = 0;
    mComponent->nVersion.s.nRevision = 0;
    mComponent->nVersion.s.nStep = 0;
    mComponent->pComponentPrivate = this;
    mComponent->pApplicationPrivate = appData;

    mComponent->GetComponentVersion = NULL;
    mComponent->SendCommand = SendCommandWrapper;
    mComponent->GetParameter = GetParameterWrapper;
    mComponent->SetParameter = SetParameterWrapper;
    mComponent->GetConfig = GetConfigWrapper;
    mComponent->SetConfig = SetConfigWrapper;
    mComponent->GetExtensionIndex = GetExtensionIndexWrapper;
    mComponent->GetState = GetStateWrapper;
    mComponent->ComponentTunnelRequest = NULL;
    mComponent->UseBuffer = UseBufferWrapper;
    mComponent->AllocateBuffer = AllocateBufferWrapper;
    mComponent->FreeBuffer = FreeBufferWrapper;
    mComponent->EmptyThisBuffer = EmptyThisBufferWrapper;
    mComponent->FillThisBuffer = FillThisBufferWrapper;
    mComponent->SetCallbacks = NULL;
    mComponent->ComponentDeInit = NULL;
    mComponent->UseEGLImage = NULL;
    mComponent->ComponentRoleEnum = NULL;

    *component = mComponent;
}

SimpleSoftOMXComponent::SimpleSoftOMXComponent(
        const char *name,
        const OMX_CALLBACKTYPE *callbacks,
        OMX_PTR appData,
        OMX_COMPONENTTYPE **component)
    : SoftOMXComponent(name, callbacks, appData, component),
      mLooper(new ALooper),
      mHandler(new AHandlerReflector<SimpleSoftOMXComponent>(this)),
      mState(OMX_StateLoaded),
      mTargetState(OMX_StateLoaded) {
    mLooper->setName(name);
    mLooper->registerHandler(mHandler);

    mLooper->start(
            false, // runOnCallingThread
            false, // canCallJava
            ANDROID_PRIORITY_FOREGROUND);
}

SoftAAC2::SoftAAC2(
        const char *name,
        const OMX_CALLBACKTYPE *callbacks,
        OMX_PTR appData,
        OMX_COMPONENTTYPE **component)
    : SimpleSoftOMXComponent(name, callbacks, appData, component),
      mAACDecoder(NULL),
      mStreamInfo(NULL),
      mIsADTS(false),
      mInputBufferCount(0),
      mSignalledError(false),
      mSawInputEos(false),
      mSignalledOutputEos(false),
      mAnchorTimeUs(0),
      mNumSamplesOutput(0),
      mOutputPortSettingsChange(NONE) {
    initPorts();
    CHECK_EQ(initDecoder(), (status_t)OK);
}

void SoftAAC2::initPorts() {
    OMX_PARAM_PORTDEFINITIONTYPE def;
    InitOMXParams(&def);

    def.nPortIndex = 0;
    def.eDir = OMX_DirInput;
    def.nBufferCountMin = kNumInputBuffers;
    def.nBufferCountActual = def.nBufferCountMin;
    def.nBufferSize = 8192;
    def.bEnabled = OMX_TRUE;
    def.bPopulated = OMX_FALSE;
    def.eDomain = OMX_PortDomainAudio;
    def.bBuffersContiguous = OMX_FALSE;
    def.nBufferAlignment = 1;

    def.format.audio.cMIMEType = const_cast<char *>("audio/aac");
    def.format.audio.pNativeRender = NULL;
    def.format.audio.bFlagErrorConcealment = OMX_FALSE;
    def.format.audio.eEncoding = OMX_AUDIO_CodingAAC;

    addPort(def);

    def.nPortIndex = 1;
    def.eDir = OMX_DirOutput;
    def.nBufferCountMin = kNumOutputBuffers;
    def.nBufferCountActual = def.nBufferCountMin;
    def.nBufferSize = 4096 * MAX_CHANNEL_COUNT;
    def.bEnabled = OMX_TRUE;
    def.bPopulated = OMX_FALSE;
    def.eDomain = OMX_PortDomainAudio;
    def.bBuffersContiguous = OMX_FALSE;
    def.nBufferAlignment = 2;

    def.format.audio.cMIMEType = const_cast<char *>("audio/raw");
    def.format.audio.pNativeRender = NULL;
    def.format.audio.bFlagErrorConcealment = OMX_FALSE;
    def.format.audio.eEncoding = OMX_AUDIO_CodingPCM;

    addPort(def);
}

// Initialize the decoder.
status_t SoftAAC2::initDecoder() {
    status_t status = UNKNOWN_ERROR;
    // Open the decoder library and obtain the decoder handle, mAACDecoder.
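    // aacDecoder_Open() allocates an FDK AAC decoder instance for the given
    // transport type and number of layers; aacDecoder_GetStreamInfo() below
    // returns a pointer to the decoder-owned CStreamInfo structure, which the
    // library updates as frames are decoded.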
    mAACDecoder = aacDecoder_Open(TT_MP4_ADIF, /* num layers */ 1);
    if (mAACDecoder != NULL) {
        // Retrieve information about the AAC stream; it is kept in a CStreamInfo structure.
        mStreamInfo = aacDecoder_GetStreamInfo(mAACDecoder);
        if (mStreamInfo != NULL) {
            status = OK;
        }
    }

    mDecoderHasData = false;

    // for streams that contain metadata, use the mobile profile DRC settings unless overridden
    // by platform properties:
    char value[PROPERTY_VALUE_MAX];
    //  * AAC_DRC_REFERENCE_LEVEL
    if (property_get(PROP_DRC_OVERRIDE_REF_LEVEL, value, NULL)) {
        unsigned refLevel = atoi(value);
        ALOGV("AAC decoder using AAC_DRC_REFERENCE_LEVEL of %d instead of %d",
                refLevel, DRC_DEFAULT_MOBILE_REF_LEVEL);
        aacDecoder_SetParam(mAACDecoder, AAC_DRC_REFERENCE_LEVEL, refLevel);
    } else {
        aacDecoder_SetParam(mAACDecoder, AAC_DRC_REFERENCE_LEVEL, DRC_DEFAULT_MOBILE_REF_LEVEL);
    }
    //  * AAC_DRC_ATTENUATION_FACTOR
    if (property_get(PROP_DRC_OVERRIDE_CUT, value, NULL)) {
        unsigned cut = atoi(value);
        ALOGV("AAC decoder using AAC_DRC_ATTENUATION_FACTOR of %d instead of %d",
                cut, DRC_DEFAULT_MOBILE_DRC_CUT);
        aacDecoder_SetParam(mAACDecoder, AAC_DRC_ATTENUATION_FACTOR, cut);
    } else {
        aacDecoder_SetParam(mAACDecoder, AAC_DRC_ATTENUATION_FACTOR, DRC_DEFAULT_MOBILE_DRC_CUT);
    }
    //  * AAC_DRC_BOOST_FACTOR (note: no default, using cut)
    if (property_get(PROP_DRC_OVERRIDE_BOOST, value, NULL)) {
        unsigned boost = atoi(value);
        ALOGV("AAC decoder using AAC_DRC_BOOST_FACTOR of %d", boost);
        aacDecoder_SetParam(mAACDecoder, AAC_DRC_BOOST_FACTOR, boost);
    } else {
        aacDecoder_SetParam(mAACDecoder, AAC_DRC_BOOST_FACTOR, DRC_DEFAULT_MOBILE_DRC_BOOST);
    }

    return status;
}

void SoftAAC2::onQueueFilled(OMX_U32 portIndex) {
    if (mSignalledError || mOutputPortSettingsChange != NONE) {
        return;
    }

    UCHAR* inBuffer[FILEREAD_MAX_LAYERS];
    UINT inBufferLength[FILEREAD_MAX_LAYERS] = {0};
    UINT bytesValid[FILEREAD_MAX_LAYERS] = {0};

    List<BufferInfo *> &inQueue = getPortQueue(0);
    List<BufferInfo *> &outQueue = getPortQueue(1);

    if (portIndex == 0 && mInputBufferCount == 0) {
        ++mInputBufferCount;
        BufferInfo *info = *inQueue.begin();
        OMX_BUFFERHEADERTYPE *header = info->mHeader;

        inBuffer[0] = header->pBuffer + header->nOffset;
        inBufferLength[0] = header->nFilledLen;

        AAC_DECODER_ERROR decoderErr =
            aacDecoder_ConfigRaw(mAACDecoder,
                                 inBuffer,
                                 inBufferLength);

        if (decoderErr != AAC_DEC_OK) {
            mSignalledError = true;
            notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
            return;
        }

        inQueue.erase(inQueue.begin());
        info->mOwnedByUs = false;
        notifyEmptyBufferDone(header);

        // Only send out port settings changed event if both sample rate
        // and numChannels are valid.
        if (mStreamInfo->sampleRate && mStreamInfo->numChannels) {
            maybeConfigureDownmix();
            ALOGI("Initially configuring decoder: %d Hz, %d channels",
                  mStreamInfo->sampleRate,
                  mStreamInfo->numChannels);

            notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
            mOutputPortSettingsChange = AWAITING_DISABLED;
        }

        return;
    }

    while ((!inQueue.empty() || (mSawInputEos && !mSignalledOutputEos)) && !outQueue.empty()) {
        BufferInfo *inInfo = NULL;
        OMX_BUFFERHEADERTYPE *inHeader = NULL;
        if (!inQueue.empty()) {
            inInfo = *inQueue.begin();
            inHeader = inInfo->mHeader;
        }

        BufferInfo *outInfo = *outQueue.begin();
        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
        outHeader->nFlags = 0;

        if (inHeader) {
            if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
                mSawInputEos = true;
            }

            if (inHeader->nOffset == 0 && inHeader->nFilledLen) {
                mAnchorTimeUs = inHeader->nTimeStamp;
                mNumSamplesOutput = 0;
            }

            if (mIsADTS) {
                size_t adtsHeaderSize = 0;
                // skip 30 bits, aac_frame_length follows.
                // ssssssss ssssiiip ppffffPc ccohCCll llllllll lll?????

                const uint8_t *adtsHeader = inHeader->pBuffer + inHeader->nOffset;

                bool signalError = false;
                if (inHeader->nFilledLen < 7) {
                    ALOGE("Audio data too short to contain even the ADTS header. "
                          "Got %d bytes.", inHeader->nFilledLen);
                    hexdump(adtsHeader, inHeader->nFilledLen);
                    signalError = true;
                } else {
                    bool protectionAbsent = (adtsHeader[1] & 1);

                    unsigned aac_frame_length =
                        ((adtsHeader[3] & 3) << 11)
                        | (adtsHeader[4] << 3)
                        | (adtsHeader[5] >> 5);

                    if (inHeader->nFilledLen < aac_frame_length) {
                        ALOGE("Not enough audio data for the complete frame. "
                              "Got %d bytes, frame size according to the ADTS "
                              "header is %u bytes.",
                              inHeader->nFilledLen, aac_frame_length);
                        hexdump(adtsHeader, inHeader->nFilledLen);
                        signalError = true;
                    } else {
                        adtsHeaderSize = (protectionAbsent ? 7 : 9);

                        inBuffer[0] = (UCHAR *)adtsHeader + adtsHeaderSize;
                        inBufferLength[0] = aac_frame_length - adtsHeaderSize;

                        inHeader->nOffset += adtsHeaderSize;
                        inHeader->nFilledLen -= adtsHeaderSize;
                    }
                }

                if (signalError) {
                    mSignalledError = true;

                    notify(OMX_EventError,
                           OMX_ErrorStreamCorrupt,
                           ERROR_MALFORMED,
                           NULL);

                    return;
                }
            } else {
                inBuffer[0] = inHeader->pBuffer + inHeader->nOffset;
                inBufferLength[0] = inHeader->nFilledLen;
            }
        } else {
            inBufferLength[0] = 0;
        }

        // Fill and decode
        INT_PCM *outBuffer = reinterpret_cast<INT_PCM *>(
                outHeader->pBuffer + outHeader->nOffset);

        bytesValid[0] = inBufferLength[0];

        int prevSampleRate = mStreamInfo->sampleRate;
        int prevNumChannels = mStreamInfo->numChannels;

        AAC_DECODER_ERROR decoderErr = AAC_DEC_NOT_ENOUGH_BITS;
        while ((bytesValid[0] > 0 || mSawInputEos) && decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
            mDecoderHasData |= (bytesValid[0] > 0);

            aacDecoder_Fill(mAACDecoder,
                            inBuffer,
                            inBufferLength,
                            bytesValid);

            decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
                                                outBuffer,
                                                outHeader->nAllocLen,
                                                0 /* flags */);
            if (decoderErr == AAC_DEC_NOT_ENOUGH_BITS) {
                if (mSawInputEos && bytesValid[0] <= 0) {
                    if (mDecoderHasData) {
                        // flush out the decoder's delayed data by calling DecodeFrame
                        // one more time, with the AACDEC_FLUSH flag set
                        decoderErr = aacDecoder_DecodeFrame(mAACDecoder,
                                                            outBuffer,
                                                            outHeader->nAllocLen,
                                                            AACDEC_FLUSH);
                        mDecoderHasData = false;
                    }
                    outHeader->nFlags = OMX_BUFFERFLAG_EOS;
                    mSignalledOutputEos = true;
                    break;
                } else {
                    ALOGW("Not enough bits, bytesValid %d", bytesValid[0]);
                }
            }
        }

        size_t numOutBytes =
            mStreamInfo->frameSize * sizeof(int16_t) * mStreamInfo->numChannels;

        if (inHeader) {
            if (decoderErr == AAC_DEC_OK) {
                UINT inBufferUsedLength = inBufferLength[0] - bytesValid[0];
                inHeader->nFilledLen -= inBufferUsedLength;
                inHeader->nOffset += inBufferUsedLength;
            } else {
                ALOGW("AAC decoder returned error %d, substituting silence", decoderErr);

                memset(outHeader->pBuffer + outHeader->nOffset, 0, numOutBytes);

                // Discard input buffer.
                inHeader->nFilledLen = 0;

                aacDecoder_SetParam(mAACDecoder, AAC_TPDEC_CLEAR_BUFFER, 1);

                // fall through
            }

            if (inHeader->nFilledLen == 0) {
                inInfo->mOwnedByUs = false;
                inQueue.erase(inQueue.begin());
                inInfo = NULL;
                notifyEmptyBufferDone(inHeader);
                inHeader = NULL;
            }
        }

        /*
         * AAC+/eAAC+ streams can be signalled in two ways: either explicitly
         * or implicitly, according to MPEG4 spec. AAC+/eAAC+ is a dual
         * rate system and the sampling rate in the final output is actually
         * doubled compared with the core AAC decoder sampling rate.
         *
         * Explicit signalling is done by explicitly defining SBR audio object
         * type in the bitstream.
         * Implicit signalling is done by embedding
         * SBR content in AAC extension payload specific to SBR, and hence
         * requires an AAC decoder to perform pre-checks on actual audio frames.
         *
         * Thus, we could not say for sure whether a stream is
         * AAC+/eAAC+ until the first data frame is decoded.
         */
        if (mInputBufferCount <= 2) {
            if (mStreamInfo->sampleRate != prevSampleRate ||
                mStreamInfo->numChannels != prevNumChannels) {
                maybeConfigureDownmix();
                ALOGI("Reconfiguring decoder: %d->%d Hz, %d->%d channels",
                      prevSampleRate, mStreamInfo->sampleRate,
                      prevNumChannels, mStreamInfo->numChannels);

                notify(OMX_EventPortSettingsChanged, 1, 0, NULL);
                mOutputPortSettingsChange = AWAITING_DISABLED;
                return;
            }
        } else if (!mStreamInfo->sampleRate || !mStreamInfo->numChannels) {
            ALOGW("Invalid AAC stream");
            mSignalledError = true;
            notify(OMX_EventError, OMX_ErrorUndefined, decoderErr, NULL);
            return;
        }

        if (decoderErr == AAC_DEC_OK || mNumSamplesOutput > 0) {
            // We'll only output data if we successfully decoded it or
            // we've previously decoded valid data, in the latter case
            // (decode failed) we'll output a silent frame.
            outHeader->nFilledLen = numOutBytes;

            outHeader->nTimeStamp =
                mAnchorTimeUs
                    + (mNumSamplesOutput * 1000000ll) / mStreamInfo->sampleRate;

            mNumSamplesOutput += mStreamInfo->frameSize;

            outInfo->mOwnedByUs = false;
            outQueue.erase(outQueue.begin());
            outInfo = NULL;
            notifyFillBufferDone(outHeader);
            outHeader = NULL;
        }

        if (decoderErr == AAC_DEC_OK) {
            ++mInputBufferCount;
        }
    }
}
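
To make the ADTS bit-twiddling in onQueueFilled() easier to follow, here is a minimal standalone sketch, not taken from the AOSP sources, that extracts aac_frame_length and the header size the same way the component does. The helper name adtsFrameLength and the header bytes are made up for illustration.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Returns the ADTS aac_frame_length in bytes (header + payload), or 0 if the
// buffer is too short to hold the 7-byte fixed header.
static unsigned adtsFrameLength(const uint8_t *hdr, size_t size) {
    if (size < 7) {
        return 0;
    }
    // aac_frame_length is a 13-bit field spread over bytes 3..5 of the header,
    // mirroring the expression used in SoftAAC2::onQueueFilled().
    return ((hdr[3] & 3) << 11) | (hdr[4] << 3) | (hdr[5] >> 5);
}

int main() {
    // Hypothetical header bytes: syncword 0xFFF, protection_absent = 1,
    // aac_frame_length = 378 bytes.
    const uint8_t hdr[7] = { 0xFF, 0xF1, 0x50, 0x80, 0x2F, 0x5F, 0xFC };

    bool protectionAbsent = (hdr[1] & 1);
    unsigned frameLength = adtsFrameLength(hdr, sizeof(hdr));

    // Header size is 7 bytes without CRC, 9 bytes with CRC, exactly the
    // adtsHeaderSize choice made by the component before it feeds the payload
    // to aacDecoder_Fill().
    size_t headerSize = protectionAbsent ? 7 : 9;

    printf("frame length %u bytes, payload %zu bytes\n",
           frameLength, frameLength - headerSize);
    return 0;
}

With these example bytes the sketch prints a frame length of 378 and a payload of 371 bytes, which is the slice the component would hand to the FDK decoder after advancing nOffset and shrinking nFilledLen by the header size.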