Android Audio Code Analysis 13 - The AudioTrack::getPosition Function

The plan is to start from the interfaces used in the test code and follow the call relationships between them, gradually lifting the veil on Android's audio system.


*****************************************Source code*************************************************
    public void testPlaybackHeadPositionAfterInit() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlaybackHeadPositionAfterInit";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
        
        //-------- initialization --------------
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, 
                AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT), TEST_MODE);
        //--------    test        --------------
        assumeTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
        assertTrue(TEST_NAME, track.getPlaybackHeadPosition() == 0);
        //-------- tear down      --------------
        track.release();
    }

**********************************************************************************************
Source path:
frameworks\base\media\tests\mediaframeworktest\src\com\android\mediaframeworktest\functional\MediaAudioTrackTest.java


#######################Walkthrough################################
    //Test case 1: getPlaybackHeadPosition() at 0 after initialization
    public void testPlaybackHeadPositionAfterInit() throws Exception {
        // constants for test
        final String TEST_NAME = "testPlaybackHeadPositionAfterInit";
        final int TEST_SR = 22050;
        final int TEST_CONF = AudioFormat.CHANNEL_OUT_STEREO;
        final int TEST_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        final int TEST_MODE = AudioTrack.MODE_STREAM;
        final int TEST_STREAM_TYPE = AudioManager.STREAM_MUSIC;
        
        //-------- initialization --------------
        AudioTrack track = new AudioTrack(TEST_STREAM_TYPE, TEST_SR, TEST_CONF, TEST_FORMAT, 
                AudioTrack.getMinBufferSize(TEST_SR, TEST_CONF, TEST_FORMAT), TEST_MODE);
        //--------    test        --------------
        assumeTrue(TEST_NAME, track.getState() == AudioTrack.STATE_INITIALIZED);
// What we are going to look at today is the getPlaybackHeadPosition() call below.
// Since play() has not been called before getPlaybackHeadPosition(), playback has not started yet, so the position it returns should be 0.
        assertTrue(TEST_NAME, track.getPlaybackHeadPosition() == 0);
// +++++++++++++++++++++++++++++++getPlaybackHeadPosition+++++++++++++++++++++++++++++++++
    /**
     * Returns the playback head position expressed in frames
     */
    public int getPlaybackHeadPosition() {
// Very straightforward:
// native_get_position() is the native method implemented by android_media_AudioTrack_get_position
        return native_get_position();
// ++++++++++++++++++++++++++++++android_media_AudioTrack_get_position++++++++++++++++++++++++++++++++++
// Path: frameworks\base\core\jni\android_media_AudioTrack.cpp


static jint android_media_AudioTrack_get_position(JNIEnv *env,  jobject thiz) {
    
    AudioTrack *lpTrack = (AudioTrack *)env->GetIntField(
                thiz, javaAudioTrackFields.nativeTrackInJavaObj);
    uint32_t position = 0;
                
    if (lpTrack) {
        lpTrack->getPosition(&position);
        return (jint)position;
// ++++++++++++++++++++++++++++++++AudioTrack::getPosition++++++++++++++++++++++++++++++++
status_t AudioTrack::getPosition(uint32_t *position)
{
    if (position == 0) return BAD_VALUE;

    *position = mCblk->server;

    return NO_ERROR;
}


// server is a member of mCblk, and mCblk is an audio_track_cblk_t object.
// server is advanced in audio_track_cblk_t::stepServer.
// In addition, mCblk->server is assigned in AudioTrack::setPosition.
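// At the Java layer, this frame count is exactly what getPlaybackHeadPosition() returns.
// Converting it to a playback time needs the track's sample rate; a minimal sketch
// (the helper name positionToMs is made up for illustration):
    static long positionToMs(AudioTrack track) {
        int frames = track.getPlaybackHeadPosition();  // native side: mCblk->server
        return frames * 1000L / track.getSampleRate(); // e.g. 22050 Hz in this test
    }
// First, the setPosition path: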
// +++++++++++++++++++++++++++++++++AudioTrack::setPosition+++++++++++++++++++++++++++++++
status_t AudioTrack::setPosition(uint32_t position)
{
    Mutex::Autolock _l(mCblk->lock);

    if (!stopped()) return INVALID_OPERATION;

    if (position > mCblk->user) return BAD_VALUE;

    mCblk->server = position;
    mCblk->flags |= CBLK_FORCEREADY_ON;

    return NO_ERROR;
}


// In android_media_AudioTrack.cpp, AudioTrack::setPosition is called from the
// android_media_AudioTrack_set_position function (the similarly named
// android_media_AudioTrack_set_pos_update_period sets the position update period instead).
// These JNI entries back the position APIs exposed to the Java layer.
// Imagine the usage scenario: dragging the playback cursor to seek, as sketched below.
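// At the Java layer the corresponding call is AudioTrack.setPlaybackHeadPosition(), which
// reaches AudioTrack::setPosition through android_media_AudioTrack_set_position. A rough
// sketch of the seek scenario (the helper name seekToFrame is made up, and a MODE_STATIC
// track is assumed, since setPosition() above only succeeds while stopped()):
    static void seekToFrame(AudioTrack staticTrack, int frame) {
        staticTrack.stop();                          // setPosition() returns INVALID_OPERATION unless stopped
        staticTrack.setPlaybackHeadPosition(frame);  // native side: mCblk->server = frame
        staticTrack.play();                          // resume playback from the new position
    }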
// ---------------------------------AudioTrack::setPosition-------------------------------
// +++++++++++++++++++++++++++++++++audio_track_cblk_t::stepServer+++++++++++++++++++++++++++++++
bool audio_track_cblk_t::stepServer(uint32_t frameCount)
{
    // the code below simulates lock-with-timeout
    // we MUST do this to protect the AudioFlinger server
    // as this lock is shared with the client.
    status_t err;

    err = lock.tryLock();
    if (err == -EBUSY) { // just wait a bit
        usleep(1000);
        err = lock.tryLock();
    }
    if (err != NO_ERROR) {
        // probably, the client just died.
        return false;
    }

    uint64_t s = this->server;

    s += frameCount;
    if (flags & CBLK_DIRECTION_MSK) {
        // Mark that we have read the first buffer so that next time stepUser() is called
        // we switch to normal obtainBuffer() timeout period
        if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS) {
            bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS - 1;
        }
        // It is possible that we receive a flush()
        // while the mixer is processing a block: in this case,
        // stepServer() is called After the flush() has reset u & s and
        // we have s > u
        if (s > this->user) {
            LOGW("stepServer occured after track reset");
            s = this->user;
        }
    }

    if (s >= loopEnd) {
        LOGW_IF(s > loopEnd, "stepServer: s %llu > loopEnd %llu", s, loopEnd);
        s = loopStart;
        if (--loopCount == 0) {
            loopEnd = ULLONG_MAX;
            loopStart = ULLONG_MAX;
        }
    }
    if (s >= serverBase + this->frameCount) {
        serverBase += this->frameCount;
    }

    this->server = s;

    cv.signal();
    lock.unlock();
    return true;
}


// audio_track_cblk_t::stepServer is called from AudioFlinger::ThreadBase::TrackBase::step.
// ++++++++++++++++++++++++++++++AudioFlinger::ThreadBase::TrackBase::step++++++++++++++++++++++++++++++++++
bool AudioFlinger::ThreadBase::TrackBase::step() {
    bool result;
    audio_track_cblk_t* cblk = this->cblk();

    result = cblk->stepServer(mFrameCount);
    if (!result) {
        LOGV("stepServer failed acquiring cblk mutex");
        mFlags |= STEPSERVER_FAILED;
    }
    return result;
}


// AudioFlinger::ThreadBase::TrackBase::step is called from AudioFlinger::PlaybackThread::Track::getNextBuffer
// and AudioFlinger::RecordThread::RecordTrack::getNextBuffer.
// Here we only look at AudioFlinger::PlaybackThread::Track::getNextBuffer.
// +++++++++++++++++++++++++++++AudioFlinger::PlaybackThread::Track::getNextBuffer+++++++++++++++++++++++++++++++++++
status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(AudioBufferProvider::Buffer* buffer)
{
    audio_track_cblk_t* cblk = this->cblk();
    uint32_t framesReady;
    uint32_t framesReq = buffer->frameCount;

    // Check if last stepServer failed, try to step now
    if (mFlags & TrackBase::STEPSERVER_FAILED) {
        if (!step()) goto getNextBuffer_exit;
        LOGV("stepServer recovered");
        mFlags &= ~TrackBase::STEPSERVER_FAILED;
    }

    framesReady = cblk->framesReady();

    if (LIKELY(framesReady)) {
        uint64_t s = cblk->server;
        uint64_t bufferEnd = cblk->serverBase + cblk->frameCount;

        bufferEnd = (cblk->loopEnd < bufferEnd) ? cblk->loopEnd : bufferEnd;
        if (framesReq > framesReady) {
            framesReq = framesReady;
        }
        if (s + framesReq > bufferEnd) {
            framesReq = bufferEnd - s;
        }

        buffer->raw = getBuffer(s, framesReq);
        if (buffer->raw == 0) goto getNextBuffer_exit;

        buffer->frameCount = framesReq;
        return NO_ERROR;
    }

getNextBuffer_exit:
    buffer->raw = 0;
    buffer->frameCount = 0;
    LOGV("getNextBuffer() no more data for track %d on thread %p", mName, mThread.unsafe_get());
    return NOT_ENOUGH_DATA;
}


// This function was already covered when we looked at framesReady, so there is nothing more to add here.
// -----------------------------AudioFlinger::PlaybackThread::Track::getNextBuffer-----------------------------------
// ------------------------------AudioFlinger::ThreadBase::TrackBase::step----------------------------------
// ---------------------------------audio_track_cblk_t::stepServer-------------------------------
// --------------------------------AudioTrack::getPosition--------------------------------
    }  else {
        jniThrowException(env, "java/lang/IllegalStateException",
            "Unable to retrieve AudioTrack pointer for getPosition()");
        return AUDIOTRACK_ERROR;
    }
}
// ------------------------------android_media_AudioTrack_get_position----------------------------------
    }
// -------------------------------getPlaybackHeadPosition---------------------------------
        //-------- tear down      --------------
        track.release();
    }

###########################################################


&&&&&&&&&&&&&&&&&&&&&&&Summary&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
getPosition simply reads back the server position held in the audio_track_cblk_t object.
The server position is changed either by setPosition or by stepServer.
Applications drive setPosition (through the Java-layer position APIs).
stepServer is driven by getNextBuffer, i.e. by the mixer consuming data.
&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
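To tie the chain together, here is a minimal Java-layer sketch (stream type, sample rate and format are copied from the test above; the PCM buffer is just silence, and the surrounding class and imports are omitted):

    // Sketch: the position stays at 0 until play() + write() let the mixer step the server.
    static void positionDemo() {
        byte[] pcm = new byte[8192]; // a short buffer of silence
        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, 22050,
                AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
                AudioTrack.getMinBufferSize(22050, AudioFormat.CHANNEL_OUT_STEREO,
                        AudioFormat.ENCODING_PCM_16BIT), AudioTrack.MODE_STREAM);
        int before = track.getPlaybackHeadPosition(); // 0: nothing mixed yet, server == 0
        track.play();
        track.write(pcm, 0, pcm.length);              // client side: stepUser() advances user
        // as the mixer pulls frames via getNextBuffer()/stepServer(), server advances too
        int after = track.getPlaybackHeadPosition();  // grows toward the frames actually played
        track.release();
    }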
