Android Camera Data Flow Analysis: A Complete Walkthrough (Overlay, Part 2)

The previous article walked through the overlay path once, but the flow was not actually finished. As we saw there, the last call made was PostFrame. So what exactly does PostFrame do? How does it take the data obtained from the driver and finally get it displayed as an image?
I have been looking for the answer for a while, and honestly I still don't fully understand PostFrame. What follows is my best current understanding; corrections from anyone who knows this code better are very welcome.

So let's start from PostFrame:


status_t ANativeWindowDisplayAdapter::PostFrame(ANativeWindowDisplayAdapter::DisplayFrame &dispFrame)
{
    status_t ret = NO_ERROR;
    uint32_t actualFramesWithDisplay = 0;
    android_native_buffer_t *buffer = NULL;
    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
    int i;

    ///@todo Do cropping based on the stabilized frame coordinates
    ///@todo Insert logic to drop frames here based on refresh rate of
    ///display or rendering rate whichever is lower
    ///Queue the buffer to overlay

    if ( NULL == mANativeWindow ) {
        return NO_INIT;
    }

    if (!mBuffers || !dispFrame.mBuffer) {
        CAMHAL_LOGEA("NULL sent to PostFrame");
        return BAD_VALUE;
    }

    for ( i = 0; i < mBufferCount; i++ )
    {
        if ( dispFrame.mBuffer == &mBuffers[i] )
        {
            break;
        }
    }

    mFramesType.add((int)mBuffers[i].opaque, dispFrame.mType);

    if ( mDisplayState == ANativeWindowDisplayAdapter::DISPLAY_STARTED &&
                (!mPaused || CameraFrame::CameraFrame::SNAPSHOT_FRAME == dispFrame.mType) &&
               !mSuspend)
    {
        Mutex::Autolock lock(mLock);
        uint32_t xOff = (dispFrame.mOffset % PAGE_SIZE);
        uint32_t yOff = (dispFrame.mOffset / PAGE_SIZE);
        // Set crop only if current x and y offsets do not match with frame offsets
        if((mXOff!=xOff) || (mYOff!=yOff))
        {
            CAMHAL_LOGDB("Offset %d xOff = %d, yOff = %d", dispFrame.mOffset, xOff, yOff);  uint8_t bytesPerPixel;
            ///Calculate bytes per pixel based on the pixel format
            if(strcmp(mPixelFormat, (const char *) CameraParameters::PIXEL_FORMAT_YUV422I) == 0)
                {
                bytesPerPixel = 2;
                }
            else if(strcmp(mPixelFormat, (const char *) CameraParameters::PIXEL_FORMAT_RGB565) == 0)
               {
                bytesPerPixel = 2;
                 }
             else if(strcmp(mPixelFormat, (const char *) CameraParameters::PIXEL_FORMAT_YUV420SP) == 0)
                 {
                bytesPerPixel = 1;
                }
            else
                {
                bytesPerPixel = 1;
            }

            CAMHAL_LOGVB(" crop.left = %d crop.top = %d crop.right = %d crop.bottom = %d",
                         xOff/bytesPerPixel, yOff , (xOff/bytesPerPixel)+mPreviewWidth, yOff+mPreviewHeight);
            // We'll ignore any errors here, if the surface is
            // already invalid, we'll know soon enough.
            mANativeWindow->set_crop(mANativeWindow, xOff/bytesPerPixel, yOff,
                                     (xOff/bytesPerPixel)+mPreviewWidth, yOff+mPreviewHeight);

            ///Update the current x and y offsets
            mXOff = xOff;
            mYOff = yOff;
        }
        // My reading of the code above: the incoming DisplayFrame is checked against the
        // current display state; if the offsets passed in don't match what is currently
        // configured, the crop region (display size and position) is set again via set_crop.
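        // (Note added for this write-up, not in the original source: a quick worked
        // example, assuming PAGE_SIZE is 4096. An offset of 8196 bytes gives
        // xOff = 8196 % 4096 = 4 and yOff = 8196 / 4096 = 2; with YUV422I at
        // 2 bytes per pixel the crop becomes left = 4 / 2 = 2, top = 2,
        // right = 2 + mPreviewWidth, bottom = 2 + mPreviewHeight.)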
        {
            buffer_handle_t *handle = (buffer_handle_t *) mBuffers[i].opaque;
            // unlock buffer before sending to display
            mapper.unlock(*handle);
            ret = mANativeWindow->enqueue_buffer(mANativeWindow, handle); // queue the buffer for display; where this function comes from is analyzed below
        }
        if ( NO_ERROR != ret ) {
            CAMHAL_LOGE("Surface::queueBuffer returned error %d", ret);
        }

        mFramesWithCameraAdapterMap.removeItem((buffer_handle_t *) dispFrame.mBuffer->opaque);
        // HWComposer has no minimum buffer requirement. We should be able to dequeue
        // the buffer immediately
        TIUTILS::Message msg;
        mDisplayQ.put(&msg);

#if PPM_INSTRUMENTATION || PPM_INSTRUMENTATION_ABS

     if ( mMeasureStandby )
      {
           CameraHal::PPM("Standby to first shot: Sensor Change completed - ", &mStandbyToShot);
           mMeasureStandby = false;
      }
      else if (CameraFrame::CameraFrame::SNAPSHOT_FRAME == dispFrame.mType)
      {
           CameraHal::PPM("Shot to snapshot: ", &mStartCapture);
           mShotToShot = true;
      }
      else if ( mShotToShot )
      {
           CameraHal::PPM("Shot to shot: ", &mStartCapture);
           mShotToShot = false;
      }
#endif

   }
   else
   {
      Mutex::Autolock lock(mLock);
      buffer_handle_t *handle = (buffer_handle_t *) mBuffers[i].opaque;

      // unlock buffer before giving it up
      mapper.unlock(*handle);

      // cancel buffer and dequeue another one
      ret = mANativeWindow->cancel_buffer(mANativeWindow, handle);
      if ( NO_ERROR != ret ) {
          CAMHAL_LOGE("Surface::cancelBuffer returned error %d", ret);
      }

      mFramesWithCameraAdapterMap.removeItem((buffer_handle_t *) dispFrame.mBuffer->opaque);

      TIUTILS::Message msg;
      mDisplayQ.put(&msg);
      ret = NO_ERROR;
   }

  return ret;
}
Let's focus on this call: mANativeWindow->enqueue_buffer(mANativeWindow, handle).
First, what is mANativeWindow? Despite its name, in ANativeWindowDisplayAdapter it is a preview_stream_ops_t* (see setPreviewWindow() further down); the functions in that ops table ultimately drive an ANativeWindow, whose definition took me quite a while to track down: system\core\include\system\window.h


struct ANativeWindow
{
#ifdef __cplusplus
    ANativeWindow()
        : flags(0), minSwapInterval(0), maxSwapInterval(0), xdpi(0), ydpi(0)
    {
        common.magic = ANDROID_NATIVE_WINDOW_MAGIC;
        common.version = sizeof(ANativeWindow);
        memset(common.reserved, 0, sizeof(common.reserved));
    }

    /* Implement the methods that sp<ANativeWindow> expects so that it
      can be used to automatically refcount ANativeWindow's. */
    void incStrong(const void* id) const {
        common.incRef(const_cast<android_native_base_t*>(&common));
    }
    void decStrong(const void* id) const {
        common.decRef(const_cast<android_native_base_t*>(&common));
    }
#endif

    struct android_native_base_t common;

    /* flags describing some attributes of this surface or its updater */
    const uint32_t flags;

    /* min swap interval supported by this updated */
    const int minSwapInterval;

   /* max swap interval supported by this updated */
   const int maxSwapInterval;

   /* horizontal and vertical resolution in DPI */
   const float xdpi;
   const float ydpi;

   /* Some storage reserved for the OEM's driver. */
   intptr_t oem[4];

   /*
    * Set the swap interval for this surface.
    *
    * Returns 0 on success or -errno on error.
    */
   int (*setSwapInterval)(struct ANativeWindow* window, int interval);

   /*
    * hook called by EGL to acquire a buffer. After this call, the buffer
    * is not locked, so its content cannot be modified.
    * this call may block if no buffers are available.
    *
    * Returns 0 on success or -errno on error.
    */
   int (*dequeueBuffer)(struct ANativeWindow* window, struct ANativeWindowBuffer** buffer);

   /*
   * hook called by EGL to lock a buffer. This MUST be called before modifying
   * the content of a buffer. The buffer must have been acquired with
   * dequeueBuffer first.
   *
   * Returns 0 on success or -errno on error.
   */
  int (*lockBuffer)(struct ANativeWindow* window, struct ANativeWindowBuffer* buffer);
  /*
   * hook called by EGL when modifications to the render buffer are done.
   * This unlocks and post the buffer.
   *
   * Buffers MUST be queued in the same order than they were dequeued.
   *
   * Returns 0 on success or -errno on error.
   */
   int (*queueBuffer)(struct ANativeWindow* window, struct ANativeWindowBuffer* buffer);

   /*
    * hook used to retrieve information about the native window.
    *
    * Returns 0 on success or -errno on error.
    */
   int (*query)(const struct ANativeWindow* window, int what, int* value);

   /*
    * hook used to perform various operations on the surface.
    * (*perform)() is a generic mechanism to add functionality to
    * ANativeWindow while keeping backward binary compatibility.
    *
    * DO NOT CALL THIS HOOK DIRECTLY. Instead, use the helper functions
    * defined below.
    *
    * (*perform)() returns -ENOENT if the 'what' parameter is not supported
    * by the surface's implementation.
    *
    * The valid operations are:
    * NATIVE_WINDOW_SET_USAGE
    * NATIVE_WINDOW_CONNECT (deprecated)
    * NATIVE_WINDOW_DISCONNECT (deprecated)
    * NATIVE_WINDOW_SET_CROP
    * NATIVE_WINDOW_SET_BUFFER_COUNT
    * NATIVE_WINDOW_SET_BUFFERS_GEOMETRY (deprecated)
    * NATIVE_WINDOW_SET_BUFFERS_TRANSFORM
    * NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP
    * NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS
    * NATIVE_WINDOW_SET_BUFFERS_FORMAT
    * NATIVE_WINDOW_SET_SCALING_MODE
    * NATIVE_WINDOW_LOCK (private)
    * NATIVE_WINDOW_UNLOCK_AND_POST (private)
    * NATIVE_WINDOW_API_CONNECT (private)
    * NATIVE_WINDOW_API_DISCONNECT (private)
    *
    */

   int (*perform)(struct ANativeWindow* window, int operation, ... );

   /*
    * hook used to cancel a buffer that has been dequeued.
    * No synchronization is performed between dequeue() and cancel(), so
    * either external synchronization is needed, or these functions must be
    * called from the same thread.
    */
   int (*cancelBuffer)(struct ANativeWindow* window, struct ANativeWindowBuffer* buffer);

    void* reserved_proc[2];
};
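Before tracing where these hooks are implemented, here is a minimal sketch of my own (it is not part of the camera code, and pushOneFrame/fillFrame are made-up names) showing how a producer is expected to drive them, based purely on the comments in the struct above: dequeue a buffer, lock it, fill it, then queue it, and cancel it if something goes wrong.

#include <system/window.h>   // declares ANativeWindow / ANativeWindowBuffer

// Hypothetical helper: push one frame of pixel data through an ANativeWindow.
static int pushOneFrame(ANativeWindow* win, void (*fillFrame)(ANativeWindowBuffer*))
{
    ANativeWindowBuffer* buf = NULL;

    // Acquire a free buffer; this may block until one is available.
    int err = win->dequeueBuffer(win, &buf);
    if (err != 0)
        return err;

    // The buffer must be locked before its contents may be modified.
    err = win->lockBuffer(win, buf);
    if (err != 0) {
        win->cancelBuffer(win, buf);   // hand the buffer back untouched
        return err;
    }

    fillFrame(buf);                    // e.g. copy a camera frame into the buffer

    // Hand the buffer to the consumer; buffers must be queued in dequeue order.
    return win->queueBuffer(win, buf);
}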
Since the code above calls methods through this object, we have to find out where those methods are actually implemented and how. So first, let's see where mANativeWindow is handed in:


int ANativeWindowDisplayAdapter::setPreviewWindow(preview_stream_ops_t* window)
{
   LOG_FUNCTION_NAME;
   ///Note that Display Adapter cannot work without a valid window object
   if ( !window) {
       CAMHAL_LOGEA("NULL window object passed to DisplayAdapter");
       LOG_FUNCTION_NAME_EXIT;
       return BAD_VALUE;
   }

   if ( window == mANativeWindow ) {
       return ALREADY_EXISTS;
   }

   ///Destroy the existing window object, if it exists
   destroy();

   ///Move to new window obj
   mANativeWindow = window;

   LOG_FUNCTION_NAME_EXIT;

   return NO_ERROR;
}
To trace the window parameter further we would have to keep going up the stack; to cut it short, it is set up in the camera hardware interface layer (CameraHardwareInterface), which sits in the same place as the camera service:


/** Set the ANativeWindow to which preview frames are sent */
   status_t setPreviewWindow(const sp<ANativeWindow>& buf)
   {
       LOGV("%s(%s) buf %p", __FUNCTION__, mName.string(), buf.get());

       if (mDevice->ops->set_preview_window) {
          mPreviewWindow = buf;
#ifdef OMAP_ENHANCEMENT_CPCAM
          mHalPreviewWindow.user = mPreviewWindow.get();
#else
          mHalPreviewWindow.user = this;
#endif
          LOGV("%s &mHalPreviewWindow %p mHalPreviewWindow.user %p", __FUNCTION__,
                  &mHalPreviewWindow, mHalPreviewWindow.user);
           return mDevice->ops->set_preview_window(mDevice, buf.get() ? &mHalPreviewWindow.nw : 0);
      }
      return INVALID_OPERATION;
}
As shown above, the window is handed down to the HAL wrapped in mHalPreviewWindow. So where is mHalPreviewWindow initialized and where are its functions implemented?


status_t initialize(hw_module_t *module)
{
      LOGI("Opening camera %s", mName.string());
      int rc = module->methods->open(module, mName.string(),
                                    (hw_device_t **)&mDevice);
      if (rc != OK) {
           LOGE("Could not open camera %s: %d", mName.string(), rc);
           return rc;
      }
#ifdef OMAP_ENHANCEMENT_CPCAM
      initHalPreviewWindow(&mHalPreviewWindow);
      initHalPreviewWindow(&mHalTapin);
      initHalPreviewWindow(&mHalTapout);
#else
      initHalPreviewWindow();
#endif
      return rc;
}
initialize() above is called when the camera is first opened; initHalPreviewWindow() initializes mHalPreviewWindow and fills in the function table that the HAL layer will later call:


void initHalPreviewWindow(struct camera_preview_window *window)
{

    window->nw.cancel_buffer = __cancel_buffer;
    window->nw.lock_buffer = __lock_buffer;
    window->nw.dequeue_buffer = __dequeue_buffer;
    window->nw.enqueue_buffer = __enqueue_buffer;
    window->nw.set_buffer_count = __set_buffer_count;
    window->nw.set_buffers_geometry = __set_buffers_geometry;
    window->nw.set_crop = __set_crop;
    window->nw.set_metadata = __set_metadata;
    window->nw.set_usage = __set_usage;
    window->nw.set_swap_interval = __set_swap_interval;
    window->nw.update_and_get_buffer = __update_and_get_buffer;
    window->nw.get_metadata = __get_metadata;
    window->nw.get_buffer_dimension = __get_buffer_dimension;
    window->nw.get_buffer_format = __get_buffer_format;

    window->nw.get_min_undequeued_buffer_count =
              __get_min_undequeued_buffer_count;
}
This fills in the whole ops table. Let's look at the implementation of cancel_buffer as a representative example:


static int __cancel_buffer(struct preview_stream_ops* w,
                   buffer_handle_t* buffer)
{
    ANativeWindow *a = anw(w);
    return a->cancelBuffer(a, container_of(buffer, ANativeWindowBuffer, handle));
}
These wrappers do no real work themselves; each one is a thin adapter that forwards straight to a method implemented elsewhere.
The variable a, obtained from the preview_stream_ops pointer via anw(), is in fact the mPreviewWindow that setPreviewWindow()/initialize() set up above,
and that mPreviewWindow is the surface handed down from the upper layers.
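We only looked at __cancel_buffer, but __enqueue_buffer, which is what PostFrame actually calls, is the same kind of shim, and anw() is just a lookup through the user field that setPreviewWindow()/initialize() filled in. The following is a rough sketch of that plumbing in CameraHardwareInterface.h, written from memory of the stock (non-CPCAM) build, so treat the details as assumptions rather than the exact OMAP code:

// The HAL-facing ops table lives inside a small wrapper that also remembers its owner.
struct camera_preview_window {
    struct preview_stream_ops nw;   // what the HAL sees (mHalPreviewWindow.nw)
    void *user;                     // set in setPreviewWindow() above
};

// Recover the ANativeWindow (i.e. mPreviewWindow, the surface handed down from
// CameraService) from the preview_stream_ops pointer the HAL passed back in.
static ANativeWindow *__to_anw(void *user)
{
    CameraHardwareInterface *__this =
            reinterpret_cast<CameraHardwareInterface *>(user);
    return __this->mPreviewWindow.get();
}
#define anw(n) __to_anw(((struct camera_preview_window *)n)->user)

// Presumed twin of __cancel_buffer: this is where mANativeWindow->enqueue_buffer()
// from PostFrame lands, and it simply forwards to ANativeWindow::queueBuffer.
static int __enqueue_buffer(struct preview_stream_ops* w, buffer_handle_t* buffer)
{
    ANativeWindow *a = anw(w);
    return a->queueBuffer(a, container_of(buffer, ANativeWindowBuffer, handle));
}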

Now up to the camera service layer. As I understand it, this is where the window parameter gets configured: connecting the native window API and so on.


status_t CameraService::Client::setPreviewWindow(const sp<IBinder>& binder,
       const sp<ANativeWindow>& window) {
    Mutex::Autolock lock(mLock);
    status_t result = checkPidAndHardware();
    if (result != NO_ERROR) return result;

    // return if no change in surface.
    if (binder == mSurface) {
        return NO_ERROR;
    }

    if (window != 0) {
        result = native_window_api_connect(window.get(), NATIVE_WINDOW_API_CAMERA);
        if (result != NO_ERROR) {
            LOGE("native_window_api_connect failed: %s (%d)", strerror(-result),
                    result);
            return result;
        }
    }

    // If preview has been already started, register preview buffers now.
    if (mHardware->previewEnabled()) {
        if (window != 0) {
            native_window_set_scaling_mode(window.get(), NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
            native_window_set_buffers_transform(window.get(), mOrientation);
            result = mHardware->setPreviewWindow(window);
        }
    }

    if (result == NO_ERROR) {
        // Everything has succeeded. Disconnect the old window and remember the
        // new window.
        disconnectWindow(mPreviewWindow);
        mSurface = binder;
        mPreviewWindow = window;
    } else {
        // Something went wrong after we connected to the new window, so
        // disconnect here.
        disconnectWindow(window);
    }

    return result;
}
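A note on the native_window_api_connect(), native_window_set_scaling_mode() and native_window_set_buffers_transform() calls above: they are not extra hooks, just the static inline helpers declared alongside the ANativeWindow struct, and each of them routes through the perform() hook we saw earlier (whose comment says not to call it directly). Roughly, and treating the exact shapes below as assumptions about window.h of this era:

static inline int native_window_api_connect(
        struct ANativeWindow* window, int api)
{
    return window->perform(window, NATIVE_WINDOW_API_CONNECT, api);
}

static inline int native_window_set_scaling_mode(
        struct ANativeWindow* window, int mode)
{
    return window->perform(window, NATIVE_WINDOW_SET_SCALING_MODE, mode);
}

static inline int native_window_set_buffers_transform(
        struct ANativeWindow* window, int transform)
{
    return window->perform(window, NATIVE_WINDOW_SET_BUFFERS_TRANSFORM, transform);
}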
I won't say much more about this function for now; I still don't entirely understand why it is done this way. What bothered me for a long time is where the cancelBuffer called underneath is actually implemented, so here is my current understanding, which may well be wrong.
Be warned that this is a big jump: the method below is the constructor of the SurfaceTextureClient class, from frameworks\base\libs\gui\SurfaceTextureClient.cpp.


SurfaceTextureClient::SurfaceTextureClient(
       const sp<ISurfaceTexture>& surfaceTexture)
{
   SurfaceTextureClient::init();
   SurfaceTextureClient::setISurfaceTexture(surfaceTexture);
}
First, the init() method:


void SurfaceTextureClient::init() {
    // Initialize the ANativeWindow function pointers.
    ANativeWindow::setSwapInterval = hook_setSwapInterval;
    ANativeWindow::dequeueBuffer = hook_dequeueBuffer;
    ANativeWindow::cancelBuffer = hook_cancelBuffer;
    ANativeWindow::lockBuffer = hook_lockBuffer;
    ANativeWindow::queueBuffer = hook_queueBuffer;
    ANativeWindow::query = hook_query;
    ANativeWindow::perform = hook_perform;

    const_cast<int&>(ANativeWindow::minSwapInterval) = 0;
    const_cast<int&>(ANativeWindow::maxSwapInterval) = 1;

    mReqWidth = 0;
    mReqHeight = 0;
    mReqFormat = 0;
    mReqUsage = 0;
    mTimestamp = NATIVE_WINDOW_TIMESTAMP_AUTO;
    mDefaultWidth = 0;
    mDefaultHeight = 0;
    mTransformHint = 0;
    mConnectedToCpu = false;
}
The key part is the block of function-pointer assignments at the top. As far as I can tell, this is where the cancelBuffer hook we were chasing finally lands, along with all the other hooks, and they all follow the same pattern. To keep the analysis manageable we'll take dequeueBuffer as the example, so first look at hook_dequeueBuffer:


int SurfaceTextureClient::hook_dequeueBuffer(ANativeWindow* window,
        ANativeWindowBuffer** buffer) {
    SurfaceTextureClient* c = getSelf(window);
    return c->dequeueBuffer(buffer);
}
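getSelf() is what makes these hooks work: SurfaceTextureClient derives (through its EGLNativeBase base class) from ANativeWindow, so the raw ANativeWindow pointer that was handed down to the camera HAL can simply be cast back to the C++ object. A sketch, assuming the header of the same era:

// SurfaceTextureClient is-an ANativeWindow, so the pointer stored as the preview
// window can be converted back with a plain static_cast.
static inline SurfaceTextureClient* getSelf(ANativeWindow* window) {
    return static_cast<SurfaceTextureClient*>(window);
}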
which in turn calls SurfaceTextureClient::dequeueBuffer:


int SurfaceTextureClient::dequeueBuffer(android_native_buffer_t** buffer) {
    LOGV("SurfaceTextureClient::dequeueBuffer");
    Mutex::Autolock lock(mMutex);
    int buf = -1;
    status_t result = mSurfaceTexture->dequeueBuffer(&buf, mReqWidth, mReqHeight, mReqFormat, mReqUsage);
    if (result < 0) {
        LOGV("dequeueBuffer: ISurfaceTexture::dequeueBuffer(%d, %d, %d, %d)"
             "failed: %d", mReqWidth, mReqHeight, mReqFormat, mReqUsage,
             result);
        return result;
    }
    sp<GraphicBuffer>& gbuf(mSlots[buf]);
    if (result & ISurfaceTexture::RELEASE_ALL_BUFFERS) {
        freeAllBuffers();
    }

    if ((result & ISurfaceTexture::BUFFER_NEEDS_REALLOCATION) || gbuf == 0) {
        result = mSurfaceTexture->requestBuffer(buf, &gbuf);
        if (result != NO_ERROR) {
            LOGE("dequeueBuffer: ISurfaceTexture::requestBuffer failed: %d",
                    result);
            return result;
        }
    }
    *buffer = gbuf.get();
    return OK;
}
Next we need to know what mSurfaceTexture is. Inside SurfaceTextureClient it is the ISurfaceTexture handed to the constructor above (stored via setISurfaceTexture). For a concrete example of where such a SurfaceTexture is created and bound to the camera, look at system\media\mca\filterpacks\videosrc\java\CameraSource.java:

private SurfaceTexture mSurfaceTexture;

So where is it instantiated?


@Override
public void open(FilterContext context) {
    if (mLogVerbose) Log.v(TAG, "Opening");
    // Open camera
    mCamera = Camera.open(mCameraId);

    // Set parameters
    getCameraParameters();
    mCamera.setParameters(mCameraParameters);

    // Create frame formats
    createFormats();

    // Bind it to our camera frame
    mCameraFrame = (GLFrame)context.getFrameManager().newBoundFrame(mOutputFormat,
                                                                    GLFrame.EXTERNAL_TEXTURE,
                                                                    0);
    mSurfaceTexture = new SurfaceTexture(mCameraFrame.getTextureId());
    try {
        mCamera.setPreviewTexture(mSurfaceTexture);
    } catch (IOException e) {
        throw new RuntimeException("Could not bind camera surface texture: " +
                                   e.getMessage() + "!");
    }

    // Connect SurfaceTexture to callback
    mSurfaceTexture.setOnFrameAvailableListener(onCameraFrameAvailableListener);
    // Start the preview
    mNewFrameAvailable = false;
    mCamera.startPreview();
}
We won't analyze the other parts of this method for now; the point is that a SurfaceTexture object is instantiated here. Its constructor lives in frameworks\base\libs\gui\SurfaceTexture.cpp:


SurfaceTexture::SurfaceTexture(GLuint tex, bool allowSynchronousMode,
        GLenum texTarget) :
    mDefaultWidth(1),
    mDefaultHeight(1),
    mPixelFormat(PIXEL_FORMAT_RGBA_8888),
    mBufferCount(MIN_ASYNC_BUFFER_SLOTS),
    mClientBufferCount(0),
    mServerBufferCount(MIN_ASYNC_BUFFER_SLOTS),
    mCurrentTexture(INVALID_BUFFER_SLOT),
    mCurrentTransform(0),
    mCurrentTimestamp(0),
    mNextTransform(0),
    mNextScalingMode(NATIVE_WINDOW_SCALING_MODE_FREEZE),
    mTexName(tex),
    mSynchronousMode(false),
    mAllowSynchronousMode(allowSynchronousMode),
    mConnectedApi(NO_CONNECTED_API),
    mAbandoned(false),
    mTexTarget(texTarget) {
    // Choose a name using the PID and a process-unique ID.
    mName = String8::format("unnamed-%d-%d", getpid(), createProcessUniqueId());

    ST_LOGV("SurfaceTexture::SurfaceTexture");
    sp<ISurfaceComposer> composer(ComposerService::getComposerService());
    mGraphicBufferAlloc = composer->createGraphicBufferAlloc();
    mNextCrop.makeInvalid();
    memcpy(mCurrentTransformMatrix, mtxIdentity,
            sizeof(mCurrentTransformMatrix));
}
Now back to the call mSurfaceTexture->dequeueBuffer(&buf, mReqWidth, mReqHeight, mReqFormat, mReqUsage);
this lands in SurfaceTexture::dequeueBuffer:


status_t SurfaceTexture::dequeueBuffer(int *outBuf, uint32_t w, uint32_t h,
        uint32_t format, uint32_t usage) {
    ST_LOGV("SurfaceTexture::dequeueBuffer");

    if ((w && !h) || (!w && h)) {
        ST_LOGE("dequeueBuffer: invalid size: w=%u, h=%u", w, h);
        return BAD_VALUE;
    }

    Mutex::Autolock lock(mMutex);

    status_t returnFlags(OK);

    int found, foundSync;
    int dequeuedCount = 0;
    bool tryAgain = true;
    while (tryAgain) {
        if (mAbandoned) {
            ST_LOGE("dequeueBuffer: SurfaceTexture has been abandoned!");
            return NO_INIT;
        }

        // We need to wait for the FIFO to drain if the number of buffer
        // needs to change.
        //
        // The condition "number of buffers needs to change" is true if
        // - the client doesn't care about how many buffers there are
        // - AND the actual number of buffer is different from what was
        //   set in the last setBufferCountServer()
        //                                      - OR -
        //   setBufferCountServer() was set to a value incompatible with
        //   the synchronization mode (for instance because the sync mode
        //   changed since)
        //
        // As long as this condition is true AND the FIFO is not empty, we
        // wait on mDequeueCondition.

        const int minBufferCountNeeded = mSynchronousMode ?
                MIN_SYNC_BUFFER_SLOTS : MIN_ASYNC_BUFFER_SLOTS;

        const bool numberOfBuffersNeedsToChange = !mClientBufferCount &&
                ((mServerBufferCount != mBufferCount) ||
                        (mServerBufferCount < minBufferCountNeeded));

        if (!mQueue.isEmpty() && numberOfBuffersNeedsToChange) {
            // wait for the FIFO to drain
            mDequeueCondition.wait(mMutex);
            // NOTE: we continue here because we need to reevaluate our
            // whole state (eg: we could be abandoned or disconnected)
            continue;
        }

        if (numberOfBuffersNeedsToChange) {
            // here we're guaranteed that mQueue is empty
            freeAllBuffersLocked();
            mBufferCount = mServerBufferCount;
            if (mBufferCount < minBufferCountNeeded)
                mBufferCount = minBufferCountNeeded;
            mCurrentTexture = INVALID_BUFFER_SLOT;
            returnFlags |= ISurfaceTexture::RELEASE_ALL_BUFFERS;
        }

        // look for a free buffer to give to the client
        found = INVALID_BUFFER_SLOT;
        foundSync = INVALID_BUFFER_SLOT;
        dequeuedCount = 0;
        for (int i = 0; i < mBufferCount; i++) {
            const int state = mSlots[i].mBufferState;
            if (state == BufferSlot::DEQUEUED) {
                dequeuedCount++;
            }

            // if buffer is FREE it CANNOT be current
            LOGW_IF((state == BufferSlot::FREE) && (mCurrentTexture==i),
                    "dequeueBuffer: buffer %d is both FREE and current!", i);

            if (ALLOW_DEQUEUE_CURRENT_BUFFER) {
                if (state == BufferSlot::FREE || i == mCurrentTexture) {
                    foundSync = i;
                    if (i != mCurrentTexture) {
                        found = i;
                        break;
                    }
                }
            } else {
                if (state == BufferSlot::FREE) {
                    foundSync = i;
                    found = i;
                    break;
                }
            }
        }

        // clients are not allowed to dequeue more than one buffer
        // if they didn't set a buffer count.
        if (!mClientBufferCount && dequeuedCount) {
            return -EINVAL;
        }

        // See whether a buffer has been queued since the last setBufferCount so
        // we know whether to perform the MIN_UNDEQUEUED_BUFFERS check below.
        bool bufferHasBeenQueued = mCurrentTexture != INVALID_BUFFER_SLOT;
        if (bufferHasBeenQueued) {
            // make sure the client is not trying to dequeue more buffers
            // than allowed.
            const int avail = mBufferCount - (dequeuedCount+1);
            if (avail < (MIN_UNDEQUEUED_BUFFERS-int(mSynchronousMode))) {
                ST_LOGE("dequeueBuffer: MIN_UNDEQUEUED_BUFFERS=%d exceeded "
                        "(dequeued=%d)",
                        MIN_UNDEQUEUED_BUFFERS-int(mSynchronousMode),
                        dequeuedCount);
                return -EBUSY;
            }
        }

        // we're in synchronous mode and didn't find a buffer, we need to wait
        // for some buffers to be consumed
        tryAgain = mSynchronousMode && (foundSync == INVALID_BUFFER_SLOT);
        if (tryAgain) {
            mDequeueCondition.wait(mMutex);
        }
    }

    if (mSynchronousMode && found == INVALID_BUFFER_SLOT) {
        // foundSync guaranteed to be != INVALID_BUFFER_SLOT
        found = foundSync;
    }

    if (found == INVALID_BUFFER_SLOT) {
        return -EBUSY;
    }

    const int buf = found;
    *outBuf = found;

    const bool useDefaultSize = !w && !h;
    if (useDefaultSize) {
        // use the default size
        w = mDefaultWidth;
        h = mDefaultHeight;
    }

    const bool updateFormat = (format != 0);
    if (!updateFormat) {
        // keep the current (or default) format
        format = mPixelFormat;
    }

    // buffer is now in DEQUEUED (but can also be current at the same time,
    // if we're in synchronous mode)
    mSlots[buf].mBufferState = BufferSlot::DEQUEUED;

    const sp<GraphicBuffer>& buffer(mSlots[buf].mGraphicBuffer);
    if ((buffer == NULL) ||
        (uint32_t(buffer->width) != w) ||
        (uint32_t(buffer->height) != h) ||
        (uint32_t(buffer->format) != format) ||
        ((uint32_t(buffer->usage) & usage) != usage))
    {
        usage |= GraphicBuffer::USAGE_HW_TEXTURE;
        status_t error;
        sp<GraphicBuffer> graphicBuffer(
                mGraphicBufferAlloc->createGraphicBuffer(
                        w, h, format, usage, &error));
        if (graphicBuffer == 0) {
            ST_LOGE("dequeueBuffer: SurfaceComposer::createGraphicBuffer "
                    "failed");
            return error;
        }
        if (updateFormat) {
            mPixelFormat = format;
        }
        mSlots[buf].mGraphicBuffer = graphicBuffer;
        mSlots[buf].mRequestBufferCalled = false;
        if (mSlots[buf].mEglImage != EGL_NO_IMAGE_KHR) {
            eglDestroyImageKHR(mSlots[buf].mEglDisplay, mSlots[buf].mEglImage);
            mSlots[buf].mEglImage = EGL_NO_IMAGE_KHR;
            mSlots[buf].mEglDisplay = EGL_NO_DISPLAY;
        }
        returnFlags |= ISurfaceTexture::BUFFER_NEEDS_REALLOCATION;
    }
    return returnFlags;
}
At this point all the methods the lower layers were calling have found their implementations, and we have effectively crossed into the graphics/UI side of the system. I meant to stop here, but let's dig a little further into this method (it was a struggle to read, honestly).

When a suitable buffer has already been allocated for a slot, dequeueBuffer simply returns that slot index. Otherwise a new buffer has to be allocated through mGraphicBufferAlloc, which the constructor above obtained with mGraphicBufferAlloc = composer->createGraphicBufferAlloc();
createGraphicBufferAlloc() is implemented, as a Binder proxy, in frameworks\base\libs\gui\ISurfaceComposer.cpp:
virtual sp<IGraphicBufferAlloc> createGraphicBufferAlloc()
{
    uint32_t n;
    Parcel data, reply;
    data.writeInterfaceToken(ISurfaceComposer::getInterfaceDescriptor());
    remote()->transact(BnSurfaceComposer::CREATE_GRAPHIC_BUFFER_ALLOC, data, &reply);
    return interface_cast<IGraphicBufferAlloc>(reply.readStrongBinder());
}
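This is only the Binder proxy side; the CREATE_GRAPHIC_BUFFER_ALLOC transaction is served by SurfaceFlinger, which, as far as I can tell (take this as a sketch rather than verified code), simply creates a GraphicBufferAlloc object and returns it:

// Sketch of the service side in SurfaceFlinger.cpp:
sp<IGraphicBufferAlloc> SurfaceFlinger::createGraphicBufferAlloc()
{
    // GraphicBufferAlloc is the Bn implementation of IGraphicBufferAlloc; the strong
    // binder to it is what the proxy above reads back out of the reply parcel.
    sp<GraphicBufferAlloc> gba(new GraphicBufferAlloc());
    return gba;
}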
From there, dequeueBuffer goes on to call IGraphicBufferAlloc::createGraphicBuffer(); the proxy side is in frameworks\base\libs\gui\IGraphicBufferAlloc.cpp:
virtual sp<GraphicBuffer> createGraphicBuffer(uint32_t w, uint32_t h,
        PixelFormat format, uint32_t usage, status_t* error) {
    Parcel data, reply;
    data.writeInterfaceToken(IGraphicBufferAlloc::getInterfaceDescriptor());
    data.writeInt32(w);
    data.writeInt32(h);
    data.writeInt32(format);
    data.writeInt32(usage);
    remote()->transact(CREATE_GRAPHIC_BUFFER, data, &reply);
    sp<GraphicBuffer> graphicBuffer;
    status_t result = reply.readInt32();
    if (result == NO_ERROR) {
        graphicBuffer = new GraphicBuffer();
        reply.read(*graphicBuffer);
        // reply.readStrongBinder();
        // here we don't even have to read the BufferReference from
        // the parcel, it'll die with the parcel.
    }
    *error = result;
    return graphicBuffer;
}



Here a GraphicBuffer object is instantiated on the client side and populated by unflattening it from the reply parcel; that is the object graphicBuffer ends up holding.
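On the service side of that CREATE_GRAPHIC_BUFFER transaction the allocation is done by GraphicBufferAlloc, which essentially just news up a GraphicBuffer (and GraphicBuffer in turn asks the gralloc HAL for the memory). Roughly, again from memory, so treat it as an assumption:

// Sketch of GraphicBufferAlloc::createGraphicBuffer (service side):
sp<GraphicBuffer> GraphicBufferAlloc::createGraphicBuffer(uint32_t w, uint32_t h,
        PixelFormat format, uint32_t usage, status_t* error)
{
    // The GraphicBuffer constructor allocates the backing memory via gralloc.
    sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(w, h, format, usage));
    status_t err = graphicBuffer->initCheck();
    *error = err;
    if (err != 0 || graphicBuffer->handle == 0) {
        // Allocation failed: return NULL so the proxy side only sees the error code.
        graphicBuffer.clear();
    }
    return graphicBuffer;
}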

This article was hard to write; I don't understand everything thoroughly yet, and some of the reasoning above may simply be wrong. To be revised.

