Android Camera: Summary

I have churned out a pile of articles about the Android camera stack, most of them disorganized, with little understanding of my own, just stacking material upward, and not much help toward grasping the whole flow. Today it finally feels like it is starting to click.

The flow breaks down into roughly three parts: creating the session (which at the native layer means creating streams), creating CaptureRequests, and processing the submitted CaptureRequests.

All three phases are asynchronous and are tied together through member variables and index numbers. Notably, graphic buffers are only allocated when a CaptureRequest is submitted.

1] Create and configure streams. The end result is a mapping of stream IDs to stream instances:

    typedef KeyedVector<int, sp<camera3::Camera3OutputStreamInterface>> StreamSet;

    StreamSet                  mOutputStreams;
    mOutputStreams.add(mNextStreamId, newStream);

2] Submit CaptureRequests; they end up in mRequestQueue.
3] Camera3Device::RequestThread::threadLoop() drains mRequestQueue (a standalone sketch of this ID-based bookkeeping follows below).
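To make that bookkeeping concrete, here is a minimal standalone sketch of how a stream ID recorded at configuration time is looked back up when a request is processed. It uses std::map and std::shared_ptr in place of Android's KeyedVector and sp<>, and all names are hypothetical:

    #include <iostream>
    #include <map>
    #include <memory>
    #include <vector>

    // Stand-in for Camera3OutputStream: created at configure time, reused per request.
    struct Stream {
        int width, height;
    };

    int main() {
        // Phase 1: configure streams; the map plays the role of mOutputStreams.
        std::map<int, std::shared_ptr<Stream>> outputStreams;
        int nextStreamId = 0;
        outputStreams[nextStreamId++] = std::make_shared<Stream>(Stream{1920, 1080});
        outputStreams[nextStreamId++] = std::make_shared<Stream>(Stream{640, 480});

        // Phase 2: a submitted request records only the IDs of its target streams.
        std::vector<int> requestedStreamIds = {0, 1};

        // Phase 3: the request thread resolves the IDs back to stream instances;
        // buffers would be dequeued only here, not at configuration time.
        for (int id : requestedStreamIds) {
            const auto& s = outputStreams.at(id);
            std::cout << "stream " << id << ": " << s->width << "x" << s->height << "\n";
        }
        return 0;
    }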
 
  

Structurally the code splits into the following layers: the Java Camera API2, the native layer, HIDL, and the HAL.


1] mCameraManager = (CameraManager) this.getSystemService(Context.CAMERA_SERVICE)

2] Call openCamera, passing a CameraDevice.StateCallback; the created CameraDevice is delivered in the onOpened callback.

3] Call createCaptureSession on the CameraDevice, passing a CameraCaptureSession.StateCallback; the created CameraCaptureSession is delivered in the onConfigured callback.

4] Call createCaptureRequest on the CameraDevice to build a CaptureRequest.

5] Submit the CaptureRequest through functions such as capture on the CameraCaptureSession created in step 3].

6] Handle the CaptureResult in callbacks. A CaptureResult consists of two parts: the buffer and the metadata.

createCaptureSession: creating and configuring streams

    public void createCaptureSession(List<Surface> outputs,
            CameraCaptureSession.StateCallback callback, Handler handler) {
        List<OutputConfiguration> outConfigurations = new ArrayList<>(outputs.size());
        for (Surface surface : outputs) {
            outConfigurations.add(new OutputConfiguration(surface));
        }
        createCaptureSessionInternal(null, outConfigurations, callback, handler,
                /*operatingMode*/ICameraDeviceUser.NORMAL_MODE);
    }

Each Java-layer Surface is converted into an OutputConfiguration, which is what actually crosses the binder boundary.

frameworks/base/core/java/android/hardware/camera2/params/OutputConfiguration.java

    public OutputConfiguration(@NonNull Surface surface) {
        this(SURFACE_GROUP_ID_NONE, surface, ROTATION_0);
    }

    /* Derive the various surface attributes from the input surface. */
    public OutputConfiguration(int surfaceGroupId, @NonNull Surface surface, int rotation) {
        checkNotNull(surface, "Surface must not be null");
        checkArgumentInRange(rotation, ROTATION_0, ROTATION_270, "Rotation constant");
        mSurfaceGroupId = surfaceGroupId;
        mSurfaceType = SURFACE_TYPE_UNKNOWN;
        mSurfaces = new ArrayList<Surface>();
        mSurfaces.add(surface);
        mRotation = rotation;
        mConfiguredSize = SurfaceUtils.getSurfaceSize(surface);
        mConfiguredFormat = SurfaceUtils.getSurfaceFormat(surface);
        mConfiguredDataspace = SurfaceUtils.getSurfaceDataspace(surface);
        mConfiguredGenerationId = surface.getGenerationId();
        mIsDeferredConfig = false;
        mIsShared = false;
    }

class OutputConfiguration : public android::Parcelable {
    std::vector<sp<IGraphicBufferProducer>> mGbps; /* mGbps is filled from the parcel too; see av/camera/camera2/OutputConfiguration.cpp */
    int                        mRotation;
    int                        mSurfaceSetID;
    int                        mSurfaceType;
    int                        mWidth;
    int                        mHeight;
    bool                       mIsDeferred;
    bool                       mIsShared;
}


Camera API2 reaches av/services/camera/libcameraservice/api2/CameraDeviceClient.cpp through mRemoteDevice; every call from the Java layer into the native layer passes through this interface file.

createStream

binder::Status CameraDeviceClient::createStream(
        const hardware::camera2::params::OutputConfiguration &outputConfiguration,
        /*out*/
        int32_t* newStreamId) {

    const std::vector<sp<IGraphicBufferProducer>>& bufferProducers =
            outputConfiguration.getGraphicBufferProducers();
    // getGraphicBufferProducers used to look mysterious, but its implementation in
    // av/camera/camera2/OutputConfiguration.cpp is trivial: one producer per surface,
    // collected via mGbps.push_back(surface.graphicBufferProducer).
    size_t numBufferProducers = bufferProducers.size();

    int surfaceType = outputConfiguration.getSurfaceType();
    std::vector<sp<Surface>> surfaces;
    std::vector<sp<IBinder>> binders;

    OutputStreamInfo streamInfo;
    bool isStreamInfoValid = false;
    /* One stream per buffer producer, i.e. per surface. */
    for (auto& bufferProducer : bufferProducers) {
        // Don't create multiple streams for the same target surface
        sp<IBinder> binder = IInterface::asBinder(bufferProducer);
        ssize_t index = mStreamMap.indexOfKey(binder);

        sp<Surface> surface; /* the native Surface is created here */
        res = createSurfaceFromGbp(streamInfo, isStreamInfoValid, surface, bufferProducer);

        binders.push_back(IInterface::asBinder(bufferProducer));
        surfaces.push_back(surface);
    }
    /* Hand off to the Camera3Device interface to create the stream. */
    mDevice->createStream(surfaces, deferredConsumer, streamInfo.width,
            streamInfo.height, streamInfo.format, streamInfo.dataSpace,
            static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
            &streamId, outputConfiguration.getSurfaceSetID(), isShared);
   *newStreamId = streamId;

}

binder::Status CameraDeviceClient::createSurfaceFromGbp(
        OutputStreamInfo& streamInfo, bool isStreamInfoValid,
        sp<Surface>& surface, const sp<IGraphicBufferProducer>& gbp) {
   /* Create the Surface on the native side and fill in OutputStreamInfo. */

   surface = new Surface(gbp, useAsync);

}

status_t Camera3Device::createStream(const std::vector<sp<Surface>>& consumers,
        bool hasDeferredConsumer, uint32_t width, uint32_t height, int format,
        android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
        int streamSetId, bool isShared, uint64_t consumerUsage) {
    sp<Camera3OutputStream> newStream;
    newStream = new Camera3OutputStream(mNextStreamId, consumers[0],
                width, height, format, dataSpace, rotation,
                mTimestampOffset, streamSetId);
   mOutputStreams.add(mNextStreamId, newStream);
}

configureStreams

status_t Camera3Device::configureStreams(int operatingMode) {
    return configureStreamsLocked(operatingMode);
}

status_t Camera3Device::configureStreamsLocked(int operatingMode) {
    camera3_stream_configuration config;
    config.operation_mode = mOperatingMode;
    config.num_streams = (mInputStream != NULL) + mOutputStreams.size();

    Vector<camera3_stream_t *> streams;
    streams.setCapacity(config.num_streams);

    for (size_t i = 0; i < mOutputStreams.size(); i++) {
        camera3_stream_t *outputStream;
        /* from Camera3OutputStream to the camera3_stream_t the HAL sees */
        outputStream = mOutputStreams.editValueAt(i)->startConfiguration();
        streams.add(outputStream);
    }
    // Do the HAL configuration; will potentially touch stream
    // max_buffers, usage, priv fields.

    res = mInterface->configureStreams(&config);
    for (size_t i = 0; i < mOutputStreams.size(); i++) {
        sp<Camera3OutputStreamInterface> outputStream =
            mOutputStreams.editValueAt(i);
        if (outputStream->isConfiguring() && !outputStream->isConsumerConfigurationDeferred()) {
            res = outputStream->finishConfiguration(); /* consumer-side setup happens here */
        }
    }

    // Request thread needs to know to avoid using repeat-last-settings protocol
    // across configure_streams() calls
    mRequestThread->configurationComplete(mIsConstrainedHighSpeedConfiguration);
}

status_t Camera3Stream::finishConfiguration() {
    configureQueueLocked();
    mState = STATE_CONFIGURED;
    return res;
}

status_t Camera3OutputStream::configureQueueLocked() {
    status_t res;

    if ((res = Camera3IOStreamBase::configureQueueLocked()) != OK) { /* essentially a no-op here */
        return res;
    }
    if ((res = configureConsumerQueueLocked()) != OK) {
        return res;
    }
    return OK;
}

status_t Camera3OutputStream::configureConsumerQueueLocked() {
    // Configure consumer-side ANativeWindow interface. The listener may be used
    // to notify buffer manager (if it is used) of the returned buffers.
    res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA,
            /*listener*/mBufferReleasedListener,
            /*reportBufferRemoval*/true);

}

 mInterface->configureStreams(&config)

hardware/libhardware/include/hardware/camera3.h
This header defines the data structures handed down to the HAL; whatever the framework does ultimately takes effect only through these structures.

typedef struct camera3_stream {
    int stream_type;

    uint32_t width;
    uint32_t height;
    int format;
    uint32_t usage;
    uint32_t max_buffers;
    void *priv;
    android_dataspace_t data_space;
    int rotation;
    void *reserved[7];
} camera3_stream_t;


typedef struct camera3_stream_configuration {
    uint32_t num_streams;
    camera3_stream_t **streams;
    uint32_t operation_mode;
} camera3_stream_configuration_t;
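For context, here is a hedged sketch of what a HAL's configure_streams hook might do with this structure. The camera3.h contract is that the HAL fills in max_buffers and usage on each stream during this call (the framework comment above, "will potentially touch stream max_buffers, usage, priv fields", refers to exactly this); the specific values below are hypothetical:

    // Minimal sketch of a HAL-side configure_streams, assuming the camera3.h
    // definitions above; a real HAL would also validate formats and rebuild
    // its pipeline state.
    static int sketch_configure_streams(camera3_stream_configuration_t *config) {
        for (uint32_t i = 0; i < config->num_streams; i++) {
            camera3_stream_t *stream = config->streams[i];
            if (stream->stream_type == CAMERA3_STREAM_OUTPUT) {
                // Tell the framework how deep the pipeline is and which gralloc
                // usage bits the producer side must allocate with.
                stream->max_buffers = 4;                       // hypothetical depth
                stream->usage |= GRALLOC_USAGE_HW_CAMERA_WRITE;
            }
        }
        return 0;
    }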

This shows that createCaptureSession does not allocate any graphic buffers, even though native Surfaces are created along the way.


How the graphic buffer comes into being

The buffer handed to the HAL is a buffer_handle_t, i.e. the handle member of an ANativeWindowBuffer; the owning struct can be recovered from it with container_of:

ANativeWindowBuffer *anwBuffer = container_of(buffer.buffer, ANativeWindowBuffer, handle);

typedef struct ANativeWindowBuffer
{
    struct android_native_base_t common;
    int width;
    int height;
    int stride;
    int format;
    int usage_deprecated;
    uintptr_t layerCount;
    void* reserved[1];
    const native_handle_t* handle;
    uint64_t usage;
    // we needed extra space for storing the 64-bits usage flags
    // the number of slots to use from reserved_proc depends on the
    // architecture.
    void* reserved_proc[8 - (sizeof(uint64_t) / sizeof(void*))];
} ANativeWindowBuffer_t;

typedef struct camera3_stream_buffer {
    camera3_stream_t *stream;
    buffer_handle_t *buffer;
    int status;
    int acquire_fence;
    int release_fence;

} camera3_stream_buffer_t;
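The container_of above is the classic offsetof trick: given a pointer to the handle member, it recovers the ANativeWindowBuffer that contains it. A self-contained sketch of the idiom, with simplified stand-in structs rather than the real ones:

    #include <cassert>
    #include <cstddef>

    // Recover a pointer to the enclosing struct from a pointer to one of its members.
    #define container_of(ptr, type, member) \
        reinterpret_cast<type *>(reinterpret_cast<char *>(ptr) - offsetof(type, member))

    struct Handle { int fd; };

    struct Buffer {            // simplified stand-in for ANativeWindowBuffer
        int width, height;
        Handle handle;         // stand-in for the native_handle_t* member
    };

    int main() {
        Buffer buf{1920, 1080, {42}};
        Handle *h = &buf.handle;                  // what camera3_stream_buffer stores
        Buffer *back = container_of(h, Buffer, handle);
        assert(back == &buf);                     // we recovered the owning buffer
        return 0;
    }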

Where graphic memory actually gets allocated:

Camera3Device::RequestThread::threadLoop() ->
prepareHalRequests() -> getBuffer


status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer,
        const std::vector<size_t>& surface_ids) {
    getBufferLocked(buffer, surface_ids);
    return res;

}

status_t Camera3OutputStream::getBufferLocked(camera3_stream_buffer *buffer,
        const std::vector<size_t>&) {
    ANativeWindowBuffer* anb;
    getBufferLockedCommon(&anb, &fenceFd);
    /**
     * FenceFD now owned by HAL except in case of error,
     * in which case we reassign it to acquire_fence
     */
    handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
                        /*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK, /*output*/true);
    return OK;
}

/*
 * dequeueBuffer on the ANativeWindow yields an ANativeWindowBuffer whose
 * handle member points to the graphic buffer.
 */
status_t Camera3OutputStream::getBufferLockedCommon(ANativeWindowBuffer** anb, int* fenceFd) {
        sp<ANativeWindow> currentConsumer = mConsumer; /* assigning a Surface straight to an ANativeWindow? Yes: Surface derives from ANativeWindow */
        nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);
        res = currentConsumer->dequeueBuffer(currentConsumer.get(), anb, fenceFd);
        nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
        mDequeueBufferLatency.add(dequeueStart, dequeueEnd);
}

/* fill in the camera3_stream_buffer */
void Camera3IOStreamBase::handoutBufferLocked(camera3_stream_buffer &buffer,
                                              buffer_handle_t *handle,
                                              int acquireFence,
                                              int releaseFence,
                                              camera3_buffer_status_t status,
                                              bool output) {
    // Handing out a raw pointer to this object. Increment internal refcount.
    incStrong(this);
    buffer.stream = this;
    buffer.buffer = handle;
    buffer.acquire_fence = acquireFence;
    buffer.release_fence = releaseFence;
    buffer.status = status;

}

CameraDeviceClient::submitRequestList

The CaptureRequest that crosses binder into the native layer:

struct CaptureRequest : public Parcelable {
    CameraMetadata          mMetadata;
    Vector<sp<Surface>>     mSurfaceList;
    bool                    mIsReprocess;

};

And the capture request that ultimately reaches the HAL:

typedef struct camera3_capture_request {
    uint32_t frame_number;
    const camera_metadata_t *settings;
    camera3_stream_buffer_t *input_buffer;
    uint32_t num_output_buffers;
    const camera3_stream_buffer_t *output_buffers;

} camera3_capture_request_t;

binder::Status CameraDeviceClient::submitRequestList(
        const std::vector<hardware::camera2::CaptureRequest>& requests,
        bool streaming,
        /*out*/
        hardware::camera2::utils::SubmitInfo *submitInfo) {
    List<const CameraMetadata> metadataRequestList;
    std::list<const SurfaceMap> surfaceMapList;
    submitInfo->mRequestId = mRequestIdCounter;

    for (auto&& request: requests) {
        CameraMetadata metadata(request.mMetadata);
        /**
         * Write in the output stream IDs and map from stream ID to surface ID
         * which we calculate from the capture request's list of surface target
         */
        SurfaceMap surfaceMap;
        Vector<int32_t> outputStreamIds;
        for (sp<Surface> surface : request.mSurfaceList) {
            if (surface == 0) continue;
            sp gbp = surface->getIGraphicBufferProducer();
            int idx = mStreamMap.indexOfKey(IInterface::asBinder(gbp));
            const StreamSurfaceId& streamSurfaceId = mStreamMap.valueAt(idx);
            if (surfaceMap.find(streamSurfaceId.streamId()) == surfaceMap.end()) {
                surfaceMap[streamSurfaceId.streamId()] = std::vector<size_t>();
                outputStreamIds.push_back(streamSurfaceId.streamId());
            }
            surfaceMap[streamSurfaceId.streamId()].push_back(streamSurfaceId.surfaceId());
            ALOGV("%s: Camera %s: Appending output stream %d surface %d to request",
                    __FUNCTION__, mCameraIdStr.string(), streamSurfaceId.streamId(),
                    streamSurfaceId.surfaceId());
        }
        metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0], outputStreamIds.size());
        metadata.update(ANDROID_REQUEST_ID, &(submitInfo->mRequestId), /*size*/1);
        loopCounter++; // loopCounter starts from 1
        ALOGV("%s: Camera %s: Creating request with ID %d (%d of %zu)",
                __FUNCTION__, mCameraIdStr.string(), submitInfo->mRequestId,
                loopCounter, requests.size());
        metadataRequestList.push_back(metadata);
        surfaceMapList.push_back(surfaceMap);
    }
    mRequestIdCounter++;
   {
        err = mDevice->captureList(metadataRequestList, surfaceMapList,
                &(submitInfo->mLastFrameNumber));
        ALOGV("%s: requestId = %d ", __FUNCTION__, submitInfo->mRequestId);
    }
    ALOGV("%s: Camera %s: End of function", __FUNCTION__, mCameraIdStr.string());
    return res;
}

status_t Camera3Device::captureList(const List<const CameraMetadata> &requests,
                                    const std::list<const SurfaceMap> &surfaceMaps,
                                    int64_t *lastFrameNumber) {
    return submitRequestsHelper(requests, surfaceMaps, /*repeating*/false, lastFrameNumber);
}

status_t Camera3Device::submitRequestsHelper(
        const List<const CameraMetadata> &requests,
        const std::list<const SurfaceMap> &surfaceMaps,
        bool repeating,
        /*out*/
        int64_t *lastFrameNumber) {
    RequestList requestList;
    convertMetadataListToRequestListLocked(requests, surfaceMaps,
            repeating, /*out*/&requestList);
    mRequestThread->queueRequestList(requestList, lastFrameNumber);
}

status_t Camera3Device::RequestThread::queueRequestList(
        List<sp<CaptureRequest>> &requests,
        /*out*/
        int64_t *lastFrameNumber) {
    for (List<sp<CaptureRequest>>::iterator it = requests.begin(); it != requests.end();
            ++it) {
        mRequestQueue.push_back(*it); /* finally each CaptureRequest lands in mRequestQueue */
    }
    return OK;

}
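The handoff from queueRequestList to threadLoop is a textbook producer/consumer queue: binder threads push requests, and the request thread sleeps until work arrives. A minimal standalone sketch of the pattern (std::thread and std::condition_variable in place of Android's Thread class; names hypothetical):

    #include <condition_variable>
    #include <iostream>
    #include <list>
    #include <mutex>
    #include <thread>

    std::mutex gMutex;
    std::condition_variable gCond;
    std::list<int> gRequestQueue;          // stand-in for mRequestQueue
    bool gDone = false;

    void queueRequest(int req) {           // binder-thread side (queueRequestList)
        std::lock_guard<std::mutex> lock(gMutex);
        gRequestQueue.push_back(req);
        gCond.notify_one();
    }

    void requestThreadLoop() {             // RequestThread::threadLoop side
        for (;;) {
            std::unique_lock<std::mutex> lock(gMutex);
            gCond.wait(lock, [] { return !gRequestQueue.empty() || gDone; });
            if (gRequestQueue.empty()) return;      // shutting down
            int req = gRequestQueue.front();
            gRequestQueue.pop_front();
            lock.unlock();                          // process outside the lock
            std::cout << "processing request " << req << "\n";
        }
    }

    int main() {
        std::thread t(requestThreadLoop);
        for (int i = 0; i < 3; i++) queueRequest(i);
        { std::lock_guard<std::mutex> lock(gMutex); gDone = true; }
        gCond.notify_one();
        t.join();
        return 0;
    }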

RequestThread::threadLoop processes the CaptureRequests

bool Camera3Device::RequestThread::threadLoop() {
    // Wait for the next batch of requests.
    waitForNextRequestBatch();
    // Prepare a batch of HAL requests and output buffers.
    res = prepareHalRequests();
    // Inform waitUntilRequestProcessed thread of a new request ID
    {
        Mutex::Autolock al(mLatestRequestMutex);

        mLatestRequestId = latestRequestId;
        mLatestRequestSignal.signal();
    }
    nsecs_t tRequestStart = systemTime(SYSTEM_TIME_MONOTONIC);
    if (mInterface->supportBatchRequest()) {
        submitRequestSuccess = sendRequestsBatch();
    } else {
        submitRequestSuccess = sendRequestsOneByOne();
    }
    nsecs_t tRequestEnd = systemTime(SYSTEM_TIME_MONOTONIC);
    mRequestLatency.add(tRequestStart, tRequestEnd);

}

status_t Camera3Device::RequestThread::prepareHalRequests() {
    for (size_t i = 0; i < mNextRequests.size(); i++) {
        auto& nextRequest = mNextRequests.editItemAt(i);
        sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
        camera3_capture_request_t* halRequest = &nextRequest.halRequest;
        Vector<camera3_stream_buffer_t>* outputBuffers = &nextRequest.outputBuffers;

        // Prepare a request to HAL
        halRequest->frame_number = captureRequest->mResultExtras.frameNumber;
        outputBuffers->insertAt(camera3_stream_buffer_t(), 0,
                captureRequest->mOutputStreams.size());
        halRequest->output_buffers = outputBuffers->array();
        for (size_t j = 0; j < captureRequest->mOutputStreams.size(); j++) {
            sp<Camera3OutputStreamInterface> outputStream =
                    captureRequest->mOutputStreams.editItemAt(j);
            res = outputStream->getBuffer(&outputBuffers->editItemAt(j),
                    captureRequest->mOutputSurfaces[j]);
            halRequest->num_output_buffers++;
        }
        totalNumBuffers += halRequest->num_output_buffers;
        // Log request in the in-flight queue
        sp<Camera3Device> parent = mParent.promote();
        res = parent->registerInFlight(halRequest->frame_number,
                totalNumBuffers, captureRequest->mResultExtras,
                /*hasInput*/halRequest->input_buffer != NULL,
                hasCallback, halRequest,
                calculateMaxExpectedDuration(halRequest->settings));
    }
}

The interface ultimately invoked is processCaptureRequest(&nextRequest.halRequest); the parameters handed down to the HAL include the camera_metadata_t settings and the camera3_stream_buffer_t output buffers.

bool Camera3Device::RequestThread::sendRequestsOneByOne() {
    for (auto& nextRequest : mNextRequests) {
        // Submit request and block until ready for next one
        ATRACE_ASYNC_BEGIN("frame capture", nextRequest.halRequest.frame_number);
        mInterface->processCaptureRequest(&nextRequest.halRequest);

    }
    return true;
}
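On the far side of this call the HAL receives the camera3_capture_request_t assembled above. A hedged sketch of the HAL-side contract (the synchronous fence wait is shown for simplicity; a real HAL queues the work and returns results asynchronously via process_capture_result):

    // Sketch of a HAL-side process_capture_request, assuming the camera3.h
    // structures shown earlier; error handling and the result path are omitted.
    #include <sync/sync.h>   // sync_wait(), from libsync
    #include <unistd.h>      // close()

    static int sketch_process_capture_request(camera3_capture_request_t *request) {
        for (uint32_t i = 0; i < request->num_output_buffers; i++) {
            const camera3_stream_buffer_t *sb = &request->output_buffers[i];
            // The acquire fence is owned by the HAL now: wait for the producer
            // side to release the buffer, then close the fence fd.
            if (sb->acquire_fence != -1) {
                sync_wait(sb->acquire_fence, /*timeout ms*/ 3000);
                close(sb->acquire_fence);
            }
            // *sb->buffer is the gralloc buffer_handle_t that the ISP/driver
            // fills for stream sb->stream, using request->settings.
        }
        // Metadata and filled buffers are returned later through
        // process_capture_result(), keyed by request->frame_number.
        return 0;
    }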
