Developing Camera Applications at the Android Native Layer

Most Android camera applications today are built on the standard Android API1/API2 interfaces, but camera applications can also be developed directly at the Android Native layer, and in more than one way. We are aware of at least three approaches:

  1. Talk to CameraService directly (the Android NDK library libcamera2ndk takes this approach)
  2. Talk to the CameraProvider directly (Qualcomm's QVRService does this)
  3. Call the camx interfaces directly (Hal3Test does this)

Starting with Android 7.0 (API level 24), the Android NDK ships libcamera2ndk.so, which provides APIs for developing camera applications at the Native layer; android_graphics_cts_CameraGpuCtsActivity.cpp is a useful reference. Under the hood it simply talks to CameraService directly. Because libcamera2ndk.so cannot be modified, we did not use this library directly.
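
For readers who only need approach 1 and can live with the stock behavior, the public NDK route requires no framework source tree at all. Below is a minimal sketch of opening a camera through libcamera2ndk (error handling and session/request setup omitted; openFirstCamera is a hypothetical helper name):

    #include <camera/NdkCameraManager.h>

    static void onDisconnected(void* /*ctx*/, ACameraDevice* /*dev*/) {}
    static void onError(void* /*ctx*/, ACameraDevice* /*dev*/, int /*err*/) {}

    void openFirstCamera() {
        ACameraManager* mgr = ACameraManager_create();
        ACameraIdList* idList = nullptr;
        ACameraManager_getCameraIdList(mgr, &idList);

        ACameraDevice_StateCallbacks cbs = { nullptr, onDisconnected, onError };
        ACameraDevice* device = nullptr;
        if (idList != nullptr && idList->numCameras > 0) {
            ACameraManager_openCamera(mgr, idList->cameraIds[0], &cbs, &device);
        }
        // ... create an ACameraCaptureSession and submit capture requests here ...

        if (device != nullptr) ACameraDevice_close(device);
        ACameraManager_deleteCameraIdList(idList);
        ACameraManager_delete(mgr);
    }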

Pros and cons of developing a camera application directly at the Android Native layer:

Advantages:

  1. Dropping the Java-layer calls leaves some room for performance gains.
  2. Most high-performance algorithms are written in C/C++, so developing the camera application at the Native layer makes algorithm integration easier.

Disadvantages:

  1. Reference material is scarce and development is somewhat harder; you will run into plenty of bugs, though all of them are solvable.

Below I only walk through the first approach (an Android source tree is required). I have not actually implemented the second and third approaches, so I will not cover them.

1. Native Camera Development by Talking to CameraService Directly

1.1 CameraService Interface Definitions

All of CameraService's public interfaces are defined in ICameraService.aidl. The main ones are:

    /**
     * Open a camera device through the new camera API
     * Only supported for device HAL versions >= 3.2
     */
    ICameraDeviceUser connectDevice(ICameraDeviceCallbacks callbacks,
            String cameraId,
            String opPackageName,
            int clientUid);
    CameraStatus[] addListener(ICameraServiceListener listener);            
    CameraMetadataNative getCameraCharacteristics(String cameraId);            
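
(In the AOSP tree these AIDL files live under frameworks/av/camera/aidl/android/hardware/, e.g. ICameraService.aidl and camera2/ICameraDeviceUser.aidl.)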

1.2 Obtaining CameraService

First, fetch CameraService from the IServiceManager:

    sp<IServiceManager> sm = defaultServiceManager();
    sp<IBinder> binder = sm->getService(String16("media.camera"));
    sp<ICameraService> mspService = interface_cast<ICameraService>(binder);
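
One easy-to-miss detail: a native binder client must start its binder thread pool, otherwise the callbacks CameraService sends back (device status, capture results) are never delivered to the process. A one-time call at startup is enough:

    #include <binder/ProcessState.h>

    // Without this, incoming binder calls (i.e. our camera callbacks)
    // are never dispatched in this process.
    ProcessState::self()->startThreadPool();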

1.3 Opening the Camera

CameraService offers several interfaces for opening a camera; we used connectDevice:

    sp<ICameraDeviceUser> device;
    binder::Status res = mspService->connectDevice(callbacks, cameraId, clientPackageName,
            ICameraService::USE_CALLING_UID, &device);
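
connectDevice needs an ICameraDeviceCallbacks binder object through which CameraService reports device errors, idle transitions and per-frame capture results. A minimal sketch follows; the exact callback set varies between Android versions, so check it against the ICameraDeviceCallbacks.aidl in your tree (this one roughly follows Android P, with the impl namespaces elided):

    class CameraCallbacks : public hardware::camera2::BnCameraDeviceCallbacks {
    public:
        binder::Status onDeviceError(int32_t errorCode,
                const CaptureResultExtras& extras) override {
            (void)extras;
            ALOGE("camera device error %d", errorCode);
            return binder::Status::ok();
        }
        binder::Status onDeviceIdle() override { return binder::Status::ok(); }
        binder::Status onCaptureStarted(const CaptureResultExtras& extras,
                int64_t timestamp) override {
            (void)extras; (void)timestamp;
            return binder::Status::ok();
        }
        binder::Status onResultReceived(const CameraMetadata& metadata,
                const CaptureResultExtras& extras,
                const std::vector<PhysicalCaptureResultInfo>& physicalInfos) override {
            (void)metadata; (void)extras; (void)physicalInfos;
            return binder::Status::ok();
        }
        binder::Status onPrepared(int32_t streamId) override {
            (void)streamId;
            return binder::Status::ok();
        }
        binder::Status onRepeatingRequestError(int64_t lastFrameNumber,
                int32_t repeatingRequestId) override {
            (void)lastFrameNumber; (void)repeatingRequestId;
            return binder::Status::ok();
        }
        binder::Status onRequestQueueEmpty() override {
            return binder::Status::ok();
        }
    };

    sp<CameraCallbacks> callbacks = new CameraCallbacks();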

connectDevice returns a BpCameraDeviceUser proxy object representing a camera device. It implements ICameraDeviceUser, which is defined as follows:

    //ICameraDeviceUser.aidl
    void disconnect();

    const int NO_IN_FLIGHT_REPEATING_FRAMES = -1;

    SubmitInfo submitRequest(in CaptureRequest request, boolean streaming);
    SubmitInfo submitRequestList(in CaptureRequest[] requestList, boolean streaming);

    /**
     * Cancel the repeating request specified by requestId
     * Returns the frame number of the last frame that will be produced from this
     * repeating request, or NO_IN_FLIGHT_REPEATING_FRAMES if no frames were produced
     * by this repeating request.
     *
     * Repeating request may be stopped by camera device due to an error. Canceling a stopped
     * repeating request will trigger ERROR_ILLEGAL_ARGUMENT.
     */
    long cancelRequest(int requestId);
    /**
     * Begin the device configuration.
     *
     * <p>
     * beginConfigure must be called before any call to deleteStream, createStream,
     * or endConfigure. It is not valid to call this when the device is not idle.
     * </p>
     */
    void beginConfigure();

    void endConfigure(int operatingMode);

    void deleteStream(int streamId);

    /**
     * Create an output stream
     *
     * <p>Create an output stream based on the given output configuration</p>
     *
     * @param outputConfiguration size, format, and other parameters for the stream
     * @return new stream ID
     */
    int createStream(in OutputConfiguration outputConfiguration);

1.4 Creating Streams

Once the camera is open, streams can be created and deleted. Before creating a stream, the Surface resources must be prepared.

1.4.1 Preparing the Surface Resources

  1. If the preview data does not need to be shown on a physical display, the surface can be created directly via BufferQueue::createBufferQueue:
void setupPreviewSurface()
{
    ALOGD("%s,%d E .", __FUNCTION__,__LINE__);

    // Setup a buffer queue
    BufferQueue::createBufferQueue(&mspGbProducer, &mspGbConsumer);
    // NOTE: keep a long-lived reference (e.g. a class member) so the
    // consumer outlives this function
    sp<BufferItemConsumer> consumer;

    // Use SW_READ_OFTEN so the CPU can later lock and dump the buffers
    // (dumpImage below locks with GRALLOC_USAGE_SW_READ_OFTEN)
    consumer = new BufferItemConsumer(mspGbConsumer, GRALLOC_USAGE_SW_READ_OFTEN,
            /*maxImages*/ MAX_BUFFER_NUM, /*controlledByApp*/ true);

    String8 consumerName = String8::format("ImageReader-%dx%df%xm%d-%d",
            m_iWidth, m_iHeight, m_iFormat, MAX_BUFFER_NUM, getpid());
    // Create the frame-available listener. ConsumerBase::FrameAvailableListener
    // is an abstract interface, so instantiate the implementation class
    // (ConsumerBaseImple, shown in section 1.7)
    mPreviewListener = new ConsumerBaseImple();
    
    consumer->setName(consumerName);
    consumer->setDefaultBufferSize(m_iWidth, m_iHeight);
    consumer->setDefaultBufferFormat(m_iFormat);
    // Register the frame-available listener with the consumer
    consumer->setFrameAvailableListener(mPreviewListener);

    ALOGD("%s,%d X .", __FUNCTION__,__LINE__);
}
  2. If the preview frames do need to be shown on a physical display, request a Surface from SurfaceFlinger:
 sp<ANativeWindow> getSurfaceFromSF() {
    status_t err;
    sp<SurfaceComposerClient> surfaceComposerClient = new SurfaceComposerClient;
    err = surfaceComposerClient->initCheck();

    // Get main display parameters.
    sp<IBinder> mainDpy = SurfaceComposerClient::getBuiltInDisplay(
            ISurfaceComposer::eDisplayIdMain);
    DisplayInfo mainDpyInfo;
    err = SurfaceComposerClient::getDisplayInfo(mainDpy, &mainDpyInfo);

    uint32_t width, height;
    if (mainDpyInfo.orientation != DISPLAY_ORIENTATION_0 &&
            mainDpyInfo.orientation != DISPLAY_ORIENTATION_180) {
        // rotated
        width = mainDpyInfo.h;
        height = mainDpyInfo.w;
    } else {
        width = mainDpyInfo.w;
        height = mainDpyInfo.h;
    }

    sp<SurfaceControl> sc = surfaceComposerClient->createSurface(
            String8("mytest"), width, height,
            PIXEL_FORMAT_RGBX_8888, ISurfaceComposerClient::eOpaque);

    SurfaceComposerClient::openGlobalTransaction();
    err = sc->setLayer(0x7FFFFFFF);     // always on top
    err = sc->show();
    SurfaceComposerClient::closeGlobalTransaction();
    sp<ANativeWindow> anw = sc->getSurface();
    return anw;
}
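
A caveat: getBuiltInDisplay/getDisplayInfo and the openGlobalTransaction/closeGlobalTransaction calls above come from older framework releases (roughly the Android 7/8 timeframe); newer trees replace them with SurfaceComposerClient::getInternalDisplayToken, getDisplayState, and a SurfaceComposerClient::Transaction object, so adjust to whatever your source tree provides.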

Other approaches exist as well, such as obtaining a Surface from an Activity; additions are welcome.

1.4.2 Creating the Stream

With the surface resources ready, the streams can be created:

int configureDevice()
{
    binder::Status res;

    // Prepare the preview surface
    setupPreviewSurface();
    // Prepare the capture surface
    setupCaptureSurface();

    ALOGD("waitUntilIdle E");
    res = mspDevice->waitUntilIdle();
    ALOGD("waitUntilIdle X");

    res = mspDevice->beginConfigure();
    
    ALOGD("create preview Stream E");
    OutputConfiguration previewOutPut(mspGbProducer, /*rotation*/ 0);
    res = mspDevice->createStream(previewOutPut, &mPreviewStreamId);
    ALOGD("create preview Stream mPreviewStreamId = %d X",mPreviewStreamId);

    ALOGD("create capture Stream E");
    OutputConfiguration jpegOutPut(mspJpegGbpProducer, /*rotation*/ 0);
    res = mspDevice->createStream(jpegOutPut, &mCaptureStreamId);
    ALOGD("create capture Stream  mCaptureStreamId = %d X",mCaptureStreamId);

    ALOGD("endConfigure E .");
    res = mspDevice->endConfigure(/*operatingMode*/ 0);  // 0 = NORMAL_MODE
    ALOGD("endConfigure X .");
    return 0;
}    
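
Note that the stream size and format (m_iWidth, m_iHeight, m_iFormat) must be configurations the HAL actually supports. A sketch of how they could be queried through getCameraCharacteristics (the exact binder signature varies slightly across Android versions):

// Query the static camera characteristics and walk the supported
// stream configurations: (format, width, height, direction) tuples,
// where direction 0 means an output stream.
CameraMetadata chars;
mspService->getCameraCharacteristics(cameraId, &chars);
camera_metadata_entry entry =
        chars.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
for (size_t i = 0; i + 3 < entry.count; i += 4) {
    int32_t format = entry.data.i32[i + 0];
    int32_t width  = entry.data.i32[i + 1];
    int32_t height = entry.data.i32[i + 2];
    int32_t dir    = entry.data.i32[i + 3];
    ALOGD("stream config: fmt=0x%x %dx%d dir=%d", format, width, height, dir);
}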

1.5 Submitting the Preview Request

Once the streams are created successfully, preview and still capture (startPreview/takePicture) can begin. In API2, frame data is obtained by submitting requests:

int startPreview() 
{
    binder::Status res;
    CameraMetadata mRequestTemplate;
    int templateId = camera2::ICameraDeviceUser::TEMPLATE_PREVIEW;
    // The metadata returned by createDefaultRequest only contains default
    // values; to change a setting, update the metadata (see section 1.6)
    res = mspDevice->createDefaultRequest(templateId, /*out*/ &mRequestTemplate);

    // For the RGB camera, enable the Qualcomm face-detect feature by default
    if (m_iCameraId == CAMERA_RGB_ID) {
        uint8_t faceDetectType  = TYPE_BYTE;
        uint8_t faceDetectValue = ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE;
        updateMetaData(&mRequestTemplate, "android.statistics.faceDetectMode",
                faceDetectType, &faceDetectValue, 1);
    }

    std::vector< ::android::hardware::camera2::CaptureRequest> requestList;

    sp<Surface> previewSurface = new Surface(mspGbProducer, /*controlledByApp*/ true);

    // Preview only
    camera2::CaptureRequest previewTargetRequest;
    previewTargetRequest.mMetadata    = mRequestTemplate;
    previewTargetRequest.mIsReprocess = false;
    previewTargetRequest.mSurfaceList.add(previewSurface);
    requestList.push_back(previewTargetRequest);

    camera2::utils::SubmitInfo info;
    // Submit the repeating request to start preview
    res = mspDevice->submitRequestList(requestList, /*streaming*/ true, /*out*/ &info);

    int32_t requestId       = info.mRequestId;
    int64_t lastFrameNumber = info.mLastFrameNumber;

    return 0;
}
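
The still-capture path is symmetric: build a one-shot request from TEMPLATE_STILL_CAPTURE, target the JPEG surface, and submit with streaming = false. A sketch (takePicture is our own naming, mirroring startPreview above):

int takePicture()
{
    binder::Status res;
    CameraMetadata captureTemplate;
    res = mspDevice->createDefaultRequest(
            camera2::ICameraDeviceUser::TEMPLATE_STILL_CAPTURE,
            /*out*/ &captureTemplate);

    sp<Surface> jpegSurface = new Surface(mspJpegGbpProducer, /*controlledByApp*/ true);

    camera2::CaptureRequest captureRequest;
    captureRequest.mMetadata    = captureTemplate;
    captureRequest.mIsReprocess = false;
    captureRequest.mSurfaceList.add(jpegSurface);

    camera2::utils::SubmitInfo info;
    // streaming = false: a single capture instead of a repeating request
    res = mspDevice->submitRequest(captureRequest, /*streaming*/ false, /*out*/ &info);
    return res.isOk() ? 0 : -1;
}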

1.6 Updating Metadata

Whenever a default metadata value needs to be changed, the CameraMetadata must be updated. One way to do it (note the function is a template on the value type T):


template <typename T>
int updateMetaData(CameraMetadata* metaData, const char* key, uint8_t type,
        const T* data, int count) {
    status_t ret;
    uint32_t tag = 0;
    sp<VendorTagDescriptor> vTags;
    sp<VendorTagDescriptorCache> cache = VendorTagDescriptorCache::getGlobalVendorTagCache();
    if (cache.get()) {
        const camera_metadata_t *metaBuffer = metaData->getAndLock();
        metadata_vendor_id_t vendorId = get_camera_metadata_vendor_id(metaBuffer);
        metaData->unlock(metaBuffer);
        cache->getVendorTagDescriptor(vendorId, &vTags);
    }
    (void)type; // the tag's value type is implied by T
    ret = CameraMetadata::getTagFromName(key, vTags.get(), &tag);
    ret = metaData->update(tag, data, count);
    return ret;
}
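
For example, turning the preview template's auto-exposure off would look like this (T is deduced as uint8_t from the data pointer):

uint8_t aeMode = ANDROID_CONTROL_AE_MODE_OFF;
updateMetaData(&mRequestTemplate, "android.control.aeMode",
        TYPE_BYTE, &aeMode, /*count*/ 1);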

1.7 Retrieving Frame Data

In setupPreviewSurface we created a producer/consumer pair, mspGbProducer and mspGbConsumer. mspGbProducer is wrapped into a Surface and handed to CameraService, while a frame-available listener is registered on the consumer side:

   // mPreviewListener is an implementation of ConsumerBase::FrameAvailableListener
    consumer->setFrameAvailableListener(mPreviewListener);

mPreviewListener implements the ConsumerBase::FrameAvailableListener interface; we will only walk through the onFrameAvailable(const BufferItem &) implementation.

void ConsumerBaseImple::onFrameAvailable(const BufferItem &)
{
    // Part of ImageReader_imageSetup
    BufferItem *buffer = new BufferItem;

    BufferItemConsumer *bufferConsumer = getBufferConsumer();
    // Acquire the GraphicBuffer
    status_t res = bufferConsumer->acquireBuffer(buffer, 0);
    // Save the GraphicBuffer contents
    dumpImage(buffer);
    // Part of ImageReader_imageRelease
    // Return the GraphicBuffer
    sp<Fence> releaseFence = Fence::NO_FENCE;
    bufferConsumer->releaseBuffer(*buffer, releaseFence);
    returnBufferItem(buffer);
}

During this flow the GraphicBuffer moves through the states QUEUED -> ACQUIRED -> RELEASED. (The acquireFence and releaseFence transitions should be handled as well; this was a latent issue in the earlier implementation!)
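
The fence concern deserves a sketch. Before any CPU access, one could wait on the acquire fence delivered with the BufferItem (lockImageFromBuffer below is handed the dup'ed fence fd, which covers this when the lock call honors it):

// Wait for the producer's acquire fence before any CPU access (sketch).
sp<Fence> acquireFence = buffer->mFence;
if (acquireFence != nullptr && acquireFence->isValid()) {
    status_t err = acquireFence->wait(1000 /* ms */);
    if (err != OK) {
        ALOGE("acquire fence wait failed: %d", err);
    }
}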
After acquireBuffer the GraphicBuffer is ours to use. Here we merely save it, but any other use, such as secondary processing of the frame data, belongs at this point as well. Let's go straight to how the GraphicBuffer is saved.

void dumpImage(BufferItem *buffer)
{
    uint32_t width      = buffer->mGraphicBuffer->getWidth();
    uint32_t height     = buffer->mGraphicBuffer->getHeight();
    int      format     = buffer->mGraphicBuffer->getPixelFormat();
    uint64_t Usage      = buffer->mGraphicBuffer->getUsage();
    uint32_t stride     = buffer->mGraphicBuffer->getStride();
    uint64_t frameNumber=buffer->mFrameNumber;

    int readerFormat        = buffer->mGraphicBuffer->getPixelFormat();
    LockedImage lockedImg   = LockedImage();
    // Pull the YUV, RAW16/RAW10, or JPEG data out of the BufferItem
    lockImageFromBuffer(buffer->mGraphicBuffer, GRALLOC_USAGE_SW_READ_OFTEN,
            buffer->mCrop, buffer->mFence->dup(), &lockedImg);

    uint8_t *pData      = NULL;
    uint32_t dataSize   = 0;
    int bytesPerPixel   = 0; // per plane
    int32_t fmt = lockedImg.flexFormat;
    switch (fmt)
    {
        case HAL_PIXEL_FORMAT_YCbCr_420_888:
            // Dump as NV12 (Y plane followed by interleaved CbCr)
            dumpImage(lockedImg.data, lockedImg.dataCb, buffer->mFrameNumber,
                    lockedImg.stride, lockedImg.height);
            // Dump as NV21 (Y plane followed by interleaved CrCb)
            //dumpImage(lockedImg.data, lockedImg.dataCr, buffer->mFrameNumber,
            //        lockedImg.stride, lockedImg.height);
            break;
        case HAL_PIXEL_FORMAT_RAW16:
            // Single plane 16bpp bayer data.
            break;
        case HAL_PIXEL_FORMAT_RAW10:
            pData       = lockedImg.data;
            dataSize    = lockedImg.stride * lockedImg.height;
            break;
        case HAL_PIXEL_FORMAT_BLOB:
        case HAL_PIXEL_FORMAT_JPEG:
        case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
            pData       = lockedImg.data;
            break;
        default:;
    }

}

Here is how the YUV data is retrieved:

status_t lockImageFromBuffer(sp<GraphicBuffer> buffer, uint32_t inUsage,
                             const Rect &rect, int fenceFd, LockedImage *outputImage)
{
    if (HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED == buffer->getPixelFormat())
    {
        // This is the only opaque format exposed in the ImageFormat public API.
        // Note that we do support CPU access for HAL_PIXEL_FORMAT_RAW_OPAQUE
        // (ImageFormat#RAW_PRIVATE) so it doesn't count as opaque here.
        ALOGE("Opaque format buffer is not lockable!");
        return BAD_VALUE;
    }

    void *pData         = NULL;
    android_ycbcr ycbcr = android_ycbcr();
    status_t res;
    int format          = buffer->getPixelFormat();
    int flexFormat      = format;
    if (isPossiblyYUV(format))
    {   // Read the YUV data out of the buffer
        res = buffer->lockAsyncYCbCr(inUsage, rect, &ycbcr, fenceFd);
        pData = ycbcr.y;
        flexFormat = HAL_PIXEL_FORMAT_YCbCr_420_888;
    }

    // lockAsyncYCbCr failed, or the format is not YUV
    if (pData == NULL)
    {   // Fetch RAW or JPEG data instead
        res = buffer->lockAsync(inUsage, rect, &pData, fenceFd);
    }

    outputImage->data       = reinterpret_cast<uint8_t *>(pData);
    outputImage->width      = buffer->getWidth();
    outputImage->height     = buffer->getHeight();
    outputImage->format     = format;
    outputImage->flexFormat = flexFormat;
    outputImage->stride     = (ycbcr.y != NULL) ? static_cast<uint32_t>(ycbcr.ystride) : buffer->getStride();

    outputImage->dataCb         = reinterpret_cast<uint8_t *>(ycbcr.cb);
    outputImage->dataCr         = reinterpret_cast<uint8_t *>(ycbcr.cr);
    outputImage->chromaStride   = static_cast<uint32_t>(ycbcr.cstride);
    outputImage->chromaStep     = static_cast<uint32_t>(ycbcr.chroma_step);
    return OK;
}
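
A quick way to tell the chroma layout apart after locking: chromaStep == 2 means the chroma samples are interleaved (NV12/NV21-style semi-planar data), while chromaStep == 1 means fully planar I420-style data.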

Finally, the dump method itself:

void dumpImage(unsigned char* pY, unsigned char* pUV,
        uint64_t frameid, uint32_t stride, uint32_t height) {
    char buf[FILENAME_MAX];
    memset(buf, 0, sizeof(buf));

    snprintf(buf, sizeof(buf), CAMERA_DUMP_FRM_LOCATION "frame_%ux%u_%" PRIu64 ".nv12",
            stride, height, frameid);
    //snprintf(buf, sizeof(buf), CAMERA_DUMP_FRM_LOCATION "frame_%ux%u_%" PRIu64 ".nv21",
    //        stride, height, frameid);
    ALOGE("%s,%d buf : %s", __FUNCTION__, __LINE__, buf);

    int file_fd = open(buf, O_RDWR | O_CREAT, 0644);
    uint32_t ySize = stride * height;
    if (file_fd >= 0)
    {
        ssize_t written_yLen    = write(file_fd, pY, ySize);
        ssize_t written_UVLen   = write(file_fd, pUV, ySize / 2);
        if (written_yLen < 0 || written_UVLen < 0) {
            ALOGE("%s, write failed", __FUNCTION__);
        }
        close(file_fd);
    }
}

This concludes the brief introduction to native camera development by talking to CameraService directly.
