Following the JNI call path: on the Java side the preview is displayed through a SurfaceTexture. CameraService obtains an ANativeWindow from that SurfaceTexture and uses it to hand the preview data over to OpenGL.
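Roughly, the CameraService side looks like the sketch below. This is not a verbatim excerpt, only the idea: the ISurfaceTexture that arrived over Binder from the app is wrapped in a SurfaceTextureClient (which is an ANativeWindow) and handed to the HAL wrapper; the variable names are illustrative.

// Sketch only (not the actual CameraService source):
sp<ISurfaceTexture> surfaceTexture = ...;                  // received from the app via Binder
sp<ANativeWindow> window = new SurfaceTextureClient(surfaceTexture);
mHardware->setPreviewWindow(window);                       // CameraHardwareInterface::setPreviewWindow()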
The functions that operate on this ANativeWindow are implemented in CameraHardwareInterface.h:
struct camera_preview_window {
    struct preview_stream_ops nw;
    void *user;
};

struct camera_preview_window mHalPreviewWindow;
preview_stream_ops is defined in hardware/libhardware/include/hardware/camera.h:
typedef struct preview_stream_ops {
    int (*dequeue_buffer)(struct preview_stream_ops* w,
                          buffer_handle_t** buffer, int *stride);
    int (*enqueue_buffer)(struct preview_stream_ops* w,
                          buffer_handle_t* buffer);
    int (*cancel_buffer)(struct preview_stream_ops* w,
                         buffer_handle_t* buffer);
    int (*set_buffer_count)(struct preview_stream_ops* w, int count);
    int (*set_buffers_geometry)(struct preview_stream_ops* pw,
                                int w, int h, int format);
    int (*set_crop)(struct preview_stream_ops *w,
                    int left, int top, int right, int bottom);
    int (*set_usage)(struct preview_stream_ops* w, int usage);
    int (*set_swap_interval)(struct preview_stream_ops *w, int interval);
    int (*get_min_undequeued_buffer_count)(const struct preview_stream_ops *w,
                                           int *count);
    int (*lock_buffer)(struct preview_stream_ops* w,
                       buffer_handle_t* buffer);
    // Timestamps are measured in nanoseconds, and must be comparable
    // and monotonically increasing between two frames in the same
    // preview stream. They do not need to be comparable between
    // consecutive or parallel preview streams, cameras, or app runs.
    int (*set_timestamp)(struct preview_stream_ops *w, int64_t timestamp);
} preview_stream_ops_t;
The initialization function in CameraHardwareInterface.h calls initHalPreviewWindow():
status_t initialize(hw_module_t *module)
{
    ALOGI("Opening camera %s", mName.string());
    int rc = module->methods->open(module, mName.string(),
                                   (hw_device_t **)&mDevice);
    if (rc != OK) {
        ALOGE("Could not open camera %s: %d", mName.string(), rc);
        return rc;
    }
    initHalPreviewWindow();
    return rc;
}
void initHalPreviewWindow()
{
    mHalPreviewWindow.nw.cancel_buffer = __cancel_buffer;
    mHalPreviewWindow.nw.lock_buffer = __lock_buffer;
    mHalPreviewWindow.nw.dequeue_buffer = __dequeue_buffer;
    mHalPreviewWindow.nw.enqueue_buffer = __enqueue_buffer;
    mHalPreviewWindow.nw.set_buffer_count = __set_buffer_count;
    mHalPreviewWindow.nw.set_buffers_geometry = __set_buffers_geometry;
    mHalPreviewWindow.nw.set_crop = __set_crop;
    mHalPreviewWindow.nw.set_timestamp = __set_timestamp;
    mHalPreviewWindow.nw.set_usage = __set_usage;
    mHalPreviewWindow.nw.set_swap_interval = __set_swap_interval;
    mHalPreviewWindow.nw.get_min_undequeued_buffer_count =
            __get_min_undequeued_buffer_count;
}
So the __xxx helper functions are wired into the function pointers of mHalPreviewWindow.nw.
Before preview can start, the ANativeWindow has to be passed down to the HAL:
/** Set the ANativeWindow to which preview frames are sent */
status_t setPreviewWindow(const sp<ANativeWindow>& buf)
{
    ALOGV("%s(%s) buf %p", __FUNCTION__, mName.string(), buf.get());
    if (mDevice->ops->set_preview_window) {
        mPreviewWindow = buf;
        mHalPreviewWindow.user = this;
        ALOGV("%s &mHalPreviewWindow %p mHalPreviewWindow.user %p",
              __FUNCTION__, &mHalPreviewWindow, mHalPreviewWindow.user);
        return mDevice->ops->set_preview_window(mDevice,
                buf.get() ? &mHalPreviewWindow.nw : 0);
    }
    return INVALID_OPERATION;
}
The HAL can then use these functions to operate on the ANativeWindow. Take one of them, __dequeue_buffer, which fetches a free buffer from the ANativeWindow's queue:
@CameraHardwareInterface.h
#define anw(n) __to_anw(((struct camera_preview_window *)n)->user)

static int __dequeue_buffer(struct preview_stream_ops* w,
                            buffer_handle_t** buffer, int *stride)
{
    int rc;
    ANativeWindow *a = anw(w);
    ANativeWindowBuffer* anb;
    rc = a->dequeueBuffer(a, &anb);
    if (!rc) {
        *buffer = &anb->handle;
        *stride = anb->stride;
    }
    return rc;
}
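To see how a HAL typically drives these ops, here is a hedged sketch of pushing one preview frame through the window. push_one_frame and fill_frame are hypothetical names and error handling is trimmed; only the preview_stream_ops calls themselves come from camera.h.

#include <hardware/camera.h>
#include <time.h>

// Hypothetical helper: the usual dequeue -> lock -> fill -> enqueue cycle
// a camera HAL runs for every preview frame.
static void push_one_frame(preview_stream_ops_t* w)
{
    buffer_handle_t* buf = NULL;
    int stride = 0;

    if (w->dequeue_buffer(w, &buf, &stride) != 0)   // get a free buffer
        return;
    w->lock_buffer(w, buf);                         // lock it before writing

    // fill_frame(*buf, stride);                    // hypothetical: write the NV21 pixels here

    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);            // monotonic nanosecond timestamp
    w->set_timestamp(w, (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec);

    w->enqueue_buffer(w, buf);                      // hand the frame to the display side
}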
In the HAL we can also see the display-side initialization, where the initial preview format is set to HAL_PIXEL_FORMAT_YCrCb_420_SP:
ANativeWindowDisplayAdapter::ANativeWindowDisplayAdapter()
    : mDisplayThread(NULL),
      mDisplayState(ANativeWindowDisplayAdapter::DISPLAY_INIT),
      mDisplayEnabled(false),
      mBufferCount(0)
{
    LOG_FUNCTION_NAME;
    ...
    mPixelFormat = NULL;
    mNativeWindowPixelFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP;
}
and the call that applies this pixel format to the window:
err = mANativeWindow->set_buffers_geometry(mANativeWindow,
        width, height, mNativeWindowPixelFormat); // NV21
This call goes straight back into the ANativeWindow wrapper functions in CameraHardwareInterface.h:
static int __set_buffers_geometry(struct preview_stream_ops* w,
                                  int width, int height, int format)
{
    ANativeWindow *a = anw(w);
    return native_window_set_buffers_geometry(a, width, height, format);
}
native_window_set_buffers_geometry() is defined in system/core/include/system/window.h. It is an inline helper rather than a cross-process call in itself: it simply forwards to the window's perform() hook, and the actual Binder traffic to the display side happens later, when buffers are dequeued through ISurfaceTexture.
/*
 * native_window_set_buffers_geometry(..., int w, int h, int format)
 * All buffers dequeued after this call will have the dimensions and format
 * specified.  A successful call to this function has the same effect as calling
 * native_window_set_buffers_size and native_window_set_buffers_format.
 *
 * XXX: This function is deprecated.  The native_window_set_buffers_dimensions
 * and native_window_set_buffers_format functions should be used instead.
 */
static inline int native_window_set_buffers_geometry(
        struct ANativeWindow* window, int w, int h, int format)
{
    return window->perform(window, NATIVE_WINDOW_SET_BUFFERS_GEOMETRY,
            w, h, format);
}
struct ANativeWindow is also defined in this file. ANativeWindow is the interface OpenGL renders into; the Surface class inherits from ANativeWindow. ANativeWindow is the bridge between OpenGL and the Android window system, i.e. OpenGL manipulates the Android window system indirectly through ANativeWindow. That bridge is established by the EGL library; every function prefixed with egl belongs to the EGL interface (a small sketch of this follows the perform() declaration below). That is as much as we need to know here. The call above is routed through window->perform(...):
/*
 * hook used to perform various operations on the surface.
 * (*perform)() is a generic mechanism to add functionality to
 * ANativeWindow while keeping backward binary compatibility.
 *
 * DO NOT CALL THIS HOOK DIRECTLY.  Instead, use the helper functions
 * defined below.
 *
 * (*perform)() returns -ENOENT if the 'what' parameter is not supported
 * by the surface's implementation.
 *
 * The valid operations are:
 *     NATIVE_WINDOW_SET_USAGE
 *     NATIVE_WINDOW_CONNECT                 (deprecated)
 *     NATIVE_WINDOW_DISCONNECT              (deprecated)
 *     NATIVE_WINDOW_SET_CROP                (private)
 *     NATIVE_WINDOW_SET_BUFFER_COUNT
 *     NATIVE_WINDOW_SET_BUFFERS_GEOMETRY    (deprecated)
 *     NATIVE_WINDOW_SET_BUFFERS_TRANSFORM
 *     NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP
 *     NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS
 *     NATIVE_WINDOW_SET_BUFFERS_FORMAT
 *     NATIVE_WINDOW_SET_SCALING_MODE        (private)
 *     NATIVE_WINDOW_LOCK                    (private)
 *     NATIVE_WINDOW_UNLOCK_AND_POST         (private)
 *     NATIVE_WINDOW_API_CONNECT             (private)
 *     NATIVE_WINDOW_API_DISCONNECT          (private)
 *     NATIVE_WINDOW_SET_BUFFERS_USER_DIMENSIONS (private)
 *     NATIVE_WINDOW_SET_POST_TRANSFORM_CROP (private)
 *
 */
int (*perform)(struct ANativeWindow* window, int operation, ... );
So perform is a function pointer: int (*perform)(struct ANativeWindow* window, int operation, ...). For the camera preview window it is implemented by SurfaceTextureClient.
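As promised above, here is a minimal sketch of how the EGL bridge consumes an ANativeWindow. This is generic EGL usage, not camera-specific code; the helper name and config choice are illustrative, and error handling is omitted.

#include <EGL/egl.h>
#include <system/window.h>

// Illustrative only: attach an EGL window surface to any ANativeWindow.
static EGLSurface attach_egl_to_window(ANativeWindow* window)
{
    EGLDisplay dpy = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    eglInitialize(dpy, NULL, NULL);

    const EGLint attribs[] = { EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, EGL_NONE };
    EGLConfig config;
    EGLint numConfigs = 0;
    eglChooseConfig(dpy, attribs, &config, 1, &numConfigs);

    // eglCreateWindowSurface() is the point where OpenGL ES output gets wired
    // to the ANativeWindow's buffer queue.
    return eglCreateWindowSurface(dpy, config, window, NULL);
}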
@./frameworks/native/libs/gui/SurfaceTextureClient.cpp
int SurfaceTextureClient::perform(int operation, va_list args)
{
    int res = NO_ERROR;
    switch (operation) {
    case NATIVE_WINDOW_CONNECT:
        // deprecated. must return NO_ERROR.
        break;
    case NATIVE_WINDOW_DISCONNECT:
        // deprecated. must return NO_ERROR.
        break;
    case NATIVE_WINDOW_SET_USAGE:
        res = dispatchSetUsage(args);
        break;
    case NATIVE_WINDOW_SET_CROP:
        res = dispatchSetCrop(args);
        break;
    case NATIVE_WINDOW_SET_BUFFER_COUNT:
        res = dispatchSetBufferCount(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_GEOMETRY:
        res = dispatchSetBuffersGeometry(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_TRANSFORM:
        res = dispatchSetBuffersTransform(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_TIMESTAMP:
        res = dispatchSetBuffersTimestamp(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_DIMENSIONS:
        res = dispatchSetBuffersDimensions(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_USER_DIMENSIONS:
        res = dispatchSetBuffersUserDimensions(args);
        break;
    case NATIVE_WINDOW_SET_BUFFERS_FORMAT:
        res = dispatchSetBuffersFormat(args);
        break;
    case NATIVE_WINDOW_LOCK:
        res = dispatchLock(args);
        break;
    case NATIVE_WINDOW_UNLOCK_AND_POST:
        res = dispatchUnlockAndPost(args);
        break;
    case NATIVE_WINDOW_SET_SCALING_MODE:
        res = dispatchSetScalingMode(args);
        break;
    case NATIVE_WINDOW_API_CONNECT:
        res = dispatchConnect(args);
        break;
    case NATIVE_WINDOW_API_DISCONNECT:
        res = dispatchDisconnect(args);
        break;
    default:
        res = NAME_NOT_FOUND;
        break;
    }
    return res;
}
int SurfaceTextureClient::dispatchSetBuffersGeometry(va_list args)
{
    int w = va_arg(args, int);
    int h = va_arg(args, int);
    int f = va_arg(args, int);
    int err = setBuffersDimensions(w, h);
    if (err != 0) {
        return err;
    }
    return setBuffersFormat(f);
}
int SurfaceTextureClient::setBuffersFormat(int format)
{
    ALOGV("SurfaceTextureClient::setBuffersFormat");
    if (format < 0)
        return BAD_VALUE;
    Mutex::Autolock lock(mMutex);
    mReqFormat = format;
    return NO_ERROR;
}
Nothing much happens here: mReqFormat is simply stored, without any further checks. Let's look at another function instead:
int SurfaceTextureClient::dequeueBuffer(android_native_buffer_t** buffer)
{
    ATRACE_CALL();
    ALOGV("SurfaceTextureClient::dequeueBuffer");
    Mutex::Autolock lock(mMutex);
    int buf = -1;
    int reqW = mReqWidth ? mReqWidth : mUserWidth;
    int reqH = mReqHeight ? mReqHeight : mUserHeight;
    status_t result = mSurfaceTexture->dequeueBuffer(&buf, reqW, reqH,
            mReqFormat, mReqUsage);
    ...
mSurfaceTexture is of type ISurfaceTexture; in the SurfaceTexture client/service structure it is the client-side Binder proxy.
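For context, mSurfaceTexture is set when the SurfaceTextureClient is constructed around that proxy. Roughly (reproduced from memory of the Jelly Bean sources, so treat it as a sketch rather than a verbatim quote):

SurfaceTextureClient::SurfaceTextureClient(
        const sp<ISurfaceTexture>& surfaceTexture)
{
    SurfaceTextureClient::init();                               // reset cached width/height/format
    SurfaceTextureClient::setISurfaceTexture(surfaceTexture);   // stores it in mSurfaceTexture
}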
In ISurfaceTexture.cpp (frameworks/native/libs/gui):
class BpSurfaceTexture : public BpInterface<ISurfaceTexture>
{
public:
    virtual status_t dequeueBuffer(int *buf, uint32_t w, uint32_t h,
            uint32_t format, uint32_t usage) {
        Parcel data, reply;
        data.writeInterfaceToken(ISurfaceTexture::getInterfaceDescriptor());
        data.writeInt32(w);
        data.writeInt32(h);
        data.writeInt32(format);
        data.writeInt32(usage);
        status_t result = remote()->transact(DEQUEUE_BUFFER, data, &reply);
        if (result != NO_ERROR) {
            return result;
        }
        *buf = reply.readInt32();
        result = reply.readInt32();
        return result;
    }
    ...
};
Now look at how BnSurfaceTexture handles DEQUEUE_BUFFER:
status_t BnSurfaceTexture::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    switch (code) {
        case DEQUEUE_BUFFER: {
            CHECK_INTERFACE(ISurfaceTexture, data, reply);
            uint32_t w = data.readInt32();
            uint32_t h = data.readInt32();
            uint32_t format = data.readInt32();
            uint32_t usage = data.readInt32();
            int buf;
            int result = dequeueBuffer(&buf, w, h, format, usage);
            reply->writeInt32(buf);
            reply->writeInt32(result);
            return NO_ERROR;
        } break;
        ...
    }
dequeueBuffer itself is not implemented in BnSurfaceTexture; it is declared in the subclass BufferQueue (in BufferQueue.h):
class BufferQueue : public BnSurfaceTexture {
public:
    // dequeueBuffer gets the next buffer slot index for the client to use. If a
    // buffer slot is available then that slot index is written to the location
    // pointed to by the buf argument and a status of OK is returned.  If no
    // slot is available then a status of -EBUSY is returned and buf is
    // unmodified.
    // The width and height parameters must be no greater than the minimum of
    // GL_MAX_VIEWPORT_DIMS and GL_MAX_TEXTURE_SIZE (see: glGetIntegerv).
    // An error due to invalid dimensions might not be reported until
    // updateTexImage() is called.
    virtual status_t dequeueBuffer(int *buf, uint32_t width, uint32_t height,
            uint32_t format, uint32_t usage);
    ...
};
The actual implementation is in BufferQueue.cpp.
That function is too long to quote in full. The part that matters here: if the buffer currently sitting in the chosen slot does not match the requested format (or width/height/usage), a new buffer has to be allocated.
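A condensed sketch of that check inside BufferQueue::dequeueBuffer (member names follow the Jelly Bean sources, but this is trimmed and paraphrased, not a verbatim quote):

// Once a free slot 'buf' has been chosen:
const sp<GraphicBuffer>& buffer(mSlots[buf].mGraphicBuffer);
if ((buffer == NULL) ||
    (uint32_t(buffer->width)  != w)      ||
    (uint32_t(buffer->height) != h)      ||
    (uint32_t(buffer->format) != format) ||
    ((uint32_t(buffer->usage) & usage) != usage)) {
    // The GraphicBuffer in this slot does not match the request,
    // so a replacement must be allocated (see below).
}

When that condition holds, dequeueBuffer falls through to the allocation: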
sp<GraphicBuffer> graphicBuffer(
        mGraphicBufferAlloc->createGraphicBuffer(
                w, h, format, usage, &error));
// mGraphicBufferAlloc is the connection to SurfaceFlinger that is used to
// allocate new GraphicBuffer objects.
sp<IGraphicBufferAlloc> mGraphicBufferAlloc;
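Where does mGraphicBufferAlloc come from? It is handed out by SurfaceFlinger when the BufferQueue is constructed, roughly as below (a sketch of the BufferQueue constructor, assuming the ComposerService helper from SurfaceComposerClient; details elided):

// In the BufferQueue constructor: ask SurfaceFlinger (the composer service)
// for the IGraphicBufferAlloc that will perform the actual allocations.
sp<ISurfaceComposer> composer(ComposerService::getComposerService());
mGraphicBufferAlloc = composer->createGraphicBufferAlloc();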
Now we have reached SurfaceFlinger (SurfaceFlinger.cpp):
sp<GraphicBuffer> GraphicBufferAlloc::createGraphicBuffer(uint32_t w, uint32_t h,
        PixelFormat format, uint32_t usage, status_t* error)
{
    sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(w, h, format, usage));
    status_t err = graphicBuffer->initCheck();
    *error = err;
    if (err != 0 || graphicBuffer->handle == 0) {
        if (err == NO_MEMORY) {
            GraphicBuffer::dumpAllocationsToSystemLog();
        }
        ALOGE("GraphicBufferAlloc::createGraphicBuffer(w=%d, h=%d) "
              "failed (%s), handle=%p",
              w, h, strerror(-err), graphicBuffer->handle);
        return 0;
    }
    return graphicBuffer;
}
GraphicBuffer is defined in native/include/ui/GraphicBuffer.h. In its constructor the format parameter is typed as PixelFormat:
namespace android {

enum {
    //
    // these constants need to match those
    // in graphics/PixelFormat.java & pixelflinger/format.h
    //
    PIXEL_FORMAT_UNKNOWN    =   0,
    PIXEL_FORMAT_NONE       =   0,

    // logical pixel formats used by the SurfaceFlinger -----------------------
    PIXEL_FORMAT_CUSTOM         = -4,
        // Custom pixel-format described by a PixelFormatInfo structure

    PIXEL_FORMAT_TRANSLUCENT    = -3,
        // System chooses a format that supports translucency (many alpha bits)

    PIXEL_FORMAT_TRANSPARENT    = -2,
        // System chooses a format that supports transparency
        // (at least 1 alpha bit)

    PIXEL_FORMAT_OPAQUE         = -1,
        // System chooses an opaque format (no alpha bits required)

    // real pixel formats supported for rendering -----------------------------
    PIXEL_FORMAT_RGBA_8888   = HAL_PIXEL_FORMAT_RGBA_8888,   // 4x8-bit RGBA
    PIXEL_FORMAT_RGBX_8888   = HAL_PIXEL_FORMAT_RGBX_8888,   // 4x8-bit RGB0
    PIXEL_FORMAT_RGB_888     = HAL_PIXEL_FORMAT_RGB_888,     // 3x8-bit RGB
    PIXEL_FORMAT_RGB_565     = HAL_PIXEL_FORMAT_RGB_565,     // 16-bit RGB
    PIXEL_FORMAT_BGRA_8888   = HAL_PIXEL_FORMAT_BGRA_8888,   // 4x8-bit BGRA
    PIXEL_FORMAT_RGBA_5551   = HAL_PIXEL_FORMAT_RGBA_5551,   // 16-bit ARGB
    PIXEL_FORMAT_RGBA_4444   = HAL_PIXEL_FORMAT_RGBA_4444,   // 16-bit ARGB
    PIXEL_FORMAT_A_8         = 8,                            // 8-bit A
};

typedef int32_t PixelFormat;

struct PixelFormatInfo {
    enum {
        INDEX_ALPHA   = 0,
        INDEX_RED     = 1,
        INDEX_GREEN   = 2,
        INDEX_BLUE    = 3
    };

    enum { // components
        ALPHA   = 1,
        RGB     = 2,
        RGBA    = 3,
        L       = 4,
        LA      = 5,
        OTHER   = 0xFF
    };

    struct szinfo {
        uint8_t h;
        uint8_t l;
    };

    inline PixelFormatInfo() : version(sizeof(PixelFormatInfo)) { }
    size_t getScanlineSize(unsigned int width) const;
    size_t getSize(size_t ci) const {
        return (ci <= 3) ? (cinfo[ci].h - cinfo[ci].l) : 0;
    }

    size_t      version;
    PixelFormat format;
    size_t      bytesPerPixel;
    size_t      bitsPerPixel;
    union {
        szinfo      cinfo[4];
        struct {
            uint8_t     h_alpha;
            uint8_t     l_alpha;
            uint8_t     h_red;
            uint8_t     l_red;
            uint8_t     h_green;
            uint8_t     l_green;
            uint8_t     h_blue;
            uint8_t     l_blue;
        };
    };
    uint8_t     components;
    uint8_t     reserved0[3];
    uint32_t    reserved1;
};
At first glance this looks inconsistent with the HAL definitions below, but it is really an extension of them: the "real" rendering formats simply alias the HAL_PIXEL_FORMAT_* constants, while the negative values are logical formats used only by SurfaceFlinger.
The format we care about, HAL_PIXEL_FORMAT_YCrCb_420_SP, is defined in ./system/core/include/system/graphics.h:
enum {
    HAL_PIXEL_FORMAT_RGBA_8888          = 1,
    HAL_PIXEL_FORMAT_RGBX_8888          = 2,
    HAL_PIXEL_FORMAT_RGB_888            = 3,
    HAL_PIXEL_FORMAT_RGB_565            = 4,
    HAL_PIXEL_FORMAT_BGRA_8888          = 5,
    HAL_PIXEL_FORMAT_RGBA_5551          = 6,
    HAL_PIXEL_FORMAT_RGBA_4444          = 7,

    /* 0x8 - 0xFF range unavailable */

    /*
     * 0x100 - 0x1FF
     *
     * This range is reserved for pixel formats that are specific to the HAL
     * implementation.  Implementations can use any value in this range to
     * communicate video pixel formats between their HAL modules.  These formats
     * must not have an alpha channel.  Additionally, an EGLimage created from a
     * gralloc buffer of one of these formats must be supported for use with the
     * GL_OES_EGL_image_external OpenGL ES extension.
     */

    /*
     * Android YUV format:
     *
     * This format is exposed outside of the HAL to software decoders and
     * applications.  EGLImageKHR must support it in conjunction with the
     * OES_EGL_image_external extension.
     *
     * YV12 is a 4:2:0 YCrCb planar format comprised of a WxH Y plane followed
     * by (W/2) x (H/2) Cr and Cb planes.
     *
     * This format assumes
     * - an even width
     * - an even height
     * - a horizontal stride multiple of 16 pixels
     * - a vertical stride equal to the height
     *
     *   y_size = stride * height
     *   c_stride = ALIGN(stride/2, 16)
     *   c_size = c_stride * height/2
     *   size = y_size + c_size * 2
     *   cr_offset = y_size
     *   cb_offset = y_size + c_size
     *
     */
    HAL_PIXEL_FORMAT_YV12   = 0x32315659, // YCrCb 4:2:0 Planar

    /*
     * Android RAW sensor format:
     *
     * This format is exposed outside of the HAL to applications.
     *
     * RAW_SENSOR is a single-channel 16-bit format, typically representing raw
     * Bayer-pattern images from an image sensor, with minimal processing.
     *
     * The exact pixel layout of the data in the buffer is sensor-dependent, and
     * needs to be queried from the camera device.
     *
     * Generally, not all 16 bits are used; more common values are 10 or 12
     * bits.  All parameters to interpret the raw data (black and white points,
     * color space, etc) must be queried from the camera device.
     *
     * This format assumes
     * - an even width
     * - an even height
     * - a horizontal stride multiple of 16 pixels (32 bytes).
     */
    HAL_PIXEL_FORMAT_RAW_SENSOR = 0x20,

    /* Legacy formats (deprecated), used by ImageFormat.java */
    HAL_PIXEL_FORMAT_YCbCr_422_SP       = 0x10, // NV16
    HAL_PIXEL_FORMAT_YCrCb_420_SP       = 0x11, // NV21
    HAL_PIXEL_FORMAT_YCbCr_422_I        = 0x14, // YUY2
};
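For reference, HAL_PIXEL_FORMAT_YCrCb_420_SP (0x11) is NV21: semi-planar 4:2:0, a full-resolution Y plane followed by one interleaved V/U plane at half resolution in both directions. A quick sanity check of the frame size, assuming no row padding (real buffers may add stride padding, as the YV12 comment above shows):

// NV21 layout (no stride padding assumed):
//   y_size  = width * height
//   vu_size = width * height / 2   (interleaved Cr/Cb, half x/y resolution)
//   total   = width * height * 3 / 2
static inline size_t nv21_frame_size(size_t width, size_t height)
{
    return width * height * 3 / 2;
}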