[Qualcomm SDM660] (1): Camera Driver Bringup Guide
[Qualcomm SDM660] (2): Camera Kernel Driver Code Analysis
[Qualcomm SDM660] (3): Camera V4L2 Driver Layer Analysis
[Qualcomm SDM660] (4): Camera Init Flow
[Qualcomm SDM660] (5): Camera Open Flow
[Qualcomm SDM660] (6): Camera getParameters and setParameters Flow
[Qualcomm SDM660] (7): Camera onPreview Code Flow
[Qualcomm SDM660] (8): Camera MetaData Introduction
[Qualcomm SDM660, Android 10.0] (9): Qcom Camera Daemon Code Analysis
[Qualcomm SDM660, Android 10.0] (10): Camera Sensor lib and Kernel Camera Probe Code Analysis
[Qualcomm SDM660] Camera Capture Flow
[Qualcomm SDM660] Camera mm-qcamera-app Code Analysis
In short, camera parameters used to be set and queried by calling setParameters()/getParameters().
In the newer Camera API2 / HAL3 architecture, parameters are instead passed down and read back in the form of Camera MetaData.
Camera MetaData packs all camera parameters, as ordered entry structures, into a single contiguous block of memory that can be shared across processes.
With API2, the Java layer sets the parameters directly and packs them into the CaptureRequest.
For API1 compatibility, the setParameters()/getParameters() calls are converted internally, and the parameters are ultimately passed down as metadata as well.
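For example, a legacy API1 string parameter such as flash-mode ends up as the ANDROID_FLASH_MODE metadata tag. The sketch below only illustrates that kind of translation; the helper name and the exact mapping are made up, the real conversion lives in the API1-to-HAL3 compatibility shim.
#include <string.h>
#include <camera/CameraMetadata.h>
using namespace android;
// Hypothetical illustration: translate an API1 "flash-mode" string into the
// ANDROID_FLASH_MODE metadata entry. Not the actual shim code.
static void applyLegacyFlashMode(const char *flashMode, CameraMetadata &settings) {
    uint8_t mode = ANDROID_FLASH_MODE_OFF;
    if (strcmp(flashMode, "torch") == 0) {
        mode = ANDROID_FLASH_MODE_TORCH;
    } else if (strcmp(flashMode, "on") == 0) {
        mode = ANDROID_FLASH_MODE_SINGLE;   // fire once for still capture
    }
    settings.update(ANDROID_FLASH_MODE, &mode, 1);  // stored as a metadata entry
}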
Next, let's look at how Camera MetaData is defined and how it is used.
The Camera MetaData definitions live mainly under system/media/camera/.
As the Android.bp shows, they are built into the libcamera_metadata.so library.
# system/media/camera/Android.bp
subdirs = ["tests"]
cc_library_shared {
    name: "libcamera_metadata",
    vendor_available: true,
    vndk: {
        enabled: true,
    },
    srcs: ["src/camera_metadata.c"],
    include_dirs: ["system/media/private/camera/include"],
    local_include_dirs: ["include"],
    export_include_dirs: ["include"],

    shared_libs: [
        "libcutils",
        "liblog",
    ],
}
The Camera MetaData definitions are spread across the following files:
/system/media/camera/include/system/camera_metadata_tags.h
/system/media/camera/include/system/camera_metadata.h
/system/media/camera/include/system/camera_vendor_tags.h
/system/media/camera/src/camera_metadata_tag_info.c
/system/media/camera/src/camera_metadata.c
camera_metadata.c contains a memory layout diagram showing that the Camera MetaData structure is a single contiguous block of memory.
Its layout is as follows:
# system/media/camera/src/camera_metadata.c
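The diagram itself is not reproduced here, but based on place_camera_metadata() shown later, the layout can be sketched roughly as:
/*
 * Rough sketch of the buffer layout (see the full diagram in camera_metadata.c):
 *
 *   +----------------------------------------------------+
 *   | camera_metadata_t header (version, counts, offsets) |
 *   +----------------------------------------------------+  <- entries_start (ENTRY_ALIGNMENT, 4 bytes)
 *   | camera_metadata_buffer_entry_t[entry_capacity]      |
 *   +----------------------------------------------------+  <- data_start (DATA_ALIGNMENT, 8 bytes)
 *   | raw data region [data_capacity bytes]               |  payloads larger than 4 bytes
 *   +----------------------------------------------------+
 */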
Each TAG is stored in a camera_metadata_buffer_entry_t, shown below. Entries are aligned to 4 bytes (ENTRY_ALIGNMENT); any payload that does not fit into the entry's 4-byte inline value is placed in the data region, which is aligned to 8 bytes (DATA_ALIGNMENT).
/**
* A datum of metadata. This corresponds to camera_metadata_entry_t::data
* with the difference that each element is not a pointer. We need to have a
* non-pointer type description in order to figure out the largest alignment
* requirement for data (DATA_ALIGNMENT).
*/
#define DATA_ALIGNMENT ((size_t) 8)
typedef union camera_metadata_data {
uint8_t u8;
int32_t i32;
float f;
int64_t i64;
double d;
camera_metadata_rational_t r;
} camera_metadata_data_t;
#define ENTRY_ALIGNMENT ((size_t) 4)
typedef struct camera_metadata_buffer_entry {
uint32_t tag;
uint32_t count;
union {
uint32_t offset;
uint8_t value[4];
} data;
uint8_t type;
uint8_t reserved[3];
} camera_metadata_buffer_entry_t;
All Camera MetaData TAGs are defined in camera_metadata_tags.h.
As you can see, 26 top-level sections are defined by default:
# system/media/camera/include/system/camera_metadata_tags.h
/* Top level hierarchy definitions for camera metadata. *_INFO sections are for
the static metadata that can be retrieved without opening the camera device.
New sections must be added right before ANDROID_SECTION_COUNT to maintain
existing enumerations. */
typedef enum camera_metadata_section {
ANDROID_COLOR_CORRECTION,
ANDROID_CONTROL, // control metadata
ANDROID_DEMOSAIC,
ANDROID_EDGE,
ANDROID_FLASH,
ANDROID_FLASH_INFO,
ANDROID_HOT_PIXEL,
ANDROID_JPEG,
ANDROID_LENS,
ANDROID_LENS_INFO,
ANDROID_NOISE_REDUCTION,
ANDROID_QUIRKS,
ANDROID_REQUEST,
ANDROID_SCALER,
ANDROID_SENSOR,
ANDROID_SENSOR_INFO,
ANDROID_SHADING,
ANDROID_STATISTICS,
ANDROID_STATISTICS_INFO,
ANDROID_TONEMAP,
ANDROID_LED,
ANDROID_INFO,
ANDROID_BLACK_LEVEL,
ANDROID_SYNC,
ANDROID_REPROCESS,
ANDROID_DEPTH,
ANDROID_SECTION_COUNT,
VENDOR_SECTION = 0x8000
} camera_metadata_section_t;
Since all entries are kept in order by tag, each section is given its own range of the tag enum space; the start value of each range is the section index shifted left by 16 bits:
/**
* Hierarchy positions in enum space. All vendor extension tags must be
* defined with tag >= VENDOR_SECTION_START
*/
typedef enum camera_metadata_section_start {
ANDROID_COLOR_CORRECTION_START = ANDROID_COLOR_CORRECTION << 16,
ANDROID_CONTROL_START = ANDROID_CONTROL << 16,
ANDROID_DEMOSAIC_START = ANDROID_DEMOSAIC << 16,
ANDROID_EDGE_START = ANDROID_EDGE << 16,
ANDROID_FLASH_START = ANDROID_FLASH << 16,
ANDROID_FLASH_INFO_START = ANDROID_FLASH_INFO << 16,
ANDROID_HOT_PIXEL_START = ANDROID_HOT_PIXEL << 16,
ANDROID_JPEG_START = ANDROID_JPEG << 16,
ANDROID_LENS_START = ANDROID_LENS << 16,
ANDROID_LENS_INFO_START = ANDROID_LENS_INFO << 16,
ANDROID_NOISE_REDUCTION_START = ANDROID_NOISE_REDUCTION << 16,
ANDROID_QUIRKS_START = ANDROID_QUIRKS << 16,
ANDROID_REQUEST_START = ANDROID_REQUEST << 16,
ANDROID_SCALER_START = ANDROID_SCALER << 16,
ANDROID_SENSOR_START = ANDROID_SENSOR << 16,
ANDROID_SENSOR_INFO_START = ANDROID_SENSOR_INFO << 16,
ANDROID_SHADING_START = ANDROID_SHADING << 16,
ANDROID_STATISTICS_START = ANDROID_STATISTICS << 16,
ANDROID_STATISTICS_INFO_START = ANDROID_STATISTICS_INFO << 16,
ANDROID_TONEMAP_START = ANDROID_TONEMAP << 16,
ANDROID_LED_START = ANDROID_LED << 16,
ANDROID_INFO_START = ANDROID_INFO << 16,
ANDROID_BLACK_LEVEL_START = ANDROID_BLACK_LEVEL << 16,
ANDROID_SYNC_START = ANDROID_SYNC << 16,
ANDROID_REPROCESS_START = ANDROID_REPROCESS << 16,
ANDROID_DEPTH_START = ANDROID_DEPTH << 16,
VENDOR_SECTION_START = VENDOR_SECTION << 16
} camera_metadata_section_start_t;
Next come the detailed tag definitions for each section; each section's tags start at ##SECTION##_START and the list is terminated by ##SECTION##_END.
/**
 * Main enum for defining camera metadata tags. New entries must always go
 * before the section _END tag to preserve existing enumeration values. In
 * addition, the name and type of the tag needs to be added to
 * system/media/camera/src/camera_metadata_tag_info.c
 */
typedef enum camera_metadata_tag {
    ANDROID_COLOR_CORRECTION_MODE =                      // enum       | public
            ANDROID_COLOR_CORRECTION_START,
    ANDROID_COLOR_CORRECTION_TRANSFORM,                  // rational[] | public
    ANDROID_COLOR_CORRECTION_GAINS,                      // float[]    | public
    ANDROID_COLOR_CORRECTION_ABERRATION_MODE,            // enum       | public
    ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES, // byte[]     | public
    ANDROID_COLOR_CORRECTION_END,

    ANDROID_CONTROL_AE_ANTIBANDING_MODE =                // enum       | public
            ANDROID_CONTROL_START,
    ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,            // int32      | public
    ANDROID_CONTROL_AE_LOCK,                             // enum       | public
    ANDROID_CONTROL_AE_MODE,                             // enum       | public
    ......
    ANDROID_CONTROL_END,

    ANDROID_FLASH_FIRING_POWER =                         // byte       | system
            ANDROID_FLASH_START,
    ANDROID_FLASH_FIRING_TIME,                           // int64      | system
    ANDROID_FLASH_MODE,                                  // enum       | public
    ANDROID_FLASH_COLOR_TEMPERATURE,                     // byte       | system
    ANDROID_FLASH_MAX_ENERGY,                            // byte       | system
    ANDROID_FLASH_STATE,                                 // enum       | public
    ANDROID_FLASH_END,
    ......
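In other words, the upper 16 bits of a tag identify its section and the lower 16 bits are its index within that section. For example, ANDROID_FLASH is section 4, so ANDROID_FLASH_START == 4 << 16 == 0x40000, and ANDROID_FLASH_MODE (the third tag of that section) is 0x40002. A tag can be decomposed like this:
uint32_t tag     = ANDROID_FLASH_MODE;   // 0x40002
uint32_t section = tag >> 16;            // 4 -> ANDROID_FLASH
uint32_t index   = tag & 0xFFFF;         // 2 -> offset within the android.flash section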
# system/media/camera/include/system/camera_metadata.h
// Two lookup arrays, one entry per section.
#include "camera_metadata_tags.h"
ANDROID_API
extern unsigned int camera_metadata_section_bounds[ANDROID_SECTION_COUNT][2];
ANDROID_API
extern const char *camera_metadata_section_names[ANDROID_SECTION_COUNT];
The header then declares a number of commonly used API functions:
ANDROID_API
camera_metadata_t *allocate_camera_metadata(size_t entry_capacity, size_t data_capacity);
ANDROID_API
camera_metadata_t *place_camera_metadata(void *dst, size_t dst_size, size_t entry_capacity, size_t data_capacity);
ANDROID_API
void free_camera_metadata(camera_metadata_t *metadata);
ANDROID_API
size_t calculate_camera_metadata_size(size_t entry_count, size_t data_count);
ANDROID_API
camera_metadata_t *copy_camera_metadata(void *dst, size_t dst_size, const camera_metadata_t *src);
ANDROID_API
int add_camera_metadata_entry(camera_metadata_t *dst, uint32_t tag, const void *data, size_t data_count);
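A minimal usage sketch of this C API (the function below is hypothetical, error handling is mostly omitted; OK/NOT_FOUND are the return codes used by camera_metadata.c):
#include <system/camera_metadata.h>
// Hypothetical sketch: build a small metadata buffer, add one tag, read it back.
int metadata_demo(void) {
    // Room for 8 entries and 64 bytes of out-of-line data.
    camera_metadata_t *meta = allocate_camera_metadata(8, 64);
    if (meta == NULL) return -1;

    uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON;
    add_camera_metadata_entry(meta, ANDROID_CONTROL_AE_MODE, &aeMode, 1);

    camera_metadata_entry_t entry;
    if (find_camera_metadata_entry(meta, ANDROID_CONTROL_AE_MODE, &entry) == 0 /* OK */) {
        // entry.data.u8[0] now holds ANDROID_CONTROL_AE_MODE_ON
    }

    free_camera_metadata(meta);
    return 0;
}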
camera_vendor_tags.h defines the interface through which vendors can define their own metadata tags and query them.
# system/media/camera/include/system/camera_vendor_tags.h
typedef struct vendor_tag_ops vendor_tag_ops_t;
struct vendor_tag_ops {
int (*get_tag_count)(const vendor_tag_ops_t *v);
void (*get_all_tags)(const vendor_tag_ops_t *v, uint32_t *tag_array);
const char *(*get_section_name)(const vendor_tag_ops_t *v, uint32_t tag);
const char *(*get_tag_name)(const vendor_tag_ops_t *v, uint32_t tag);
int (*get_tag_type)(const vendor_tag_ops_t *v, uint32_t tag);
void *reserved[8];
};
struct vendor_tag_cache_ops {
int (*get_tag_count)(metadata_vendor_id_t id);
void (*get_all_tags)(uint32_t *tag_array, metadata_vendor_id_t id);
const char *(*get_section_name)(uint32_t tag, metadata_vendor_id_t id);
const char *(*get_tag_name)(uint32_t tag, metadata_vendor_id_t id);
int (*get_tag_type)(uint32_t tag, metadata_vendor_id_t id);
void *reserved[8];
};
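As a rough, hypothetical illustration (the section name, tag name, and values below are invented; this is not the actual QTI implementation), a vendor could fill in vendor_tag_ops like this and register it with set_camera_metadata_vendor_ops():
#include <system/camera_metadata.h>
#include <system/camera_vendor_tags.h>
// Hypothetical vendor table: a single byte tag "testMode" in a made-up section.
static int demo_get_tag_count(const vendor_tag_ops_t * /*v*/) { return 1; }
static void demo_get_all_tags(const vendor_tag_ops_t * /*v*/, uint32_t *tag_array) {
    tag_array[0] = VENDOR_SECTION_START;            // first (and only) vendor tag
}
static const char *demo_get_section_name(const vendor_tag_ops_t *, uint32_t) { return "com.example.demo"; }
static const char *demo_get_tag_name(const vendor_tag_ops_t *, uint32_t) { return "testMode"; }
static int demo_get_tag_type(const vendor_tag_ops_t *, uint32_t) { return TYPE_BYTE; }

static void register_demo_vendor_tags(void) {
    static vendor_tag_ops_t ops = {};
    ops.get_tag_count    = demo_get_tag_count;
    ops.get_all_tags     = demo_get_all_tags;
    ops.get_section_name = demo_get_section_name;
    ops.get_tag_name     = demo_get_tag_name;
    ops.get_tag_type     = demo_get_tag_type;
    set_camera_metadata_vendor_ops(&ops);           // declared in camera_metadata_hidden.h
}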
# system/media/camera/src/camera_metadata_tag_info.c
const char *camera_metadata_section_names[ANDROID_SECTION_COUNT] = {
[ANDROID_COLOR_CORRECTION] = "android.colorCorrection",
[ANDROID_CONTROL] = "android.control",
[ANDROID_DEMOSAIC] = "android.demosaic",
[ANDROID_EDGE] = "android.edge",
[ANDROID_FLASH] = "android.flash",
[ANDROID_FLASH_INFO] = "android.flash.info",
[ANDROID_HOT_PIXEL] = "android.hotPixel",
[ANDROID_JPEG] = "android.jpeg",
[ANDROID_LENS] = "android.lens",
[ANDROID_LENS_INFO] = "android.lens.info",
[ANDROID_NOISE_REDUCTION] = "android.noiseReduction",
[ANDROID_QUIRKS] = "android.quirks",
[ANDROID_REQUEST] = "android.request",
[ANDROID_SCALER] = "android.scaler",
[ANDROID_SENSOR] = "android.sensor",
[ANDROID_SENSOR_INFO] = "android.sensor.info",
[ANDROID_SHADING] = "android.shading",
[ANDROID_STATISTICS] = "android.statistics",
[ANDROID_STATISTICS_INFO] = "android.statistics.info",
[ANDROID_TONEMAP] = "android.tonemap",
[ANDROID_LED] = "android.led",
[ANDROID_INFO] = "android.info",
[ANDROID_BLACK_LEVEL] = "android.blackLevel",
[ANDROID_SYNC] = "android.sync",
[ANDROID_REPROCESS] = "android.reprocess",
[ANDROID_DEPTH] = "android.depth",
};
static tag_info_t android_flash[ANDROID_FLASH_END -
ANDROID_FLASH_START] = {
[ ANDROID_FLASH_FIRING_POWER - ANDROID_FLASH_START ] =
{ "firingPower", TYPE_BYTE },
[ ANDROID_FLASH_FIRING_TIME - ANDROID_FLASH_START ] =
{ "firingTime", TYPE_INT64 },
[ ANDROID_FLASH_MODE - ANDROID_FLASH_START ] =
{ "mode", TYPE_BYTE },
[ ANDROID_FLASH_COLOR_TEMPERATURE - ANDROID_FLASH_START ] =
{ "colorTemperature", TYPE_BYTE },
[ ANDROID_FLASH_MAX_ENERGY - ANDROID_FLASH_START ] =
{ "maxEnergy", TYPE_BYTE },
[ ANDROID_FLASH_STATE - ANDROID_FLASH_START ] =
{ "state", TYPE_BYTE },
};
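These tables back the name/type lookup helpers exported by libcamera_metadata. A small sketch of how they might be used (the wrapper function is hypothetical, the get_camera_metadata_* calls are the real exported helpers):
#include <stdio.h>
#include <system/camera_metadata.h>
// Print the section, name, and type of a tag, e.g. ANDROID_FLASH_MODE -> "android.flash.mode".
static void dump_tag_info(uint32_t tag) {
    const char *section = get_camera_metadata_section_name(tag);  // e.g. "android.flash"
    const char *name    = get_camera_metadata_tag_name(tag);      // e.g. "mode"
    int type            = get_camera_metadata_tag_type(tag);      // e.g. TYPE_BYTE
    printf("%s.%s type=%d\n", section ? section : "?", name ? name : "?", type);
}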
Now that the memory layout, the macros, and the available operations are clear, let's dive into the C code and look at the core implementation.
# system/media/camera/src/camera_metadata.c
#define LOG_TAG "camera_metadata"
#include <system/camera_metadata.h>
#include <camera_metadata_hidden.h>
// Get a pointer to the entry array
static camera_metadata_buffer_entry_t *get_entries(const camera_metadata_t *metadata) {
return (camera_metadata_buffer_entry_t*)((uint8_t*)metadata + metadata->entries_start);
}
// Get a pointer to the data region
static uint8_t *get_data(const camera_metadata_t *metadata) {
return (uint8_t*)metadata + metadata->data_start;
}
// Allocate a camera_metadata object
camera_metadata_t *allocate_camera_metadata(size_t entry_capacity, size_t data_capacity) {
size_t memory_needed = calculate_camera_metadata_size(entry_capacity,data_capacity);
void *buffer = calloc(1, memory_needed);
camera_metadata_t *metadata = place_camera_metadata( buffer, memory_needed, entry_capacity, data_capacity);
return metadata;
}
// Lay out a camera_metadata structure in a caller-provided buffer
camera_metadata_t *place_camera_metadata(void *dst, size_t dst_size, size_t entry_capacity, size_t data_capacity) {
size_t memory_needed = calculate_camera_metadata_size(entry_capacity, data_capacity);
if (memory_needed > dst_size) return NULL;
camera_metadata_t *metadata = (camera_metadata_t*)dst;
metadata->version = CURRENT_METADATA_VERSION;
metadata->flags = 0;
metadata->entry_count = 0;
metadata->entry_capacity = entry_capacity;
metadata->entries_start = ALIGN_TO(sizeof(camera_metadata_t), ENTRY_ALIGNMENT);
metadata->data_count = 0;
metadata->data_capacity = data_capacity;
metadata->size = memory_needed;
size_t data_unaligned = (uint8_t*)(get_entries(metadata) + metadata->entry_capacity) - (uint8_t*)metadata;
metadata->data_start = ALIGN_TO(data_unaligned, DATA_ALIGNMENT);
metadata->vendor_id = CAMERA_METADATA_INVALID_VENDOR_ID;
assert(validate_camera_metadata_structure(metadata, NULL) == OK);
return metadata;
}
void free_camera_metadata(camera_metadata_t *metadata) {
free(metadata);
}
// Copy a metadata structure (compacted)
camera_metadata_t *copy_camera_metadata(void *dst, size_t dst_size, const camera_metadata_t *src) {
size_t memory_needed = get_camera_metadata_compact_size(src);
camera_metadata_t *metadata = place_camera_metadata(dst, dst_size, src->entry_count, src->data_count);
metadata->flags = src->flags;
metadata->entry_count = src->entry_count;
metadata->data_count = src->data_count;
metadata->vendor_id = src->vendor_id;
memcpy(get_entries(metadata), get_entries(src), sizeof(camera_metadata_buffer_entry_t[metadata->entry_count]));
memcpy(get_data(metadata), get_data(src), sizeof(uint8_t[metadata->data_count]));
assert(validate_camera_metadata_structure(metadata, NULL) == OK);
return metadata;
}
int add_camera_metadata_entry(camera_metadata_t *dst, uint32_t tag, const void *data, size_t data_count) {
int type = get_local_camera_metadata_tag_type(tag, dst);
return add_camera_metadata_entry_raw(dst, tag, type, data, data_count);
}
int find_camera_metadata_entry(camera_metadata_t *src, uint32_t tag, camera_metadata_entry_t *entry) {
if (src == NULL) return ERROR;
uint32_t index;
if (src->flags & FLAG_SORTED) {
// Sorted entries, do a binary search
camera_metadata_buffer_entry_t *search_entry = NULL;
camera_metadata_buffer_entry_t key;
key.tag = tag;
search_entry = bsearch(&key, get_entries(src), src->entry_count,
sizeof(camera_metadata_buffer_entry_t), compare_entry_tags);
if (search_entry == NULL) return NOT_FOUND;
index = search_entry - get_entries(src);
} else {
// Not sorted, linear search
camera_metadata_buffer_entry_t *search_entry = get_entries(src);
for (index = 0; index < src->entry_count; index++, search_entry++) {
if (search_entry->tag == tag) {
break;
}
}
if (index == src->entry_count) return NOT_FOUND;
}
return get_camera_metadata_entry(src, index, entry);
}
int delete_camera_metadata_entry(camera_metadata_t *dst, size_t index) {
camera_metadata_buffer_entry_t *entry = get_entries(dst) + index;
size_t data_bytes = calculate_camera_metadata_entry_data_size(entry->type, entry->count);
if (data_bytes > 0) {
// Shift data buffer to overwrite deleted data
uint8_t *start = get_data(dst) + entry->data.offset;
uint8_t *end = start + data_bytes;
size_t length = dst->data_count - entry->data.offset - data_bytes;
memmove(start, end, length);
// Update all entry indices to account for shift
camera_metadata_buffer_entry_t *e = get_entries(dst);
size_t i;
for (i = 0; i < dst->entry_count; i++) {
if (calculate_camera_metadata_entry_data_size( e->type, e->count) > 0 &&
e->data.offset > entry->data.offset) {
e->data.offset -= data_bytes;
}
++e;
}
dst->data_count -= data_bytes;
}
// Shift entry array
memmove(entry, entry + 1, sizeof(camera_metadata_buffer_entry_t) *(dst->entry_count - index - 1) );
dst->entry_count -= 1;
assert(validate_camera_metadata_structure(dst, NULL) == OK);
return OK;
}
int update_camera_metadata_entry(camera_metadata_t *dst, size_t index, const void *data, size_t data_count,
camera_metadata_entry_t *updated_entry) {
camera_metadata_buffer_entry_t *entry = get_entries(dst) + index;
size_t data_bytes =calculate_camera_metadata_entry_data_size(entry->type, data_count);
size_t data_payload_bytes =data_count * camera_metadata_type_size[entry->type];
size_t entry_bytes = calculate_camera_metadata_entry_data_size(entry->type, entry->count);
if (data_bytes != entry_bytes) {
// May need to shift/add to data array
if (dst->data_capacity < dst->data_count + data_bytes - entry_bytes) {
// No room
return ERROR;
}
if (entry_bytes != 0) {
// Remove old data
uint8_t *start = get_data(dst) + entry->data.offset;
uint8_t *end = start + entry_bytes;
size_t length = dst->data_count - entry->data.offset - entry_bytes;
memmove(start, end, length);
dst->data_count -= entry_bytes;
// Update all entry indices to account for shift
camera_metadata_buffer_entry_t *e = get_entries(dst);
size_t i;
for (i = 0; i < dst->entry_count; i++) {
if (calculate_camera_metadata_entry_data_size( e->type, e->count) > 0 && e->data.offset > entry->data.offset) {
e->data.offset -= entry_bytes;
}
++e;
}
}
if (data_bytes != 0) {
// Append new data
entry->data.offset = dst->data_count;
memcpy(get_data(dst) + entry->data.offset, data, data_payload_bytes);
dst->data_count += data_bytes;
}
} else if (data_bytes != 0) {
// data size unchanged, reuse same data location
memcpy(get_data(dst) + entry->data.offset, data, data_payload_bytes);
}
if (data_bytes == 0) {
// Data fits into entry
memcpy(entry->data.value, data, data_payload_bytes);
}
entry->count = data_count;
if (updated_entry != NULL) {
get_camera_metadata_entry(dst, index, updated_entry);
}
assert(validate_camera_metadata_structure(dst, NULL) == OK);
return OK;
}
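Note how an entry keeps its payload either inline or in the data region: calculate_camera_metadata_entry_data_size() returns 0 when the payload fits into the 4-byte entry->data.value field, otherwise the payload lives in the 8-byte-aligned data region and is referenced through entry->data.offset. A simplified sketch of that rule (not the exact AOSP code):
// Simplified sketch of the inline-vs-data-region decision used throughout camera_metadata.c.
size_t entry_data_size_sketch(uint8_t type, size_t data_count) {
    size_t payload = data_count * camera_metadata_type_size[type];  // total payload bytes
    return (payload <= 4) ? 0 : ALIGN_TO(payload, DATA_ALIGNMENT);  // 0 means "stored inline"
}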
Through the vendor ops, vendors can define their own metadata tags and the operations on them.
The ops are registered with set_camera_metadata_vendor_ops() and set_camera_metadata_vendor_cache_ops().
# system/media/camera/src/camera_metadata.c
static const vendor_tag_ops_t *vendor_tag_ops = NULL;
static const struct vendor_tag_cache_ops *vendor_cache_ops = NULL;
// Declared in system/media/private/camera/include/camera_metadata_hidden.h
int set_camera_metadata_vendor_ops(const vendor_tag_ops_t* ops) {
vendor_tag_ops = ops;
return OK;
}
// Declared in system/media/private/camera/include/camera_metadata_hidden.h
int set_camera_metadata_vendor_cache_ops( const struct vendor_tag_cache_ops *query_cache_ops) {
vendor_cache_ops = query_cache_ops;
return OK;
}
static void print_data(int fd, const uint8_t *data_ptr, uint32_t tag, int type, int count, int indentation);
void dump_camera_metadata(const camera_metadata_t *metadata, int fd, int verbosity) {
dump_indented_camera_metadata(metadata, fd, verbosity, 0);
}
The C++ CameraMetadata wrapper lives in frameworks/av/camera/CameraMetadata.cpp.
From the Android.mk you can see that CameraMetadata.cpp is compiled together with the camera client code into libcamera_client.so.
# frameworks/av/camera/Android.mk
LOCAL_SRC_FILES += \
    Camera.cpp \
    CameraMetadata.cpp \
    CameraParameters.cpp \
    CameraParameters2.cpp \
    ICamera.cpp \
    ICameraClient.cpp \
    ...

# libcamera_metadata is the shared library built from system/media/camera
LOCAL_SHARED_LIBRARIES := \
    libcamera_metadata \
    ...

LOCAL_MODULE := libcamera_client
The code in frameworks/av/services/camera/libcameraservice/CameraFlashlight.cpp is a good reference.
Using CameraMetadata boils down to the following steps, shown in the code below:
1. Create the mMetadata object.
2. Fetch the default metadata for CAMERA3_TEMPLATE_PREVIEW.
3. Call mMetadata->update() to update the metadata parameters.
4. Call setStreamingRequest() to send the parameters down.
# frameworks/av/services/camera/libcameraservice/CameraFlashlight.cpp
status_t CameraDeviceClientFlashControl::submitTorchEnabledRequest() {
status_t res;
if (mMetadata == NULL) {
// 1. Create the mMetadata object
mMetadata = new CameraMetadata();
// 2. Fetch the default metadata for CAMERA3_TEMPLATE_PREVIEW
res = mDevice->createDefaultRequest( CAMERA3_TEMPLATE_PREVIEW, mMetadata);
}
// 3. Call mMetadata->update() to update the metadata parameters
uint8_t torchOn = ANDROID_FLASH_MODE_TORCH;
mMetadata->update(ANDROID_FLASH_MODE, &torchOn, 1);
mMetadata->update(ANDROID_REQUEST_OUTPUT_STREAMS, &mStreamId, 1);
uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON;
mMetadata->update(ANDROID_CONTROL_AE_MODE, &aeMode, 1);
int32_t requestId = 0;
mMetadata->update(ANDROID_REQUEST_ID, &requestId, 1);
if (mStreaming) {
// 4. Call setStreamingRequest() to send the parameters down
res = mDevice->setStreamingRequest(*mMetadata);
======================>
+ @ frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp
+ List<const CameraMetadata> requests;
+ requests.push_back(request);
+ return setStreamingRequestList(requests, /*lastFrameNumber*/NULL);
+ =======>
+ return submitRequestsHelper(requests, /*repeating*/true, lastFrameNumber);
<======================
} else {
res = mDevice->capture(*mMetadata);
}
return res;
}
As you can see, we end up in Camera3Device.cpp, where the request is submitted and eventually placed on mRequestQueue,
and Camera3Device::RequestThread then picks it up for processing.
# frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp
status_t Camera3Device::submitRequestsHelper(
const List<const CameraMetadata> &requests, bool repeating, /*out*/ int64_t *lastFrameNumber) {
RequestList requestList;
res = convertMetadataListToRequestListLocked(requests, /*out*/&requestList);
if (repeating) {
res = mRequestThread->setRepeatingRequests(requestList, lastFrameNumber);
} else {
res = mRequestThread->queueRequestList(requestList, lastFrameNumber);
}
if (res == OK) {
res = waitUntilStateThenRelock(/*active*/true, kActiveTimeout);
if (res != OK) {
SET_ERR_L("Can't transition to active in %f seconds!", kActiveTimeout/1e9);
}
ALOGV("Camera %d: Capture request %" PRId32 " enqueued", mId,
(*(requestList.begin()))->mResultExtras.requestId);
}
return res;
}
Let's look at the concrete implementation of Camera3Device::RequestThread::threadLoop(); process_capture_request() is where each request is handed to the HAL.
# frameworks/av/services/camera/libcameraservice/device3/Camera3Device.cpp
bool Camera3Device::RequestThread::threadLoop() {
    // 1. Wait for the next batch of requests; they are stored in mNextRequests.
    // Wait for the next batch of requests.
    waitForNextRequestBatch();
    ===========>
    + additionalRequest.captureRequest = waitForNextRequestLocked();
    + mNextRequests.add(additionalRequest);
    <===========
    if (mNextRequests.size() == 0) {
        return true;
    }

    // 2. Get the entry of the latest request; here it is CAMERA3_TEMPLATE_PREVIEW.
    // Get the latest request ID, if any
    int latestRequestId;
    camera_metadata_entry_t requestIdEntry = mNextRequests[mNextRequests.size() - 1].
            captureRequest->mSettings.find(ANDROID_REQUEST_ID);
    if (requestIdEntry.count > 0) {
        latestRequestId = requestIdEntry.data.i32[0];
    }

    // Prepare a batch of HAL requests and output buffers.
    res = prepareHalRequests();
    =============>
    + status_t res = insertTriggers(captureRequest);
    + ------------->
    +     mTriggerRemovedMap.add(tag, trigger);
    +     res = metadata.update(tag, &entryValue, /*count*/1);
    + <-------------
    + mPrevRequest = captureRequest;
    <=============

    mLatestRequestId = latestRequestId;
    mLatestRequestSignal.signal();

    // 3. Call down into the HAL to process the capture request.
    ALOGVV("%s: %d: submitting %zu requests in a batch.", __FUNCTION__, __LINE__, mNextRequests.size());
    for (auto& nextRequest : mNextRequests) {
        // Submit request and block until ready for next one
        ATRACE_ASYNC_BEGIN("frame capture", nextRequest.halRequest.frame_number);
        ATRACE_BEGIN("camera3->process_capture_request");
        res = mHal3Device->ops->process_capture_request(mHal3Device, &nextRequest.halRequest);
        ============>
        + # hardware/qcom/camera/QCamera2/HAL3/QCamera3HWI.cpp
        + QCamera3HardwareInterface *hw = reinterpret_cast<QCamera3HardwareInterface *>(device->priv);
        + int rc = hw->orchestrateRequest(request);
        <============

        // Mark that the request has been submitted successfully.
        nextRequest.submitted = true;

        // Update the latest request sent to HAL
        if (nextRequest.halRequest.settings != NULL) { // Don't update if they were unchanged
            Mutex::Autolock al(mLatestRequestMutex);
            camera_metadata_t* cloned = clone_camera_metadata(nextRequest.halRequest.settings);
            mLatestRequest.acquire(cloned);
            sp<Camera3Device> parent = mParent.promote();
            if (parent != NULL) {
                parent->monitorMetadata(TagMonitor::REQUEST,
                        nextRequest.halRequest.frame_number, 0, mLatestRequest);
            }
        }

        // Remove any previously queued triggers (after unlock)
        res = removeTriggers(mPrevRequest);
    }
    mNextRequests.clear();
    return true;
}
# hardware/qcom/camera/QCamera2/HAL3/QCamera3HWI.cpp
/*===========================================================================
 * FUNCTION   : orchestrateRequest
 *
 * DESCRIPTION: Orchestrates a capture request from camera service
 *
 * PARAMETERS :
 *   @request : request from framework to process
 *
 * RETURN     : Error status codes
 *==========================================================================*/
int32_t QCamera3HardwareInterface::orchestrateRequest(
        camera3_capture_request_t *request) {
    uint32_t originalFrameNumber = request->frame_number;
    uint32_t originalOutputCount = request->num_output_buffers;
    const camera_metadata_t *original_settings = request->settings;
    List<InternalRequest> internallyRequestedStreams;
    List<InternalRequest> emptyInternalList;

    if (isHdrSnapshotRequest(request) && request->input_buffer == NULL) {
        LOGD("Framework requested:%d buffers in HDR snapshot", request->num_output_buffers);
        uint32_t internalFrameNumber;
        CameraMetadata modified_meta;
        int8_t hdr_exp_values;
        cam_hdr_bracketing_info_t& hdrBracketingSetting =
                gCamCapability[mCameraId]->hdr_bracketing_setting;
        uint32_t hdrFrameCount = hdrBracketingSetting.num_frames;
        LOGD("HDR values %d, %d frame count: %u",
                (int8_t) hdrBracketingSetting.exp_val.values[0],
                (int8_t) hdrBracketingSetting.exp_val.values[1],
                hdrFrameCount);

        cam_exp_bracketing_t aeBracket;
        memset(&aeBracket, 0, sizeof(cam_exp_bracketing_t));
        aeBracket.mode = hdrBracketingSetting.exp_val.mode;
        if (aeBracket.mode == CAM_EXP_BRACKETING_OFF) {
            LOGD(" Bracketing is Off");
        }

        /* Add Blob channel to list of internally requested streams */
        for (uint32_t i = 0; i < request->num_output_buffers; i++) {
            if (request->output_buffers[i].stream->format == HAL_PIXEL_FORMAT_BLOB) {
                InternalRequest streamRequested;
                streamRequested.meteringOnly = 1;
                streamRequested.need_metadata = 0;
                streamRequested.stream = request->output_buffers[i].stream;
                internallyRequestedStreams.push_back(streamRequested);
            }
        }
        request->num_output_buffers = 0;
        auto itr = internallyRequestedStreams.begin();

        // This is where the metadata gets modified
        /* Modify setting to set compensation */
        modified_meta = request->settings;
        hdr_exp_values = hdrBracketingSetting.exp_val.values[0];
        int32_t expCompensation = hdr_exp_values;
        uint8_t aeLock = 1;
        modified_meta.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, &expCompensation, 1);
        modified_meta.update(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
        camera_metadata_t *modified_settings = modified_meta.release();
        request->settings = modified_settings;

        /* Capture Settling & -2x frame */
        _orchestrationDb.generateStoreInternalFrameNumber(internalFrameNumber);
        request->frame_number = internalFrameNumber;
        processCaptureRequest(request, internallyRequestedStreams);

        request->num_output_buffers = originalOutputCount;
        _orchestrationDb.allocStoreInternalFrameNumber(originalFrameNumber, internalFrameNumber);
        request->frame_number = internalFrameNumber;
        mHdrFrameNum = internalFrameNumber;
        processCaptureRequest(request, emptyInternalList);
        request->num_output_buffers = 0;

        modified_meta = modified_settings;
        hdr_exp_values = hdrBracketingSetting.exp_val.values[1];
        expCompensation = hdr_exp_values;
        aeLock = 1;
        modified_meta.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, &expCompensation, 1);
        modified_meta.update(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
        modified_settings = modified_meta.release();
        request->settings = modified_settings;

        /* Capture Settling & 0X frame */
        itr = internallyRequestedStreams.begin();
        if (itr == internallyRequestedStreams.end()) {
            LOGE("Error Internally Requested Stream list is empty");
            assert(0);
        } else {
            itr->need_metadata = 0;
            itr->meteringOnly = 1;
        }
        _orchestrationDb.generateStoreInternalFrameNumber(internalFrameNumber);
        request->frame_number = internalFrameNumber;
        processCaptureRequest(request, internallyRequestedStreams);
        ==================>
        + rc = mCameraHandle->ops->set_parms(mCameraHandle->camera_handle, mParameters);
        + ==================>
        + - # hardware/qcom/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_interface.c
        + - /* camera ops v-table */
        + - static mm_camera_ops_t mm_camera_ops = {
        + -     .set_parms = mm_camera_intf_set_parms,
        + -     .get_parms = mm_camera_intf_get_parms,
        + - }
        + - ==================>
        + - | mm_camera_set_parms(my_obj, parms);
        + - | ==================>
        + - | + # hardware/qcom/camera/QCamera2/stack/mm-camera-interface/src/mm_camera.c
        + - | + rc = mm_camera_util_s_ctrl(my_obj, 0, my_obj->ctrl_fd, CAM_PRIV_PARM, &value);
        + - | + =========>
        + - | + # hardware/qcom/camera/QCamera2/stack/mm-camera-interface/src/mm_camera.c
        + - | + control.id = id;
        + - | + control.value = *value;
        + - | + rc = ioctl(fd, VIDIOC_S_CTRL, &control);
        + - | <==================
        + - <==================
        + <==================
        <==================
        _orchestrationDb.generateStoreInternalFrameNumber(internalFrameNumber);
        request->frame_number = internalFrameNumber;
        processCaptureRequest(request, internallyRequestedStreams);

        /* Capture 2X frame*/
        modified_meta = modified_settings;
        hdr_exp_values = hdrBracketingSetting.exp_val.values[2];
        expCompensation = hdr_exp_values;
        aeLock = 1;
        modified_meta.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, &expCompensation, 1);
        modified_meta.update(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
        modified_settings = modified_meta.release();
        request->settings = modified_settings;

        _orchestrationDb.generateStoreInternalFrameNumber(internalFrameNumber);
        request->frame_number = internalFrameNumber;
        processCaptureRequest(request, internallyRequestedStreams);

        _orchestrationDb.generateStoreInternalFrameNumber(internalFrameNumber);
        request->frame_number = internalFrameNumber;
        mHdrSnapshotRunning = true;
        processCaptureRequest(request, internallyRequestedStreams);

        /* Capture 2X on original streaming config*/
        internallyRequestedStreams.clear();

        /* Restore original settings pointer */
        request->settings = original_settings;
    } else {
        uint32_t internalFrameNumber;
        _orchestrationDb.allocStoreInternalFrameNumber(request->frame_number, internalFrameNumber);
        request->frame_number = internalFrameNumber;
        return processCaptureRequest(request, internallyRequestedStreams);
    }
    return NO_ERROR;
}
Tracing the code above, we finally reach ioctl(fd, VIDIOC_S_CTRL, &control);,
which sends a control down to V4L2; the request parameters are carried in control.
# kernel/msm-4.4/drivers/media/v4l2-core/v4l2-subdev.c
static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
{
switch (cmd) {
case VIDIOC_G_CTRL:
return v4l2_g_ctrl(vfh->ctrl_handler, arg);
case VIDIOC_S_CTRL:
return v4l2_s_ctrl(vfh, vfh->ctrl_handler, arg);
}
}
In v4l2-ctrls.c:
# kernel/msm-4.4/drivers/media/v4l2-core/v4l2-ctrls.c
int v4l2_s_ctrl(struct v4l2_fh *fh, struct v4l2_ctrl_handler *hdl, struct v4l2_control *control)
{
struct v4l2_ctrl *ctrl = v4l2_ctrl_find(hdl, control->id);
struct v4l2_ext_control c = { control->id };
int ret;
if (ctrl->flags & V4L2_CTRL_FLAG_READ_ONLY)
return -EACCES;
c.value = control->value;
ret = set_ctrl_lock(fh, ctrl, &c);
===============>
user_to_new(c, ctrl);
ret = set_ctrl(fh, ctrl, 0);
========>
return try_or_set_cluster(fh, master, true, ch_flags);
<===============
control->value = c.value;
return ret;
}
EXPORT_SYMBOL(v4l2_s_ctrl);
In try_or_set_cluster():
# kernel/msm-4.4/drivers/media/v4l2-core/v4l2-ctrls.c
/* Core function that calls try/s_ctrl and ensures that the new value is copied
   to the current value on a set.
   Must be called with ctrl->handler->lock held. */
static int try_or_set_cluster(struct v4l2_fh *fh, struct v4l2_ctrl *master, bool set, u32 ch_flags)
{
    ret = call_op(master, try_ctrl);
    ret = call_op(master, s_ctrl);

    /* If OK, then make the new values permanent. */
    update_flag = is_cur_manual(master) != is_new_manual(master);
    for (i = 0; i < master->ncontrols; i++)
        new_to_cur(fh, master->cluster[i], ch_flags |
            ((update_flag && i > 0) ? V4L2_EVENT_CTRL_CH_FLAGS : 0));
    return 0;
}
Next, call_op(master, s_ctrl) is invoked to apply the new value. call_op is defined as follows:
# kernel/msm-4.4/drivers/media/v4l2-core/v4l2-ctrls.c
#define call_op(master, op) \
(has_op(master, op) ? master->ops->op(master) : 0)
Since master is of type struct v4l2_ctrl *:
struct v4l2_ctrl {
const struct v4l2_ctrl_ops *ops;
const struct v4l2_ctrl_type_ops *type_ops;
...
# kernel/msm-4.4/include/media/v4l2-ctrls.h
v4l2_ctrl_ops is defined as:
struct v4l2_ctrl_ops {
int (*g_volatile_ctrl)(struct v4l2_ctrl *ctrl);
int (*try_ctrl)(struct v4l2_ctrl *ctrl);
int (*s_ctrl)(struct v4l2_ctrl *ctrl);
};
The const struct v4l2_ctrl_ops *ops field is initialized in v4l2_ctrl_new():
/* Add a new control */
static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
const struct v4l2_ctrl_type_ops *type_ops,
u32 id, const char *name, enum v4l2_ctrl_type type,
s64 min, s64 max, u64 step, s64 def,
const u32 dims[V4L2_CTRL_MAX_DIMS], u32 elem_size,
u32 flags, const char * const *qmenu,
const s64 *qmenu_int, void *priv)
{
ctrl->handler = hdl;
ctrl->ops = ops;
ctrl->type_ops = type_ops ? type_ops : &std_type_ops;
ctrl->id = id;
ctrl->name = name;
ctrl->type = type;
Let's pick a driver as a reference, e.g. kernel/msm-4.4/drivers/media/i2c/ov7670.c.
This driver does not actually run on this platform, but it is a good example for understanding what a ctrl is.
During probe it registers a number of ctrls; taking V4L2_CID_BRIGHTNESS as an example and tracing the code below, we eventually reach the register writes.
That makes the whole path clear.
# kernel/msm-4.4/drivers/media/i2c/ov7670.c
static int ov7670_probe(struct i2c_client *client,const struct i2c_device_id *id)
{
v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops, V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,V4L2_CID_CONTRAST, 0, 127, 1, 64);
v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,V4L2_CID_VFLIP, 0, 1, 1, 0);
v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,V4L2_CID_HFLIP, 0, 1, 1, 0);
info->saturation = v4l2_ctrl_new_std(&info->hdl, &ov7670_ctrl_ops,V4L2_CID_SATURATION, 0, 256, 1, 128);
}
static const struct v4l2_ctrl_ops ov7670_ctrl_ops = {
.s_ctrl = ov7670_s_ctrl,
.g_volatile_ctrl = ov7670_g_volatile_ctrl,
};
static int ov7670_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
struct ov7670_info *info = to_state(sd);
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
return ov7670_s_brightness(sd, ctrl->val);
case V4L2_CID_CONTRAST:
return ov7670_s_contrast(sd, ctrl->val);
case V4L2_CID_SATURATION:
return ov7670_s_sat_hue(sd, info->saturation->val, info->hue->val);
}
return -EINVAL;
}
static int ov7670_s_brightness(struct v4l2_subdev *sd, int value)
{
unsigned char com8 = 0, v;
int ret;
ov7670_read(sd, REG_COM8, &com8);
com8 &= ~COM8_AEC;
ov7670_write(sd, REG_COM8, com8);
v = ov7670_abs_to_sm(value);
ret = ov7670_write(sd, REG_BRIGHT, v);
return ret;
}
In section 3.1 above we traced in detail how CameraMetadata parameters are sent down:
1. Create the mMetadata object and fetch the default metadata for CAMERA3_TEMPLATE_PREVIEW.
2. Call mMetadata->update() to update the metadata parameters, then call setStreamingRequest() to send them down.
3. In Camera3Device.cpp the request is eventually placed on mRequestQueue.
4. Camera3Device::RequestThread::threadLoop() processes the messages on mRequestQueue.
5. process_capture_request() handles the request.
6. QCamera3HardwareInterface::orchestrateRequest() handles the request coming down from the framework.
7. ioctl(fd, VIDIOC_S_CTRL, &control); sends the parameters to V4L2.
8. Depending on the v4l2_ctrl ops, different driver callbacks are invoked, and these finally program the hardware registers.
The CameraMetadata class itself is declared as follows:
# frameworks/av/include/camera/CameraMetadata.h
class CameraMetadata: public Parcelable {
  public:
    /** Creates an empty object; best used when expecting to acquire contents from elsewhere */
    CameraMetadata();
    /** Creates an object with space for entryCapacity entries, with dataCapacity extra storage */
    CameraMetadata(size_t entryCapacity, size_t dataCapacity = 10);
    /** Takes ownership of passed-in buffer */
    CameraMetadata(camera_metadata_t *buffer);
    /** Clones the metadata */
    CameraMetadata(const CameraMetadata &other);
    /*
     * Update metadata entry. Will create entry if it doesn't exist already, and
     * will reallocate the buffer if insufficient space exists. Overloaded for
     * the various types of valid data.
     */
    status_t update(uint32_t tag, const uint8_t *data, size_t data_count);
    status_t update(uint32_t tag, const int32_t *data, size_t data_count);
    status_t update(uint32_t tag, const float *data, size_t data_count);
    status_t update(uint32_t tag, const int64_t *data, size_t data_count);
    status_t update(uint32_t tag, const double *data, size_t data_count);
    status_t update(uint32_t tag, const camera_metadata_rational_t *data, size_t data_count);
    status_t update(uint32_t tag, const String8 &string);
    status_t update(const camera_metadata_ro_entry &entry);

    template<typename T>
    status_t update(uint32_t tag, Vector<T> data) {
        return update(tag, data.array(), data.size());
    }

    // Metadata object is unchanged when reading from parcel fails.
    virtual status_t readFromParcel(const Parcel *parcel) override;
    virtual status_t writeToParcel(Parcel *parcel) const override;

    /*
     * Caller becomes the owner of the new metadata.
     * 'const Parcel' doesn't prevent us from calling the read functions,
     * which is interesting since it changes the internal state.
     *
     * NULL can be returned when no metadata was sent, OR if there was an issue
     * unpacking the serialized data (i.e. bad parcel or invalid structure).
     */
    static status_t readFromParcel(const Parcel &parcel, camera_metadata_t **out);

    /*
     * Caller retains ownership of metadata
     * - Write 2 (int32 + blob) args in the current position
     */
    static status_t writeToParcel(Parcel &parcel, const camera_metadata_t *metadata);

  private:
    camera_metadata_t *mBuffer;
};
When the metadata needs to be modified, update() is called, for example:
# frameworks/av/camera/CameraMetadata.cpp
status_t CameraMetadata::update(uint32_t tag, const int32_t *data, size_t data_count) {
return updateImpl(tag, (const void*)data, data_count);
}
As you can see, every overload ends up calling CameraMetadata::updateImpl().
Its logic is simple: if the entry already exists, its data is updated; if it does not, a new entry is added.
The updated metadata only lives in memory at this point; nothing has been sent down yet, so the new parameters do not take effect until the request is actually submitted.
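To wrap up, here is a short sketch of the typical usage pattern on the C++ side (the helper function below is hypothetical; the tags and the CameraMetadata API are the ones shown above):
#include <camera/CameraMetadata.h>
using namespace android;
// Hypothetical helper: build a settings object the way CameraFlashlight does.
static CameraMetadata buildTorchSettings(int32_t streamId) {
    CameraMetadata settings(/*entryCapacity*/ 10, /*dataCapacity*/ 100);

    uint8_t torchOn = ANDROID_FLASH_MODE_TORCH;
    settings.update(ANDROID_FLASH_MODE, &torchOn, 1);          // creates the entry

    uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON;
    settings.update(ANDROID_CONTROL_AE_MODE, &aeMode, 1);

    settings.update(ANDROID_REQUEST_OUTPUT_STREAMS, &streamId, 1);

    // Calling update() on the same tag again overwrites the existing entry via
    // updateImpl(); nothing reaches the HAL until the request is actually submitted.
    return settings;
}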