Over the past few days I did a quick study of a UVC library. The library talks to the underlying UVC driver directly and responds to the app's commands through JNI. The notes are as follows:
Studying this UVC library is a way to better understand the UVC driver studied earlier; walking through it made the driver's operation flow and the points to watch out for much clearer. Each ioctl in the library maps one-to-one onto the corresponding handler in the driver.
The initialization order is roughly the same as that of the UVC driver studied earlier. The library initializes inside JNI and does the following work:
/* Records buffer information: start address and length */
struct buffer {
void * start;//start address
size_t length;//length in bytes
};
static char dev_name[16];/* holds the /dev/video[0-3] device path */
static int fd = -1; /* target device */
struct buffer * buffers = NULL;//array describing the mapped buffers
static unsigned int n_buffers = 0;
static int camerabase = -1;
static char *pImageBuf = NULL;
static int imageBufSize = 0;//total number of bytes one image occupies
static int realImageSize = 0;
static int image_index = 0;
1 openDevice()
sprintf(dev_name,"/dev/video%d",i);// write /dev/video4 into dev_name
fd = open (dev_name, O_RDWR | O_NONBLOCK, 0);
2 initDevice()
2.1 ioctl(VIDIOC_QUERYCAP): query the operations the device supports and confirm it is a camera device
a xioctl (fd, VIDIOC_QUERYCAP, struct v4l2_capability cap) /* query the supported operations, normally V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING */
b if (!(pCap->capabilities & V4L2_CAP_VIDEO_CAPTURE)) /* query succeeded; check that the device has video capture capability */
c if (!(pCap->capabilities & V4L2_CAP_STREAMING)) /* V4L2_CAP_STREAMING means the device is driven with streaming ioctls rather than read/write */
2.2 ioctl(VIDIOC_CROPCAP): query the driver's cropping capability
a pCropcap->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
b xioctl (fd, VIDIOC_CROPCAP, struct v4l2_cropcap *pCropcap): get the cropping bounds
2.3 ioctl(VIDIOC_S_CROP): try to set the capture rectangle
a pCrop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
b pCrop->c = pCropcap->defrect; take the default rectangle obtained above
c xioctl (fd, VIDIOC_S_CROP, struct v4l2_crop *pCrop): try to set the crop rectangle
2.4 ioctl(VIDIOC_S_FMT): set the camera's video capture format
a
Set pFormat->type to V4L2_BUF_TYPE_VIDEO_CAPTURE; for capture, every type field is V4L2_BUF_TYPE_VIDEO_CAPTURE
Set the image width to 640
Set the image height to 480
Set the pixel format to V4L2_PIX_FMT_MJPEG
Set the field order so a frame contains a top and a bottom field, interlaced
pFormat->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
pFormat->fmt.pix.width = IMG_WIDTH; //#define IMG_WIDTH 640
pFormat->fmt.pix.height = IMG_HEIGHT;//#define IMG_HEIGHT 480
pFormat->fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG; // pixel storage format (V4L2_PIX_FMT_YUYV is another option)
pFormat->fmt.pix.field = V4L2_FIELD_INTERLACED; //one frame is split into an odd and an even field
b xioctl (fd, VIDIOC_S_FMT, struct v4l2_format *pFormat): set the camera's video capture format
2.5 ioctl(VIDIOC_S_PARM): set stream parameters such as the frame rate, 30 frames per second
a
Set the capture type; for capture, every type field is V4L2_BUF_TYPE_VIDEO_CAPTURE
Set pStreamparam->parm.capture.timeperframe.numerator = 1
Set pStreamparam->parm.capture.timeperframe.denominator = 30
Together these mean 30 frames per second
pStreamparam->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
pStreamparam->parm.capture.timeperframe.numerator = 1;
pStreamparam->parm.capture.timeperframe.denominator = 30;
b xioctl(fd, VIDIOC_S_PARM, struct v4l2_streamparm *pStreamparam)
2.6 Set the total number of bytes one frame occupies
fmt.fmt.pix.bytesperline = fmt.fmt.pix.width * 2
fmt.fmt.pix.sizeimage = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;
imageBufSize = fmt.fmt.pix.sizeimage;
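Plugging in the format set above: 640 pixels x 2 bytes per pixel gives 1280 bytes per line, and 1280 x 480 lines gives 614,400 bytes, so imageBufSize reserves 614,400 bytes per frame. The actual MJPEG payload of a dequeued frame (buf.bytesused, saved later as realImageSize) is normally much smaller than this worst-case size.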
2.7 initMmap()
a ioctl(VIDIOC_REQBUFS): ask the kernel to allocate buffers; 4 buffers are requested.
a.1 Settings
pReqBuf->count = 4;
pReqBuf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
pReqBuf->memory = V4L2_MEMORY_MMAP;
Set the number of buffers requested: the buffer queue holds 4 frames
Set V4L2_BUF_TYPE_VIDEO_CAPTURE; for capture, every type field is V4L2_BUF_TYPE_VIDEO_CAPTURE
Set memory-mapped I/O: the application requests buffers from the V4L2 driver via mmap
a.2 xioctl (fd, VIDIOC_REQBUFS, struct v4l2_requestbuffers)
b buffers = (struct buffer*)calloc (req.count, sizeof (*buffers));
Allocate the buffers array used to record the address and length of each buffer requested above. It holds req.count = 4 entries, one for each of the four kernel buffers, each entry of size sizeof(*buffers).
c Query the allocated buffers and copy the kernel's buffer information into the struct v4l2_buffer buf defined above
c.1 ioctl(VIDIOC_QUERYBUF): query the buffers the kernel allocated and fill struct v4l2_buffer buf with their information
1
pBuf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
pBuf->memory = V4L2_MEMORY_MMAP;
pBuf->index = bufIndex;
Set pBuf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; for capture, every type field is V4L2_BUF_TYPE_VIDEO_CAPTURE
Set memory-mapped I/O: the application requests buffers from the V4L2 driver via mmap
Set the index: the id number of the buffer
c.2 xioctl (fd, VIDIOC_QUERYBUF, pBuf): query the kernel-allocated buffer and fill struct v4l2_buffer buf with its information
d Walk the 4 entries of the buffers array created above and initialize them from the struct v4l2_buffer buf obtained in the previous step, recording each kernel buffer's length and address; the address is the process address produced by the mapping.
d.1 buffers[n_buffers].length = buf.length; save the length of the requested buffer
d.2 buffers[n_buffers].start = mmap (NULL ,buf.length,PROT_READ | PROT_WRITE,MAP_SHARED,fd, buf.m.offset);
Map a file or other object into the process's address space, establishing a one-to-one mapping between the object's offsets and a range of virtual addresses in the process.
d.3 Initialize the first buf.length bytes pointed to by buffers[n_buffers].start: memset(buffers[n_buffers].start, 0xab, buf.length);
3 startCapturing()
1 Define a struct v4l2_buffer buf for each of the four buffers
2 Configure each buffer
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = i;
3 xioctl(fd, VIDIOC_QBUF, &buf): queue the four buffers
4 v4l2_ioctl_streamon(fd): start the camera
enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;//for capture, every type field is V4L2_BUF_TYPE_VIDEO_CAPTURE
xioctl (fd, VIDIOC_STREAMON, &type)
---------------------------------
jint Java_com_camera_simplewebcam_CameraPreview_prepareCamera( JNIEnv* env,jobject thiz, jint videoid){
int ret;
int camerabase = -1;
LOGE("Starting %s()\n", __FUNCTION__);
/* Check how many video devices exist; if they do, there should be four of them */
if(camerabase<0){
camerabase = checkCamerabase();//4
}
ret = openDevice(camerabase + videoid);//4+0, sets the global fd
if (SUCCESS_LOCAL != ret) {
LOGE("Open device(/dev/video%d) failed", (camerabase+videoid));
/* open /dev/videox */
ret = openDevice(camerabase+videoid+1);
if (0 != ret) {
LOGE("Open device(/dev/video%d) failed", (camerabase+videoid+1));
return ret;
}
return ret;
}
/* Initialize the device
1 Query the driver capabilities and confirm it is a video capture device
2 Query the driver's cropping capability
3 Try to set the capture rectangle
4 Set the driver's video capture format
5 Set stream parameters such as the frame rate, 30 frames per second
6 Set the total number of bytes one image occupies
7 initMmap()
1 ioctl(VIDIOC_REQBUFS): allocate buffers
2 Allocate 4 * sizeof (*buffers) of user memory
3 ioctl(VIDIOC_QUERYBUF): query the allocated buffers and copy the kernel buffer info into pBuf
4 mmap() the buffers into this process through the device fd
5 Initialize the 4 buffers to 0xab
*/
ret = initDevice();
if (SUCCESS_LOCAL != ret) {
LOGE("init device failed");
return ret;
}
/*
1 Queue the buffers
2 Start the camera
*/
ret = startCapturing();
if(SUCCESS_LOCAL != ret) {
LOGE("startCapturing() error, then stopCapturing --> uninitDevice --> closeDevice");
/* shut the camera down */
stopCapturing();
uninitDevice ();
closeDevice ();
// write_int(OTG_POWER_PATH, 0);
return ret;
}
return ret;
}
Preview: if the user taps record or capture, _startRecording or _startCaptureImage is set to 1 and recording or still capture starts, which really just means writing data into the video or image file that has already been created. If neither is tapped, the code keeps previewing and hands each preview frame to the APP. The preview path is also driven through JNI.
/* preview */
int
Java_com_camera_simplewebcam_CameraPreview_readFrame( JNIEnv* env,
jobject thiz){
int ret = 0;
ENTER_FUNC_LOG();
ret = readframeonce();
EXIT_FUNC_LOG();
if (SUCCESS_LOCAL == ret) {
return getRealImageSize();
} else {
return 0;
}
}
readframeonce() does the following:
static char *pImageBuf = NULL;//holds the preview data; before recording starts every frame is copied here and handed to the app for display
static int imageBufSize = 0;//total number of bytes one image occupies
static int realImageSize = 0;
1 readFrame()
a xioctl (VIDIOC_DQBUF): dequeue a filled buffer
a.1 struct v4l2_buffer buf//define struct v4l2_buffer buf, which receives the buffer description
a.2 v4l2_ioctl_dqbuf(fd, &buf);
xioctl (fd, VIDIOC_DQBUF, pBuf)//dequeue the filled buffer; its description is returned in struct v4l2_buffer buf
/*
buffers[buf.index].start: start address of the frame data in the shared (mmap'ed) memory
buf.bytesused: number of bytes of data in the buffer
*/
b processFrame (buffers[buf.index].start, buf.bytesused);
b.1 Define struct v4l2_control v4l2_ctrl. The APP fills in the id field; when VIDIOC_G_CTRL is called the driver fills in the value field
struct v4l2_control v4l2_ctrl = { .id = V4L2_CID_BACKLIGHT_COMPENSATION };
b.2 xioctl(fd, VIDIOC_G_CTRL, &v4l2_ctrl):
VIDIOC_G_CTRL reads back individual settings such as brightness, saturation and contrast; the current value of a control can be obtained this way.
/*
size = buf.bytesused, the number of bytes of data in the buffer
(char *)buffers[buf.index].start
*/
//only when recording has started (_startRecording == 1) is qFrameBuffer executed
b.3 qFrameBuffer((char *)buffers[buf.index].start, size)
Fetch the image data from the shared memory and copy it into _frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].buffer
b.3.1 memset(_frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].buffer, 0, _frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].size);
Zero the buffer pointed to by entry writeIndex of the _frameBufferQueue.frameBuffer[] array, over its full size
b.3.2 _frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].usedSize = 0;
Reset usedSize of entry writeIndex of the frameBuffer[] array to 0
b.3.3 memcpy(_frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].buffer, imageData, len);
Copy len bytes from the memory pointed to by imageData into _frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].buffer
i.e. copy len bytes starting at the address recorded in struct buffer *buffers (buffers[].start) into _frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].buffer
b.3.4
Set usedSize = len in the frameBuffer entry to record how many bytes were copied
Advance the queue's writeIndex
Increment the queue's usedCount, the number of occupied entries in the frameBuffer array (10 in total)
_frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].usedSize = len;
_frameBufferQueue.writeIndex = (_frameBufferQueue.writeIndex + 1) % _frameBufferQueue.count;
_frameBufferQueue.usedCount++;
b.4
//pImageBuf holds the preview data; before recording starts every frame is copied here and handed to the app for display
memset(pImageBuf, 0, size); clear the pImageBuf area
memcpy(pImageBuf, pFrameData, size); copy the image data from the shared memory into pImageBuf
realImageSize = size; i.e. buf.bytesused, the number of bytes of data in the buffer
//preview does not call save2ImageFile(); only when capture is tapped is _startCaptureImage set to 1 and the image saved here
save2ImageFile(pImageBuf, realImageSize, imageFilePath); write the image data to the imageFilePath file
stopCaptureImage()
c Re-queue the buffer
if (SUCCESS_LOCAL != v4l2_ioctl_qbuf(fd, &buf, buf.index))
Still capture also goes through JNI:
jint Java_com_camera_simplewebcam_CameraPreview_captureImage(JNIEnv *env, jobject thiz) {
int ret = 0;
ENTER_FUNC_LOG();
ret = captureImage();
EXIT_FUNC_LOG();
return ret;
}
captureImage(void)
/* Re-build the file name: create the image file.
With the argument FALSE -> imageFilePath = /sdcard/DCIM/Camera/<date-time>.jpeg
*/
buildFileName(FALSE);
_startCaptureImage = TRUE;//mark that image capture has started; once set, the preview path saves the captured frame into the image file just created
The capture itself happens in save2ImageFile() on the preview path.
Recording likewise goes through JNI:
/* start recording */
jint Java_com_camera_simplewebcam_CameraPreview_startRecording(JNIEnv *env, jobject thiz) {
int ret = 0;
ENTER_FUNC_LOG();
ret = startRecording();
EXIT_FUNC_LOG();
return ret;
}
startRecording(void) mainly does the following:
1 Check that the camera is running
2 Re-build the file name: videoFilePath = /sdcard/DCIM/Camera/<date-time>.avi
3 Prepare the buffers that will store the frames
4 Open the AVI video file
5 Configure the AVI video file
frame width 640
frame height 480
frames per second 15
compression type MJPG
6 Save the recording start time
7 Set the recording flag _startRecording = 1
#define FRAMEBUFFER_MAX_COUNT (10)
/* FrameBuffer structure */
typedef struct FrameBuffer{
char *buffer; //image storage; points to an imageBufSize-byte zero-initialized block
int size; //imageBufSize
int usedSize;
}FrameBuffer;
/* frame buffer queue structure */
typedef struct FrameBufferQueue{
FrameBuffer frameBuffer[FRAMEBUFFER_MAX_COUNT];//array of 10 FrameBuffer structs
int readIndex;
int writeIndex;
int usedCount;
int count;//number of frameBuffer entries that were initialized (10)
}FrameBufferQueue;
/* frame buffer queue */
#define FRAMEBUFFER_MAX_COUNT (10)
static FrameBufferQueue _frameBufferQueue;//the frame buffer queue variable
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
1 if (TRUE != isCameraOpened()) checks that the camera is running
2 buildFileName(TRUE); re-builds the file name: videoFilePath = /sdcard/DCIM/Camera/<date-time>.avi
3 initFrameBuffer(); creates the queue buffers; while recording, the preview path copies the data from the shared memory into this queue
1 memset(&_frameBufferQueue, 0, sizeof(FrameBufferQueue));
Zero the whole _frameBufferQueue structure
2 Initialize the buffer pointer of every FrameBuffer in the array, used to store one image: point it at an imageBufSize-byte zero-initialized block, which can be treated as an array
_frameBufferQueue.frameBuffer[index].buffer = (char *)calloc(1, imageBufSize);
3 Initialize the size member of every FrameBuffer in the array to record its capacity
_frameBufferQueue.frameBuffer[index].size = imageBufSize;
4 Increment count once for every FrameBuffer that was initialized, so count records how many queue entries exist
_frameBufferQueue.count++;
4 Open the AVI video file: videoFilePath = /sdcard/DCIM/Camera/<date-time>.avi
avifile = AVI_open_output_file(videoFilePath);//this is AVI container plumbing that I have not looked into closely yet
5 Configure the AVI video file: AVI_set_video(avifile, 640, 480, 15, "MJPG");
frame width 640
frame height 480
frames per second 15
compression type MJPG
6 Set the recording flag _startRecording = 1
7 Recording itself happens in qFrameBuffer() on the preview path, which copies the data from the shared memory into the queue; see the sketch below for how the queue indices move.
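To make the index bookkeeping in qFrameBuffer()/dqFrameBuffer() concrete, here is a small standalone sketch of the same write/read/used arithmetic. The variable names mirror the fields above, but it is an illustration, not the library code:
#include <stdio.h>
#define COUNT 10   /* mirrors FRAMEBUFFER_MAX_COUNT */
int main(void) {
    int writeIndex = 0, readIndex = 0, usedCount = 0;
    int i;
    /* the preview path queues 3 frames */
    for (i = 0; i < 3; i++) {
        writeIndex = (writeIndex + 1) % COUNT;
        usedCount++;
    }
    /* the recording path dequeues 1 frame */
    readIndex = (readIndex + 1) % COUNT;
    usedCount--;
    /* prints: writeIndex=3 readIndex=1 usedCount=2 */
    printf("writeIndex=%d readIndex=%d usedCount=%d\n",
           writeIndex, readIndex, usedCount);
    return 0;
}
After the tenth enqueue writeIndex wraps back to 0; when usedCount reaches count the queue is full and qFrameBuffer() drops the frame, and when usedCount is 0 dqFrameBuffer() returns NULL.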
A.c
#include "jni.h"
#include "JNIHelp.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h> /* low-level i/o */
#include <unistd.h>
#include <time.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/select.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <pthread.h>
#include <asm/types.h> /* for videodev2.h */
#include <linux/videodev2.h>
#include "ioctlUtil.h"
#include "ioctlLog.h"
#include "avilib.h"
#define TRUE (1)
#define FALSE (0)
#define CLEAR(x) memset (&(x), 0, sizeof (x))
#define MIN_AVAILABLE_SIZE_FOR_VIDEO (2*1024*1024)
#define MIN_AVAILABLE_SIZE_FOR_IMAGE (20*1024)
#define FILEPATH_LEN (64)
#define IMAGE_FILE_PATH ("/sdcard/DCIM/Camera/")
#define VIDEO_FILE_PATH ("/sdcard/DCIM/Camera/")
#define FRAMEBUFFER_MAX_COUNT (10)
/* FrameBuffer structure */
typedef struct FrameBuffer{
char *buffer; //image storage; points to an imageBufSize-byte zero-initialized block
int size; //imageBufSize
int usedSize;
}FrameBuffer;
/* frame buffer queue structure */
typedef struct FrameBufferQueue{
FrameBuffer frameBuffer[FRAMEBUFFER_MAX_COUNT];//array of 10 FrameBuffer structs
int readIndex;
int writeIndex;
int usedCount;
int count;//number of frameBuffer entries that were initialized (10)
}FrameBufferQueue;
struct buffer {
void * start;//start address
size_t length;//length in bytes
};
static char dev_name[16];/* holds the /dev/video[0-3] device path */
static int fd = -1; /* target device */
struct buffer * buffers = NULL;//array describing the mapped buffers
static unsigned int n_buffers = 0;
static int camerabase = -1;
static char *pImageBuf = NULL;
static int imageBufSize = 0;//total number of bytes one image occupies
static int realImageSize = 0;
static int image_index = 0;
/*
Flag to capture image as file
0: Not save the current frame to file
1: Save the current frame to file
*/
static int _startCaptureImage = FALSE;
/*
Flag to indicate if this camera is opened normally
0: Not opened
1: Opened normally
*/
static int _cameraStart = FALSE;
/*
Flag to indicate if the recording is required
0: Not recording
1: Recording
*/
static int _startRecording = FALSE;
/* the start time of this video file */
static struct timeval _recordingStart;
/* the end time of this recorded video file */
static struct timeval _recordingStop;
/* Save the recorded video file path */
char videoFilePath[FILEPATH_LEN] = {0};//#define FILEPATH_LEN (64)
/* Save the captured image's path */
char imageFilePath[FILEPATH_LEN] = {0};
/* For avilib */
static avi_t *avifile = NULL;//the AVI file handle
/* Save the count of frames in the recorded video file */
static int framecount = 0;
/* Record the sequence of frame */
static int _sequence = 0;
/* frame buffer queue */
static FrameBufferQueue _frameBufferQueue;//the frame buffer queue variable
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static int xioctl(int fh, int request, void *arg)
{
int r;
do {
LOGE( "Running Request: 0x%x", request);
printArgInfo(request, arg);
r = ioctl(fh, request, arg);
} while (-1 == r && EINTR == errno);
LOGE( "After Running Request: 0x%x", request);
printArgInfo(request, arg);
return r;
}
static void buildFileName(int isVideo) {
// Get the current time
char filename[32] = {0};
time_t t_time;
struct tm *pTime;
ENTER_FUNC_LOG();
time(&t_time);
pTime = gmtime(&t_time);
/* name the file after the current date and time (year month day hour minute second) */
sprintf(filename, "%d%02d%02d%02d%02d_%02d",\
(1900+pTime->tm_year), (1+pTime->tm_mon), pTime->tm_mday, \
pTime->tm_hour, pTime->tm_min, pTime->tm_sec);
if (isVideo == TRUE) {
memset(videoFilePath, 0, sizeof(char) * FILEPATH_LEN);//clear the recorded-video path
/*
#define VIDEO_FILE_PATH ("/sdcard/DCIM/Camera/")
videoFilePath = /sdcard/DCIM/Camera/<date-time>.avi
*/
sprintf(videoFilePath, "%s%s.%s", VIDEO_FILE_PATH, filename, "avi");
LOGE("video file path: %s", videoFilePath);
} else {
memset(imageFilePath, 0, sizeof(char) * FILEPATH_LEN);//clear the captured-image path
/*
#define IMAGE_FILE_PATH ("/sdcard/DCIM/Camera/")
imageFilePath = /sdcard/DCIM/Camera/<date-time>.jpeg
*/
sprintf(imageFilePath, "%s%s.%s", IMAGE_FILE_PATH, filename, "jpeg");
LOGE("image file path: %s", imageFilePath);
}
EXIT_FUNC_LOG();
}
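A quick worked example of the format string "%d%02d%02d%02d%02d_%02d": for 2021-01-15 09:30:05 (note that gmtime() yields UTC, not local time) filename becomes 202101150930_05, so the video path would be /sdcard/DCIM/Camera/202101150930_05.avi and the image path /sdcard/DCIM/Camera/202101150930_05.jpeg.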
/* Check whether the camera is running */
static int isCameraOpened() {
if (-1 != fd && TRUE == _cameraStart) {
return TRUE;
} else {
LOGE("Camera doesn't run");
}
return FALSE;
}
/*
Use stat() to look up /dev/video[0-3]; the goal is to find out how many video devices exist. In this setup either all four exist or none of them do.
*/
int checkCamerabase(void){
struct stat st;
int i;
int start_from_4 = 1;
/* if /dev/video[0-3] exist, camerabase=4, otherwise, camrerabase = 0 */
for(i=0 ; i<4 ; i++){
sprintf(dev_name,"/dev/video%d",i);
if (-1 == stat (dev_name, &st)) {
start_from_4 &= 0;
}else{
start_from_4 &= 1;
}
}
if(start_from_4){
return 4;
}else{
return 0;
}
}
/*
i = 4
open /dev/videox
*/
int openDevice(int i)
{
struct stat st;
ENTER_FUNC_LOG();
sprintf(dev_name,"/dev/video%d",i);// write /dev/video4 into dev_name
/* for debugging to check if the current application is system app or not */
uid_t uid = getuid();
uid_t euid = geteuid();
LOGE("uid: %d, euid: %d", uid, euid);
/* Get this device's information */
if (-1 == stat (dev_name, &st)) {
LOGE("Cannot identify '%s': %d, %s", dev_name, errno, strerror (errno));
return ERROR_LOCAL;
}
if (!S_ISCHR (st.st_mode)) {
LOGE("%s is no device", dev_name);
return ERROR_LOCAL;
}
//open /dev/video4
fd = open (dev_name, O_RDWR | O_NONBLOCK, 0);
if (-1 == fd) {
LOGE("Cannot open '%s': %d, %s", dev_name, errno, strerror (errno));
return ERROR_LOCAL;
}
EXIT_FUNC_LOG();
return SUCCESS_LOCAL;
}
/* Initialize the device
1 ioctl(VIDIOC_QUERYCAP): query the driver capabilities and confirm it is a video capture device, i.e. a camera
2 ioctl(VIDIOC_CROPCAP): query the driver's cropping capability
3 ioctl(VIDIOC_S_CROP): try to set the capture rectangle
4 ioctl(VIDIOC_S_FMT): set the camera's video capture format
5 ioctl(VIDIOC_S_PARM): set stream parameters such as the frame rate, 30 frames per second
6 Set the number of bytes per image line to 2 * width
7 Set the total number of bytes one image occupies
8 initMmap()
1 ioctl(VIDIOC_REQBUFS): allocate buffers
2 Allocate 4 * sizeof (*buffers) of user memory
3 ioctl(VIDIOC_QUERYBUF): query the buffers allocated by VIDIOC_REQBUFS and obtain each buffer's offset and length for mapping
4 mmap() the buffers into this process through the device fd
5 Initialize the 4 buffers to 0xab
*/
int initDevice(void)
{
struct v4l2_capability cap;
struct v4l2_cropcap cropcap;
struct v4l2_crop crop;
struct v4l2_format fmt;
struct v4l2_streamparm params;
struct v4l2_fmtdesc fmtdesc;
unsigned int min;
int retry = 0;
int maxRetry = 3;
int ret = 0;
int formatIndex = 0;
LOGE("Starting %s", __FUNCTION__);
/* ioctl(VIDIOC_QUERYCAP): query the operations the device supports and confirm it is a camera device */
if (SUCCESS_LOCAL != v4l2_ioctl_querycap(fd, &cap)) {
return errnoexit("VIDIOC_QUERYCAP");
}
/* ioctl(VIDIOC_CROPCAP): query the driver's cropping capability */
if (SUCCESS_LOCAL == v4l2_ioctl_cropcap(fd, &cropcap)) {
/* ioctl(VIDIOC_S_CROP): try to set the capture rectangle */
v4l2_ioctl_set_crop(fd, &crop, &cropcap);
} else {
errnoexit("VIDIOC_CROPCAP");
}
/* ioctl(VIDIOC_S_FMT): set the camera's video capture format */
if (SUCCESS_LOCAL != v4l2_ioctl_set_fmt(fd, &fmt)) {
return errnoexit("VIDIOC_S_FMT");
}
/* ioctl(VIDIOC_S_PARM): set stream parameters such as the frame rate, 30 fps */
if (SUCCESS_LOCAL != v4l2_ioctl_set_streamparam(fd, &params)) {
errnoexit("VIDIOC_S_PARM");
}
min = fmt.fmt.pix.width * 2; // bytes per image line: (width * 16) >> 3, i.e. each pixel takes 16 bits = 2 bytes
//make sure one image line occupies at least 2 * width bytes
if (fmt.fmt.pix.bytesperline < min)
fmt.fmt.pix.bytesperline = min;
min = fmt.fmt.pix.bytesperline * fmt.fmt.pix.height;//bytes per line * image height = total bytes per image
//make sure fmt.fmt.pix.sizeimage covers the total bytes per image
if (fmt.fmt.pix.sizeimage < min)
fmt.fmt.pix.sizeimage = min;
/* Store the image size that the driver wants */
imageBufSize = fmt.fmt.pix.sizeimage;
/*
1 ioctl(VIDIOC_REQBUFS): ask the kernel to allocate buffers
2 Allocate 4 * sizeof (*buffers) of user memory
3 ioctl(VIDIOC_QUERYBUF): query the buffers allocated by VIDIOC_REQBUFS and obtain each buffer's offset and length for mapping
4 mmap() the buffers into this process through the device fd
5 Initialize the 4 buffers to 0xab
*/
if (SUCCESS_LOCAL !=initMmap ()) {
return ERROR_LOCAL;
}
if (SUCCESS_LOCAL != createMutex()) {
return ERROR_LOCAL;
}
return SUCCESS_LOCAL;
}
/*
1 ioctl(VIDIOC_REQBUFS): ask the kernel to allocate buffers
2 Allocate 4 * sizeof (*buffers) of user memory
3 ioctl(VIDIOC_QUERYBUF): query the allocated buffers and copy the kernel buffer info into pBuf
4 mmap() the buffers into this process through the device fd
5 Initialize the 4 buffers to 0xab
*/
static int initMmap(void)
{
struct v4l2_requestbuffers req;//describes the kernel buffer request
struct v4l2_buffer buf;//video buffer info
ENTER_FUNC_LOG();
/* ioctl(VIDIOC_REQBUFS): ask the kernel to allocate buffers */
if (SUCCESS_LOCAL != v4l2_ioctl_reqbuf(fd, &req)) {
return errnoexit("VIDIOC_REQBUFS");
}
/* count was set to 4: the buffer queue holds 4 frames */
if (req.count < 2) {
LOGE("Insufficient buffer memory on %s", dev_name);
return ERROR_LOCAL;
}
/* Allocate 4 * sizeof (*buffers) of user memory to record the buffer information
struct buffer * buffers
struct buffer {
void * start;//start address
size_t length;//length in bytes
};
*/
buffers = (struct buffer*)calloc (req.count, sizeof (*buffers));
if (!buffers) {
LOGE("Out of memory");
return ERROR_LOCAL;
}
/* loop over the req.count == 4 buffers */
for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
/* ioctl(VIDIOC_QUERYBUF): query the allocated buffer and copy its info into v4l2_buffer buf */
if ( SUCCESS_LOCAL != v4l2_ioctl_querybuf(fd, &buf, n_buffers)) {
return errnoexit("VIDIOC_QUERYBUF");
}
/* Use the kernel buffer info just obtained to fill in the four user-space buffers entries allocated above:
1 record each buffer's length from the kernel buffer info
2 mmap each buffer
*/
buffers[n_buffers].length = buf.length;
/* Map a file or other object into the process's address space, establishing a one-to-one mapping between the object's offsets and a range of virtual addresses in the process.
Memory-mapped I/O associates a disk file (or device memory) with a buffer in the process, so reading from or writing to that buffer is equivalent to reading or writing the file, without calling read() or write().
NULL: start address of the mapping; usually NULL so that the kernel chooses it.
buf.length: length of the mapped region in bytes.
PROT_READ | PROT_WRITE: protection of the mapped region.
MAP_SHARED: updates to the mapping are visible to other processes mapping the same region and are carried through to the underlying object.
fd: file descriptor of the object being mapped.
buf.m.offset: offset of this buffer, the "cookie" that VIDIOC_QUERYBUF returned to be passed to mmap().
On success mmap returns the start address of the mapped region.
*/
buffers[n_buffers].start = mmap (NULL ,buf.length,PROT_READ | PROT_WRITE,MAP_SHARED,fd, buf.m.offset);
if (MAP_FAILED == buffers[n_buffers].start) {
return errnoexit ("mmap");
}
LOGE("buffers[%d].start = 0x%x", n_buffers, buffers[n_buffers].start);
/*
Fill the first buf.length bytes pointed to by buffers[n_buffers].start with 0xab,
i.e. initialize all 4 mapped buffers to 0xab
*/
memset(buffers[n_buffers].start, 0xab, buf.length);
}
EXIT_FUNC_LOG();
return SUCCESS_LOCAL;
}
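For reference, uninitDevice() is called on the error path of prepareCamera() but its body is not part of this excerpt; a minimal sketch of the cleanup that has to mirror initMmap(), assuming the globals above (the function name is mine):
static void uninitMmapSketch(void)
{
    unsigned int i;
    /* undo the mmap() calls made in initMmap() */
    for (i = 0; i < n_buffers; ++i) {
        if (NULL != buffers[i].start && MAP_FAILED != buffers[i].start)
            munmap(buffers[i].start, buffers[i].length);
    }
    free(buffers);   /* undo the calloc() */
    buffers = NULL;
    n_buffers = 0;
}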
/* Start capturing
1 Queue the buffers
2 Start the camera
*/
int startCapturing(void)
{
unsigned int i;
struct v4l2_buffer buf;//video buffer info
ENTER_FUNC_LOG();
//n_buffers == 4
for (i = 0; i < n_buffers; ++i) {
struct v4l2_buffer buf;//per-buffer descriptor
CLEAR(buf);
/*
Set buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; for capture, every type field is V4L2_BUF_TYPE_VIDEO_CAPTURE
Set memory-mapped I/O: the application requests buffers from the V4L2 driver via mmap
Set the index: the id number of the buffer
*/
buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
buf.memory = V4L2_MEMORY_MMAP;
buf.index = i;
/* queue the buffer */
if (-1 == xioctl(fd, VIDIOC_QBUF, &buf))
errnoexit("VIDIOC_QBUF");
}
/* Ask this device to start capturing */
if (SUCCESS_LOCAL != v4l2_ioctl_streamon(fd)) {
return errnoexit ("VIDIOC_STREAMON");
} else {
// Set flag to indicate this camera is opened normally
_cameraStart = 1;
}
_sequence = 0;
EXIT_FUNC_LOG();
return SUCCESS_LOCAL;
}
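stopCapturing() is referenced on the error path of prepareCamera() but is not shown in this excerpt either; a minimal sketch, assuming it simply mirrors startCapturing() by calling the v4l2_ioctl_streamoff() wrapper from B.c and clearing _cameraStart (the function name is mine):
static int stopCapturingSketch(void)
{
    /* VIDIOC_STREAMOFF stops the stream and removes all buffers from both queues */
    if (SUCCESS_LOCAL != v4l2_ioctl_streamoff(fd))
        return errnoexit("VIDIOC_STREAMOFF");
    _cameraStart = FALSE;
    return SUCCESS_LOCAL;
}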
/* Entry point that fetches one frame */
int readframeonce(void)
{
int ret = 0;
ENTER_FUNC_LOG();
for (;;) {
fd_set fds;
struct timeval tv;
int r;
FD_ZERO (&fds);
FD_SET (fd, &fds);
tv.tv_sec = 2000;//select() timeout in seconds
tv.tv_usec = 0;
r = select (fd + 1, &fds, NULL, NULL, &tv);
if (-1 == r) {
if (EINTR == errno) {
errnoexit("select");
continue;
}
return errnoexit ("select");
}
if (0 == r) {
LOGE("select timeout");
return ERROR_LOCAL;
}
ret = readFrame();//!!
if (ret == SUCCESS_LOCAL) {
break;
} else if ( ret == 19 ) { /* 19 == ENODEV */
LOGE("Error return");
return ERROR_LOCAL;
} else {
continue;
}
}
EXIT_FUNC_LOG();
return SUCCESS_LOCAL;
}
/*
save2ImageFile(pImageBuf, realImageSize, imageFilePath);
*/
static int save2ImageFile(const void *pImageData, int imageSize, const char *pFilename) {
FILE *f = NULL;
char fileName[64] = {0};
int ret = 0;
struct stat cameraStat;
int imageDirExist = 0;
ENTER_FUNC_LOG();
/* Check if the arguments are valid */
if (NULL == pImageData || NULL == pFilename) {
LOGE("The arguments are invalid in %s", __FUNCTION__);
return INVARG_LOCAL;
}
if (access(IMAGE_FILE_PATH, F_OK) == 0) {
if (stat(IMAGE_FILE_PATH, &cameraStat) == 0) {
if (S_ISDIR(cameraStat.st_mode)) {
imageDirExist = 1;
}
}
}
if (imageDirExist == 0) {
if (0 ==mkdir(IMAGE_FILE_PATH, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP|S_IROTH|S_IWOTH)) {
LOGE("Success to create diretory: %s", IMAGE_FILE_PATH);
} else {
LOGE("Failed to create %s, errno: %d, %s", IMAGE_FILE_PATH, errno, strerror(errno));
return ERROR_LOCAL;
}
}
/* Open the file */
f = fopen(pFilename, "w+b");
if (f == NULL) {
LOGE("Failed to open file: %s, errno: %d, %s", pFilename, errno, strerror(errno));
return ERROR_LOCAL;
}
/* Write image data to file */
ret = fwrite(pImageData, imageSize, 1, f);
if (ret < 1) {
LOGE("Failed to write data to file: %s, errno: %d, %s", pFilename, errno, strerror(errno));
}
/* Close this file */
ret = fclose(f);
if (0 != ret) {
LOGE("Failed to close file: %s, errno: %d, %s", pFilename, errno, strerror(errno));
}
EXIT_FUNC_LOG();
return SUCCESS_LOCAL;
}
int recording(void) {
ENTER_FUNC_LOG();
// Retrieve the image data from _frameBufferQueue
FrameBuffer *frameBuffer = dqFrameBuffer();
if (NULL == frameBuffer) {
LOGE("Cannot retrieve frame buffer from queue");
return ERROR_LOCAL;
}
/* Save the image data to Avi */
save2Avi(frameBuffer->buffer, frameBuffer->usedSize);
EXIT_FUNC_LOG();
return SUCCESS_LOCAL;
}
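save2Avi() itself is not included in the excerpt; a minimal sketch of what it presumably does with avilib, assuming transcode's AVI_write_frame(avi_t*, char*, long, int keyframe) signature (the function name and error handling are illustrative):
static int save2AviSketch(const char *pFrameData, int size)
{
    if (NULL == avifile || NULL == pFrameData)
        return ERROR_LOCAL;
    /* every MJPEG frame is self-contained, so each one is written as a keyframe */
    if (0 != AVI_write_frame(avifile, (char *)pFrameData, (long)size, 1))
        return ERROR_LOCAL;
    framecount++;   /* the frame count is needed to fix up the fps on close */
    return SUCCESS_LOCAL;
}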
/*
VIDIOC_G_CTRL reads back individual controls:
specific settings such as exposure type, exposure, gain, white balance, brightness, saturation and contrast; the current value of each control can be obtained with VIDIOC_G_CTRL.
*/
int readbackCtrl()
{
/*
The APP fills in the id field; when VIDIOC_G_CTRL is called the driver fills in the value field
*/
struct v4l2_control v4l2_ctrl = { .id = V4L2_CID_BACKLIGHT_COMPENSATION };
//LOGE("zjd++ readbackCtrl before hjc \n");
/* read the current value of one control with VIDIOC_G_CTRL */
if (-1 == xioctl(fd, VIDIOC_G_CTRL, &v4l2_ctrl)) {
return errnoexit ("VIDIOC_G_CTRL");
//fprintf(stderr, "%s error %d, %s\n", "VIDIOC_G_CTRL", errno, strerror(errno));;
}
//LOGE("zjd++ readbackCtrl is %d\n",v4l2_ctrl.value);
//*result = v4l2_ctrl.value;
return SUCCESS_LOCAL;
}
static int processFrame (const void *pFrameData, int size)
{
int ret = 0;
//LOGE("zjd++ readbackCtrl before \n");
/* read back a control with VIDIOC_G_CTRL */
readbackCtrl();
//LOGE("zjd++ readbackCtrl after \n");
/*
Try to queue the frame for the AVI file.
The preview path does not call qFrameBuffer(); only when recording is tapped does _startRecording become 1 and this branch run.
*/
if (_startRecording) {
qFrameBuffer((char *)pFrameData, size);
//save2Avi(pFrameData, size);
}
/* Convert the image data to the complete MJPEG file */
/*ret = convert2MJPEG(pFrameData ,size);
if (SUCCESS_LOCAL != ret) {
if (_startCaptureImage) {
stopCaptureImage();
}
LOGE("Failed to process MJpeg");
return ret;
}*/
/* save the preview data to be handed to the app for display */
memset(pImageBuf, 0, size);
memcpy(pImageBuf, pFrameData, size);
realImageSize = size;
/* Try to save the MJPEG file.
The preview path does not call save2ImageFile(); only when capture is tapped does _startCaptureImage become 1 and the image get saved here.
*/
if (_startCaptureImage) {
ret = save2ImageFile(pImageBuf, realImageSize, imageFilePath);
stopCaptureImage();
}
return ret;
}
static int readFrame(void)
{
struct v4l2_buffer buf;
unsigned int i;
int ret = 0;
/* xioctl (VIDIOC_DQBUF): dequeue a filled buffer */
ret = v4l2_ioctl_dqbuf(fd, &buf);//the buffer description ends up in struct v4l2_buffer buf
if (SUCCESS_LOCAL != ret) {
if (EAGAIN == ret) {
LOGE("No buffer was in the outgoing queue");
return SUCCESS_LOCAL;
}
return errnoexit("VIDIOC_DQBUF");
}
assert (buf.index < n_buffers);
/* Check if some frames were lost */
if ((_sequence + 1) != buf.sequence) {
LOGE("Some Frames are lost, the last frame is %d, the current frame is %d", _sequence, buf.sequence);
}
/* Record the sequence number of the last frame */
_sequence = buf.sequence;
/*
buffers[buf.index].start: start address of the frame data in the mmap'ed buffer
buf.bytesused: number of bytes of data in the buffer
*/
processFrame (buffers[buf.index].start, buf.bytesused);
/* re-queue the buffer */
if (SUCCESS_LOCAL != v4l2_ioctl_qbuf(fd, &buf, buf.index))
return errnoexit ("VIDIOC_QBUF");
//EXIT_FUNC_LOG();
return SUCCESS_LOCAL;
}
/*
Prepare the buffers that will store the frames:
zero the whole _frameBufferQueue structure;
point each FrameBuffer's buffer pointer at an imageBufSize-byte zero-initialized block used to store one image;
set each FrameBuffer's size member to record its capacity;
increment count once for every FrameBuffer that was initialized, so count records how many queue entries exist.
*/
static int initFrameBuffer(void) {
int index = 0;
ENTER_FUNC_LOG();
/*
Zero the whole _frameBufferQueue structure
*/
memset(&_frameBufferQueue, 0, sizeof(FrameBufferQueue));
/*
#define FRAMEBUFFER_MAX_COUNT (10)
imageBufSize: total number of bytes one image occupies
*/
if (imageBufSize != 0) {
for (index = 0; index < FRAMEBUFFER_MAX_COUNT; index++) {
/*
Initialize the buffer pointer of each FrameBuffer in the array, used to store one image.
calloc(): allocate enough space for one imageBufSize-byte block and zero it.
*/
_frameBufferQueue.frameBuffer[index].buffer = (char *)calloc(1, imageBufSize);
/* if the allocation succeeded */
if (NULL != _frameBufferQueue.frameBuffer[index].buffer) {
/*
Set the size member of this FrameBuffer to record its capacity,
and increment count, the number of queue entries that have been initialized.
*/
_frameBufferQueue.frameBuffer[index].size = imageBufSize;
_frameBufferQueue.count++;
} else {
LOGE("Failed to allocate %d frame buffer", index);
uninitFrameBuffer();
return ERROR_LOCAL;
}
}
} else {
LOGE("imageBufSize isn't initialized");
return ERROR_LOCAL;
}
/* log how many queue entries were initialized and how large each one is */
LOGE("_frameBufferQueue info: count(%d), framebuffer size(%d)", _frameBufferQueue.count, imageBufSize);
EXIT_FUNC_LOG();
return SUCCESS_LOCAL;
}
/* Queue one frame of image data into _frameBufferQueue */
static int qFrameBuffer(const char *imageData, int len) {
ENTER_FUNC_LOG();
//Check if the argument is valid
if (NULL == imageData) {
LOGE("Invalid argument(imageData) in %s", __FUNCTION__);
return INVARG_LOCAL;
}
lockMutex();
if (0 == _frameBufferQueue.count) {
LOGE("_frameBufferQueue isn't initialized");
unlockMutex();
return ERROR_LOCAL;
} else {
if (_frameBufferQueue.usedCount == _frameBufferQueue.count) {
LOGE("_frameBufferQueue is full, this frame will be missed");
unlockMutex();
return ERROR_LOCAL;
} else if (_frameBufferQueue.writeIndex >= _frameBufferQueue.count) {
LOGE("_frameBufferQueue.writeIndex(%d) is out of range, count(%d)", _frameBufferQueue.writeIndex, _frameBufferQueue.count);
unlockMutex();
return ERROR_LOCAL;
} else {
/* zero the buffer of entry writeIndex in the frameBuffer[] array, over its full size */
memset(_frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].buffer, 0, _frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].size);
/* reset usedSize of entry writeIndex to 0 */
_frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].usedSize = 0;
/*
len = buf.bytesused, the number of bytes of data in the dequeued buffer
*/
if (_frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].size >= len) {
/* Copy len bytes from the memory pointed to by imageData into frameBuffer[writeIndex].buffer,
i.e. from the mmap'ed buffer recorded in buffers[].start into this queue entry.
*/
memcpy(_frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].buffer, imageData, len);
/*
Set usedSize = len to record how many bytes were copied,
advance the queue's writeIndex,
and increment usedCount, the number of occupied entries in the frameBuffer array (10 at most).
*/
_frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].usedSize = len;
_frameBufferQueue.writeIndex = (_frameBufferQueue.writeIndex + 1) % _frameBufferQueue.count;
_frameBufferQueue.usedCount++;
} else {
LOGE("image data size(%d) exceeds buffer size(%d)", len, _frameBufferQueue.frameBuffer[_frameBufferQueue.writeIndex].size);
}
}
}
unlockMutex();
EXIT_FUNC_LOG();
return SUCCESS_LOCAL;
}
static FrameBuffer *dqFrameBuffer(void) {
int readIndex = 0;
ENTER_FUNC_LOG();
lockMutex();
if (0 == _frameBufferQueue.count) {
LOGE("_frameBufferQueue isn't initialized");
unlockMutex();
return NULL;
} else {
if (_frameBufferQueue.usedCount == 0) {
LOGE("_frameBufferQueue is empty");
unlockMutex();
return NULL;
} else if (_frameBufferQueue.readIndex >= _frameBufferQueue.count) {
LOGE("_frameBufferQueue.readIndex(%d) is out of range, count(%d)", _frameBufferQueue.readIndex, _frameBufferQueue.count);
unlockMutex();
return NULL;
} else if (_frameBufferQueue.frameBuffer[_frameBufferQueue.readIndex].usedSize == 0) {
LOGE("_frameBufferQueue.frameBuffer[%d] is empty", _frameBufferQueue.readIndex);
unlockMutex();
return NULL;
} else {
readIndex = _frameBufferQueue.readIndex;
_frameBufferQueue.readIndex = (_frameBufferQueue.readIndex + 1) % _frameBufferQueue.count;
_frameBufferQueue.usedCount--;
}
}
unlockMutex();
EXIT_FUNC_LOG();
return &_frameBufferQueue.frameBuffer[readIndex];
}
/* Start recording (the preview keeps running)
1 Check that the camera is running
2 Re-build the file name: videoFilePath = /sdcard/DCIM/Camera/<date-time>.avi
3 Prepare the buffers that will store the frames
4 Open the AVI video file
5 Configure the AVI video file
frame width 640
frame height 480
frames per second 15
compression type MJPG
6 Save the recording start time
7 Set the recording flag _startRecording = 1
*/
int startRecording(void) {
ENTER_FUNC_LOG();
/* Check if the camera is running */
if (TRUE != isCameraOpened()) {
LOGE("Camera doesn't run");
return DEVICE_NOT_OPEN;
}
/* Re-build the file name
videoFilePath = /sdcard/DCIM/Camera/<date-time>.avi
*/
buildFileName(TRUE);
/* Prepare the buffers that will store the frames:
zero the whole _frameBufferQueue structure;
point each FrameBuffer's buffer pointer at an imageBufSize-byte zero-initialized block used to store one image;
set each FrameBuffer's size member to record its capacity;
increment count once for every FrameBuffer that was initialized, so count records how many queue entries exist.
*/
initFrameBuffer();
// AVI container handling starts here
/* Open the AVI video file
videoFilePath = /sdcard/DCIM/Camera/<date-time>.avi
*/
avifile = AVI_open_output_file(videoFilePath);
/* if avifile is NULL, there was an error */
if (avifile == NULL ) {
LOGE("Error opening avifile test.avi\n");
}
else {
/* We default the fps to 15 and reset it on close.
frame width 640
frame height 480
frames per second 15
compression type MJPG
*/
AVI_set_video(avifile, 640, 480, 15, "MJPG");
LOGE("recording to test.avi\n");
}
/* Save the recording start time */
memset(&_recordingStart, 0, sizeof(_recordingStart));
gettimeofday(&_recordingStart, 0);
/* Set the flag that asks for recording */
_startRecording = 1;
EXIT_FUNC_LOG();
return SUCCESS_LOCAL;
}
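The matching stopRecording() is not part of the excerpt; a minimal sketch of what it presumably has to do, assuming avilib's AVI_close() and the globals above (the function name is mine, and the fps fix-up mentioned in the comment is left out):
static int stopRecordingSketch(void)
{
    _startRecording = FALSE;           /* stop the preview path from queueing frames */
    gettimeofday(&_recordingStop, 0);  /* record the end time of the video */
    if (NULL != avifile) {
        AVI_close(avifile);            /* finalize the AVI index and close the file */
        avifile = NULL;
    }
    framecount = 0;
    return SUCCESS_LOCAL;
}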
/* Still capture (the preview keeps running)
1 imageFilePath = /sdcard/DCIM/Camera/<date-time>.jpeg
2 Mark that image capture has started
*/
int captureImage(void) {
ENTER_FUNC_LOG();
/* Check if the camera is running */
if (TRUE != isCameraOpened()) {
LOGE("Camera doesn't run");
return DEVICE_NOT_OPEN;
}
/* Re-build the file name
with the argument FALSE -> imageFilePath = /sdcard/DCIM/Camera/<date-time>.jpeg
*/
buildFileName(FALSE);
_startCaptureImage = TRUE;//mark that image capture has started
EXIT_FUNC_LOG();
return SUCCESS_LOCAL;
}
int setDirectBuffer(char *pDirectBuffer) {
//ENTER_FUNC_LOG();
if (NULL != pDirectBuffer) {
pImageBuf = pDirectBuffer;
LOGE("pImageBuf: 0x%x, pDirectBuffer: 0x%x", pImageBuf, pDirectBuffer);
} else {
return INVARG_LOCAL;
}
//EXIT_FUNC_LOG();
return SUCCESS_LOCAL;
}
int getFrameBufferSize() {
//ENTER_FUNC_LOG();
//EXIT_FUNC_LOG();
return imageBufSize;
}
int getRealImageSize() {
//ENTER_FUNC_LOG();
//EXIT_FUNC_LOG();
return realImageSize;
}
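setDirectBuffer() suggests that the Java side hands down a direct ByteBuffer which pImageBuf then points into, so readFrame()/getRealImageSize() can return preview data without an extra JNI copy. The corresponding JNI wrapper is not shown in the excerpt; a minimal sketch of what it presumably looks like (the method name and signature are assumptions, only GetDirectBufferAddress is standard JNI):
jint Java_com_camera_simplewebcam_CameraPreview_setDirectBuffer(JNIEnv *env,
        jobject thiz, jobject byteBuffer)
{
    /* GetDirectBufferAddress returns the backing storage of a direct ByteBuffer */
    char *addr = (char *)(*env)->GetDirectBufferAddress(env, byteBuffer);
    return setDirectBuffer(addr);
}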
......
B.c
#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include "ioctlLog.h"
#define CLEAR(x) memset(x, 0, sizeof (*x))
/* ioctl wrapper
fh: target device
request: an ioctl request code such as VIDIOC_QUERYCAP
arg: the matching argument, e.g. a struct v4l2_capability
*/
static int xioctl(int fh, int request, void *arg)
{
int r;
do {
LOGE( "Running Request: 0x%x", request);
printArgInfo(request, arg);
r = ioctl(fh, request, arg);
} while (-1 == r && EINTR == errno);
LOGE( "After Running Request: 0x%x", request);
printArgInfo(request, arg);
return r;
}
int v4l2_ioctl_fmtdesc(int fd, struct v4l2_fmtdesc *pFmtdesc) {
if (NULL != pFmtdesc && -1 != fd) {
CLEAR(pFmtdesc);
pFmtdesc->index = 0;
pFmtdesc->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
do {
if ( -1 == xioctl(fd, VIDIOC_ENUM_FMT, pFmtdesc)) {
errnoexit("VIDIOC_ENUM_FMT");
}
pFmtdesc->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
pFmtdesc->index++ ;
}while (EINVAL != errno);
} else {
return errnoexit("Invalid arguments");
}
return SUCCESS_LOCAL;
}
/* Query the operations the device supports and confirm it is a camera device */
int v4l2_ioctl_querycap(int fd, struct v4l2_capability *pCap) {
if (-1 != fd && NULL != pCap) {
CLEAR(pCap);
/*
fd: target device
VIDIOC_QUERYCAP: query the supported operations, normally V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING
*/
if (-1 == xioctl (fd, VIDIOC_QUERYCAP, pCap)) {
if (EINVAL == errno) {
LOGE("This device is no V4L2 device");
return ERROR_LOCAL;
} else {
return errnoexit ("VIDIOC_QUERYCAP");
}
}
/* query succeeded; check that the device has video capture capability */
if (!(pCap->capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
LOGE("This device is no video capture device");
return ERROR_LOCAL;
}
/* V4L2_CAP_STREAMING means the device is driven with streaming ioctls rather than read/write */
if (!(pCap->capabilities & V4L2_CAP_STREAMING)) {
LOGE("This device does not support streaming i/o");
return ERROR_LOCAL;
}
}else {
return errnoexit("Invalid arguments");
}
return SUCCESS_LOCAL;
}
/*
Query the driver's cropping capability
*/
int v4l2_ioctl_cropcap(int fd, struct v4l2_cropcap *pCropcap) {
if (-1 != fd && NULL != pCropcap) {
CLEAR (pCropcap);
/* set the v4l2_buf_type to video capture */
pCropcap->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
/*
fd: target device
VIDIOC_CROPCAP: query the driver's cropping capability
*/
if (0 != xioctl (fd, VIDIOC_CROPCAP, pCropcap)) {
LOGE("Command: VIDIOC_CROPCAP error, errno: %d, %s", errno, strerror(errno));
return ERROR_LOCAL;
}
}else {
return errnoexit("Invalid arguments");
}
return SUCCESS_LOCAL;
}
/*
Set the capture rectangle
*/
int v4l2_ioctl_set_crop(int fd, struct v4l2_crop *pCrop, struct v4l2_cropcap *pCropcap) {
if (-1 != fd && NULL != pCrop) {
CLEAR(pCrop);
/* set the v4l2_buf_type to video capture */
pCrop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
/*
initialize struct v4l2_crop's rectangle (c) with the default rectangle obtained from VIDIOC_CROPCAP
*/
pCrop->c = pCropcap->defrect;
/*
VIDIOC_S_CROP: set the capture rectangle
*/
if (-1 == xioctl (fd, VIDIOC_S_CROP, pCrop)) {
if (EINVAL == errno) {
LOGE("VIDIOC_S_CROP isn't supported");
}
}
} else {
return errnoexit("Invalid arguments");
}
return SUCCESS_LOCAL;
}
int v4l2_ioctl_get_fmt(int fd, struct v4l2_format *pFormat) {
if (-1 != fd && NULL != pFormat) {
CLEAR(pFormat);
pFormat->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (0 != xioctl(fd, VIDIOC_G_FMT, pFormat)) {
return errnoexit("VIDIOC_G_FMT");
}
} else {
return errnoexit("Invalid arguments");
}
return SUCCESS_LOCAL;
}
/*
Set the camera's video capture format
*/
int v4l2_ioctl_set_fmt(int fd, struct v4l2_format *pFormat) {
int retry = 0;
int maxRetry = 3;
if (-1 != fd && NULL != pFormat) {
CLEAR(pFormat);
/*
Set pFormat->type to V4L2_BUF_TYPE_VIDEO_CAPTURE; for capture, every type field is V4L2_BUF_TYPE_VIDEO_CAPTURE
Set the image width to 640
Set the image height to 480
Set the pixel format to V4L2_PIX_FMT_MJPEG
Set the field order so a frame contains a top and a bottom field, interlaced
*/
pFormat->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
pFormat->fmt.pix.width = IMG_WIDTH; //#define IMG_WIDTH 640
pFormat->fmt.pix.height = IMG_HEIGHT;//#define IMG_HEIGHT 480
pFormat->fmt.pix.pixelformat = V4L2_PIX_FMT_MJPEG; // pixel storage format (V4L2_PIX_FMT_YUYV is another option)
pFormat->fmt.pix.field = V4L2_FIELD_INTERLACED; //one frame is split into an odd and an even field
/* try the ioctl up to 3 times */
for (retry = 0; retry < maxRetry; retry++) {
/*
set the driver's video capture format
*/
if (0 != xioctl (fd, VIDIOC_S_FMT, pFormat)) {
LOGE("Command VIDIOC_S_FMT error, retry...");
continue;
}
break;//succeeded, no need to keep setting the format
}
} else {
return errnoexit("Invalid arguments");
}
/*
If the requested size exceeds what the camera actually supports, the camera shrinks it to the largest supported size; the driver writes the size actually in effect back into pFormat, e.g.:
a = pFormat->fmt.pix.width;
b = pFormat->fmt.pix.height;
*/
return SUCCESS_LOCAL;
}
/*
Set stream parameters such as the frame rate, 30 frames per second
*/
int v4l2_ioctl_set_streamparam(int fd, struct v4l2_streamparm *pStreamparam) {
if (-1 != fd && NULL != pStreamparam) {
CLEAR(pStreamparam);
/*
Set the capture type; for capture, every type field is V4L2_BUF_TYPE_VIDEO_CAPTURE
Set pStreamparam->parm.capture.timeperframe.numerator = 1
Set pStreamparam->parm.capture.timeperframe.denominator = 30
which means 30 frames per second
*/
pStreamparam->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
pStreamparam->parm.capture.timeperframe.numerator = 1;
pStreamparam->parm.capture.timeperframe.denominator = 30;
/*
set stream parameters such as the frame rate, 30 fps
*/
if (0 != xioctl(fd, VIDIOC_S_PARM, pStreamparam)) {
return errnoexit("VIDIOC_S_PARM");
}
} else {
return errnoexit("Invalid arguments");
}
return SUCCESS_LOCAL;
}
int v4l2_ioctl_get_streamparam(int fd, struct v4l2_streamparm *pStreamparam) {
if (-1 != fd && NULL != pStreamparam) {
CLEAR(pStreamparam);
pStreamparam->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (0 != xioctl(fd, VIDIOC_G_PARM, pStreamparam)) {
return errnoexit("VIDIOC_G_PARM");
}
} else {
return errnoexit("Invalid arguments");
}
return SUCCESS_LOCAL;
}
int v4l2_ioctl_supported_framesize(int fd, struct v4l2_frmsizeenum *pFrmsize, int *pPixelFormat, int len) {
int idx = 0;
if (-1 != fd && NULL != pFrmsize && NULL != pPixelFormat) {
CLEAR(pFrmsize);
pFrmsize->index = 0;
for (idx = 0; idx < len; idx++) {
pFrmsize->pixel_format = pPixelFormat[idx];
do {
if (0 == xioctl(fd, VIDIOC_ENUM_FRAMESIZES, pFrmsize)) {
if (pFrmsize->type == V4L2_FRMSIZE_TYPE_DISCRETE) {
CLEAR(pFrmsize);
pFrmsize->index++;
pFrmsize->pixel_format = pPixelFormat[idx];
continue;
} else {
break;
}
} else {
if (EINVAL == errno && pFrmsize->type == V4L2_FRMSIZE_TYPE_DISCRETE) {
LOGE("End to list supported frame size");
}
break;
}
} while(1);
}
} else {
return errnoexit("Invalid arguments");
}
return SUCCESS_LOCAL;
}
/*
VIDIOC_REQBUFS: allocate buffers.
The camera delivers its data into internal buffers, so internal buffers have to be requested to hold the captured data. Four of them are usually chosen, forming a simple ring buffer so that frames can be displayed in rotation.
*/
int v4l2_ioctl_reqbuf(int fd, struct v4l2_requestbuffers *pReqBuf) {
if (-1 != fd && NULL != pReqBuf) {
CLEAR (pReqBuf);
/*
Set the number of buffers requested: the buffer queue holds 4 frames
Set V4L2_BUF_TYPE_VIDEO_CAPTURE; for capture, every type field is V4L2_BUF_TYPE_VIDEO_CAPTURE
Set memory-mapped I/O: the application requests buffers from the V4L2 driver via mmap
*/
pReqBuf->count = 4;
pReqBuf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
pReqBuf->memory = V4L2_MEMORY_MMAP;
/*
allocate the buffers
*/
if (0 != xioctl (fd, VIDIOC_REQBUFS, pReqBuf)) {
if (EINVAL == errno) {
LOGE("This device does not support memory mapping");
return ERROR_LOCAL;
} else {
return errnoexit ("VIDIOC_REQBUFS");
}
}
} else {
return errnoexit("Invalid arguments");
}
return SUCCESS_LOCAL;
}
/*
Query an allocated buffer and copy its info into pBuf.
struct v4l2_buffer *pBuf: temporary holder for the camera buffer description
*/
int v4l2_ioctl_querybuf(int fd, struct v4l2_buffer *pBuf, int bufIndex) {
if (-1 != fd && NULL != pBuf) {
CLEAR(pBuf);
/*
Set pBuf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; for capture, every type field is V4L2_BUF_TYPE_VIDEO_CAPTURE
Set memory-mapped I/O: the application requests buffers from the V4L2 driver via mmap
Set the index: the id number of the buffer
*/
pBuf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
pBuf->memory = V4L2_MEMORY_MMAP;
pBuf->index = bufIndex;
/*
query the allocated buffer and copy its info into pBuf
*/
if (0 != xioctl (fd, VIDIOC_QUERYBUF, pBuf))
return errnoexit ("VIDIOC_QUERYBUF");
} else {
return errnoexit("Invalid arguments");
}
return SUCCESS_LOCAL;
}
int v4l2_ioctl_qbuf(int fd, struct v4l2_buffer *pBuf, int bufIndex) {
if (-1 != fd && NULL != pBuf) {
CLEAR (pBuf);
pBuf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
pBuf->memory = V4L2_MEMORY_MMAP;
pBuf->index = bufIndex;
if (0 != xioctl (fd, VIDIOC_QBUF, pBuf)) {
return errnoexit ("VIDIOC_QBUF");
}
}else {
return errnoexit("Invalid arguments");
}
return SUCCESS_LOCAL;
}
/* xioctl (VIDIOC_DQBUF): dequeue a filled buffer */
int v4l2_ioctl_dqbuf(int fd, struct v4l2_buffer *pBuf) {
if (-1 != fd && NULL != pBuf) {
CLEAR (pBuf);
pBuf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;//for capture, every type field is V4L2_BUF_TYPE_VIDEO_CAPTURE
pBuf->memory = V4L2_MEMORY_MMAP;//memory-mapped access
/* dequeue the filled buffer */
if (-1 == xioctl (fd, VIDIOC_DQBUF, pBuf)) {
LOGE("Command VIDIOC_DQBUF error, errno: %d, %s", errno, strerror(errno));
switch (errno) {
case EAGAIN:
case EIO:
default:
errnoexit ("VIDIOC_DQBUF");
return errno;
}
}
} else {
return errnoexit("Invalid arguments");
}
return SUCCESS_LOCAL;
}
/*
Start the camera
*/
int v4l2_ioctl_streamon(int fd) {
enum v4l2_buf_type type;
if (-1 != fd) {
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;//for capture, every type field is V4L2_BUF_TYPE_VIDEO_CAPTURE
/*
start the camera
*/
if (0 != xioctl (fd, VIDIOC_STREAMON, &type)) {
return errnoexit ("VIDIOC_STREAMON");
}
} else {
return errnoexit("Invalid arguments");
}
return SUCCESS_LOCAL;
}
int v4l2_ioctl_streamoff(int fd) {
enum v4l2_buf_type type;
if (-1 != fd) {
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (0 != xioctl (fd, VIDIOC_STREAMOFF, &type)) {
return errnoexit ("VIDIOC_STREAMON");
}
} else {
return errnoexit("Invalid arguments");
}
return SUCCESS_LOCAL;
}
Notes on the relevant data structures from the headers.
/**
* struct v4l2_buffer - video buffer info
* @index: id number of the buffer
* @type: enum v4l2_buf_type; buffer type (type == *_MPLANE for multiplanar buffers);
* @bytesused: number of bytes occupied by data in the buffer (payload);
* unused (set to 0) for multiplanar buffers
* @flags: buffer informational flags
* @field: enum v4l2_field; field order of the image in the buffer
* @timestamp: frame timestamp
* @timecode: frame timecode
* @sequence: sequence count of this frame
* @memory: enum v4l2_memory; the method, in which the actual video data is
* passed
* @offset: for non-multiplanar buffers with memory == V4L2_MEMORY_MMAP;
* offset from the start of the device memory for this plane,
* (or a "cookie" that should be passed to mmap() as offset)
* @userptr: for non-multiplanar buffers with memory == V4L2_MEMORY_USERPTR;
* a userspace pointer pointing to this buffer
* @fd: for non-multiplanar buffers with memory == V4L2_MEMORY_DMABUF;
* a userspace file descriptor associated with this buffer
* @planes: for multiplanar buffers; userspace pointer to the array of plane
* info structs for this buffer
* @length: size in bytes of the buffer (NOT its payload) for single-plane
* buffers (when type != *_MPLANE); number of elements in the
* planes array for multi-plane buffers
*
* Contains data exchanged by application and driver using one of the Streaming
* I/O methods.
*/
struct v4l2_buffer {
__u32 index;
__u32 type;
__u32 bytesused;//number of bytes of data in the buffer
__u32 flags;
__u32 field;
struct timeval timestamp;
struct v4l2_timecode timecode;
__u32 sequence;
/* memory location */
__u32 memory;
union {
__u32 offset;// V4L2_MEMORY_MMAP offset from the start of the device memory for this plane
unsigned long userptr;
struct v4l2_plane *planes;
__s32 fd;
} m;
__u32 length;
__u32 reserved2;
__u32 reserved;
};
struct v4l2_capability {
__u8 driver[16]; /* the driver field has to match the name in struct video_device */
__u8 card[32]; /* i.e. "Hauppauge WinTV" */
__u8 bus_info[32]; /* "PCI:" + pci_name(pci_dev) */
__u32 version; /* should use KERNEL_VERSION() */
/* capabilities describes the operations the device supports; a common value is V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING, i.e. a video capture device that supports streaming I/O */
__u32 capabilities;
__u32 reserved[4];
};
/*
* I N P U T I M A G E C R O P P I N G
*/
struct v4l2_cropcap
{
enum v4l2_buf_type type; // type of the data stream, set by the application
struct v4l2_rect bounds; // the bounds of the window the camera sensor can capture
struct v4l2_rect defrect; // the default window: its origin plus its width and height, in pixels
struct v4l2_fract pixelaspect; // the pixel aspect ratio of the image
};
struct v4l2_crop {
__u32 type; /* enum v4l2_buf_type */
struct v4l2_rect c;
};
/* rectangle: position and size */
struct v4l2_rect {
__s32 left;
__s32 top;
__s32 width;
__s32 height;
};
enum v4l2_buf_type {
V4L2_BUF_TYPE_VIDEO_CAPTURE = 1,
V4L2_BUF_TYPE_VIDEO_OUTPUT = 2,
V4L2_BUF_TYPE_VIDEO_OVERLAY = 3,
V4L2_BUF_TYPE_VBI_CAPTURE = 4,
V4L2_BUF_TYPE_VBI_OUTPUT = 5,
V4L2_BUF_TYPE_SLICED_VBI_CAPTURE = 6,
V4L2_BUF_TYPE_SLICED_VBI_OUTPUT = 7,
/* Experimental */
V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY = 8,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE = 9,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10,
V4L2_BUF_TYPE_SDR_CAPTURE = 11,
/* Deprecated, do not use */
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
/*
Frame format, used with VIDIOC_G_FMT, VIDIOC_S_FMT and related ioctls
*/
struct v4l2_format {
enum v4l2_buf_type type;
union {
struct v4l2_pix_format pix; /* V4L2_BUF_TYPE_VIDEO_CAPTURE */
struct v4l2_window win; /* V4L2_BUF_TYPE_VIDEO_OVERLAY */
struct v4l2_vbi_format vbi; /* V4L2_BUF_TYPE_VBI_CAPTURE */
struct v4l2_sliced_vbi_format sliced; /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */
__u8 raw_data[200]; /* user-defined */
} fmt;
};
/*
* VIDEO IMAGE FORMAT
*/
struct v4l2_pix_format {
__u32 width; //image width
__u32 height; //image height
__u32 pixelformat;// pixel storage format, e.g. YUV 4:2:2 or RGB
__u32 field; /* enum v4l2_field */
__u32 bytesperline; /* number of bytes per image line */
__u32 sizeimage; /* total number of bytes per image */
__u32 colorspace; /* colorspace of the device, enum v4l2_colorspace */
__u32 priv; /* private data, depends on pixelformat */
__u32 flags; /* format flags (V4L2_PIX_FMT_FLAG_*) */
};
/*
* E N U M S
*/
enum v4l2_field {
V4L2_FIELD_ANY = 0, /* driver can choose from none,
top, bottom, interlaced
depending on whatever it thinks
is approximate ... */
V4L2_FIELD_NONE = 1, /* this device has no fields ... */
V4L2_FIELD_TOP = 2, /* top field only */
V4L2_FIELD_BOTTOM = 3, /* bottom field only */
V4L2_FIELD_INTERLACED = 4, /* both fields interlaced */
V4L2_FIELD_SEQ_TB = 5, /* both fields sequential into one
buffer, top-bottom order */
V4L2_FIELD_SEQ_BT = 6, /* same as above + bottom-top order */
V4L2_FIELD_ALTERNATE = 7, /* both fields alternating into
separate buffers */
V4L2_FIELD_INTERLACED_TB = 8, /* both fields interlaced, top field
first and the top field is
transmitted first */
V4L2_FIELD_INTERLACED_BT = 9, /* both fields interlaced, top field
first and the bottom field is
transmitted first */
};
/* Stream type-dependent parameters
*/
struct v4l2_streamparm {
__u32 type; /* enum v4l2_buf_type */
union {
struct v4l2_captureparm capture;
struct v4l2_outputparm output;
__u8 raw_data[200]; /* user-defined */
} parm;
};
/*
* CAPTURE PARAMETERS
*/
struct v4l2_captureparm {
__u32 capability; /* Supported modes */
__u32 capturemode; /* Current mode */
struct v4l2_fract timeperframe; /* Time per frame in seconds */
__u32 extendedmode; /* Driver-specific extensions */
__u32 readbuffers; /* # of buffers for read */
__u32 reserved[4];
};
struct v4l2_fract {
__u32 numerator;//numerator of the frame interval
__u32 denominator;// denominator of the frame interval
};
For example, numerator = 1 and denominator = 30 mean one frame every 1/30 s, i.e. 30 frames per second.
/* Used to request kernel buffers for memory mapping
* MEMORY-MAPPING BUFFERS
*/
struct v4l2_requestbuffers {
__u32 count; // number of buffers, i.e. how many frames the buffer queue holds
__u32 type; // enum v4l2_buf_type; the stream type, always V4L2_BUF_TYPE_VIDEO_CAPTURE here
__u32 memory; // enum v4l2_memory, V4L2_MEMORY_MMAP or V4L2_MEMORY_USERPTR
__u32 reserved[2];
};
/*
A video application can request buffers from the V4L2 driver in two ways:
1 V4L2_MEMORY_MMAP, memory-mapped mode
2 V4L2_MEMORY_USERPTR, user-pointer mode
*/
enum v4l2_memory {
V4L2_MEMORY_MMAP = 1,
V4L2_MEMORY_USERPTR = 2,
V4L2_MEMORY_OVERLAY = 3,
};
//the APP fills in the id field; when VIDIOC_G_CTRL is called the driver fills in the value field
struct v4l2_control
{
__u32 id;
__s32 value;
};
The following ioctl request identifiers are commonly used in V4L2 development:
VIDIOC_REQBUFS: allocate buffers
VIDIOC_QUERYBUF: query an allocated buffer, i.e. obtain the offset/length needed to map the buffers allocated by VIDIOC_REQBUFS
VIDIOC_QUERYCAP: query the driver capabilities
VIDIOC_ENUM_FMT: enumerate the video formats the driver supports
VIDIOC_S_FMT: set the driver's video capture format
VIDIOC_G_FMT: read the driver's video capture format
VIDIOC_TRY_FMT: validate a capture format without applying it
VIDIOC_CROPCAP: query the driver's cropping capability
VIDIOC_S_CROP: set the capture rectangle
VIDIOC_G_CROP: read the capture rectangle
VIDIOC_QBUF: put a buffer back on the queue
VIDIOC_DQBUF: take a filled buffer off the queue
VIDIOC_STREAMON: start streaming
VIDIOC_STREAMOFF: stop streaming
VIDIOC_QUERYSTD: query the video standards the device supports, for example PAL or NTSC
Get or set streaming parameters:
VIDIOC_G_PARM: get stream parameters, typically the frame rate
VIDIOC_S_PARM: set stream parameters, typically the frame rate
VIDIOC_G_CTRL: read back individual controls:
specific settings such as exposure type, exposure, gain, white balance, brightness, saturation and contrast; the current value of each control can be obtained with VIDIOC_G_CTRL.
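The library only reads controls back; the symmetric write path uses VIDIOC_S_CTRL, a standard V4L2 ioctl that does not appear in this code. A minimal sketch in the same style as the wrappers in B.c (the wrapper name is mine):
int v4l2_ioctl_set_ctrl_sketch(int fd, unsigned int id, int value) {
    struct v4l2_control ctrl;
    if (-1 == fd) {
        return errnoexit("Invalid arguments");
    }
    ctrl.id = id;        /* e.g. V4L2_CID_BRIGHTNESS */
    ctrl.value = value;  /* the new control value */
    /* the driver applies the value and may clamp it to the supported range */
    if (0 != xioctl(fd, VIDIOC_S_CTRL, &ctrl)) {
        return errnoexit("VIDIOC_S_CTRL");
    }
    return SUCCESS_LOCAL;
}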
/* see also http://vektor.theorem.ca/graphics/ycbcr/ */
enum v4l2_colorspace {
/* ITU-R 601 -- broadcast NTSC/PAL */
V4L2_COLORSPACE_SMPTE170M = 1,
/* 1125-Line (US) HDTV */
V4L2_COLORSPACE_SMPTE240M = 2,
/* HD and modern captures. */
V4L2_COLORSPACE_REC709 = 3,
/* broken BT878 extents (601, luma range 16-253 instead of 16-235) */
V4L2_COLORSPACE_BT878 = 4,
/* These should be useful. Assume 601 extents. */
V4L2_COLORSPACE_470_SYSTEM_M = 5,
V4L2_COLORSPACE_470_SYSTEM_BG = 6,
/* I know there will be cameras that send this. So, this is
* unspecified chromaticities and full 0-255 on each of the
* Y'CbCr components
*/
V4L2_COLORSPACE_JPEG = 7,
/* For RGB colourspaces, this is probably a good start. */
V4L2_COLORSPACE_SRGB = 8,
};