【Problem Description】The previous article in this series, "FriendlyARM video surveillance solution source code study (5) - input control", described what input_run does. This article takes the capture thread created by input_run and analyzes the video capture path in detail.
【Analysis】
1 Files and directories involved
mjpg-streamer-mini2440-read-only/start_uvc.sh
mjpg-streamer-mini2440-read-only/mjpg_streamer.c
mjpg-streamer-mini2440-read-only/mjpg_streamer.h
mjpg-streamer-mini2440-read-only/plugins/input.h
mjpg-streamer-mini2440-read-only/plugins/input_uvc
2 Video device initialization
The video device is initialized in the input_init function (mjpg-streamer-mini2440-read-only/plugins/input_uvc/input_uvc.c):
/* open video device and prepare data structure */
if (init_videoIn(videoIn, dev, width, height, fps, format, 1) < 0) {
    IPRINT("init_VideoIn failed\n");
    closelog();
    exit(EXIT_FAILURE);
}
init_videoIn in turn calls the following initialization code (mjpg-streamer-mini2440-read-only/plugins/input_uvc/v4l2uvc.c):
if (init_v4l2(vd) < 0) {
    fprintf(stderr, " Init v4L2 failed !! exit fatal \n");
    goto error;
}
init_v4l2 is implemented as follows (mjpg-streamer-mini2440-read-only/plugins/input_uvc/v4l2uvc.c):
static int init_v4l2(struct vdIn *vd)
{
    int i;
    int ret = 0;

    if ((vd->fd = open(vd->videodevice, O_RDWR)) == -1) {
        perror("ERROR opening V4L interface");
        return -1;
    }

    memset(&vd->cap, 0, sizeof(struct v4l2_capability));
    ret = ioctl(vd->fd, VIDIOC_QUERYCAP, &vd->cap);
    if (ret < 0) {
        fprintf(stderr, "Error opening device %s: unable to query device.\n", vd->videodevice);
        goto fatal;
    }

    if ((vd->cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
        fprintf(stderr, "Error opening device %s: video capture not supported.\n", vd->videodevice);
        goto fatal;
    }

    if (vd->grabmethod) {
        if (!(vd->cap.capabilities & V4L2_CAP_STREAMING)) {
            fprintf(stderr, "%s does not support streaming i/o\n", vd->videodevice);
            goto fatal;
        }
    } else {
        if (!(vd->cap.capabilities & V4L2_CAP_READWRITE)) {
            fprintf(stderr, "%s does not support read i/o\n", vd->videodevice);
            goto fatal;
        }
    }

    /* set format */
    memset(&vd->fmt, 0, sizeof(struct v4l2_format));
    vd->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    vd->fmt.fmt.pix.width = vd->width;
    vd->fmt.fmt.pix.height = vd->height;
    vd->fmt.fmt.pix.pixelformat = vd->formatIn;
    vd->fmt.fmt.pix.field = V4L2_FIELD_ANY;
    ret = ioctl(vd->fd, VIDIOC_S_FMT, &vd->fmt);
    if (ret < 0) {
        perror("Unable to set format");
        goto fatal;
    }
    if ((vd->fmt.fmt.pix.width != vd->width) ||
        (vd->fmt.fmt.pix.height != vd->height)) {
        fprintf(stderr, " format asked unavailable get width %d height %d \n",
                vd->fmt.fmt.pix.width, vd->fmt.fmt.pix.height);
        vd->width = vd->fmt.fmt.pix.width;
        vd->height = vd->fmt.fmt.pix.height;
        /* look the format is not part of the deal ??? */
    }
    if (vd->fmt.fmt.pix.pixelformat != vd->formatIn) {
        char fourcc1[5] = {0, 0, 0, 0, 0};
        char fourcc2[5] = {0, 0, 0, 0, 0};
        memmove(fourcc1, (char *)&vd->formatIn, 4);
        memmove(fourcc2, (char *)&vd->fmt.fmt.pix.pixelformat, 4);
        fprintf(stderr, " requested %s but got %s format instead\n", fourcc1, fourcc2);
        vd->formatIn = vd->fmt.fmt.pix.pixelformat;
    }

    /* set framerate */
    struct v4l2_streamparm *setfps;
    setfps = (struct v4l2_streamparm *)calloc(1, sizeof(struct v4l2_streamparm));
    memset(setfps, 0, sizeof(struct v4l2_streamparm));
    setfps->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    setfps->parm.capture.timeperframe.numerator = 1;
    setfps->parm.capture.timeperframe.denominator = vd->fps;
    ret = ioctl(vd->fd, VIDIOC_S_PARM, setfps);

    /* request buffers */
    memset(&vd->rb, 0, sizeof(struct v4l2_requestbuffers));
    vd->rb.count = NB_BUFFER;
    vd->rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    vd->rb.memory = V4L2_MEMORY_MMAP;
    ret = ioctl(vd->fd, VIDIOC_REQBUFS, &vd->rb);
    if (ret < 0) {
        perror("Unable to allocate buffers");
        goto fatal;
    }

    /* map the buffers */
    for (i = 0; i < NB_BUFFER; i++) {
        memset(&vd->buf, 0, sizeof(struct v4l2_buffer));
        vd->buf.index = i;
        vd->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        vd->buf.memory = V4L2_MEMORY_MMAP;
        ret = ioctl(vd->fd, VIDIOC_QUERYBUF, &vd->buf);
        if (ret < 0) {
            perror("Unable to query buffer");
            goto fatal;
        }
        if (debug)
            fprintf(stderr, "length: %u offset: %u\n", vd->buf.length, vd->buf.m.offset);
        vd->mem[i] = mmap(0 /* start anywhere */, vd->buf.length, PROT_READ,
                          MAP_SHARED, vd->fd, vd->buf.m.offset);
        if (vd->mem[i] == MAP_FAILED) {
            perror("Unable to map buffer");
            goto fatal;
        }
        if (debug)
            fprintf(stderr, "Buffer mapped at address %p.\n", vd->mem[i]);
    }

    /* Queue the buffers. */
    for (i = 0; i < NB_BUFFER; ++i) {
        memset(&vd->buf, 0, sizeof(struct v4l2_buffer));
        vd->buf.index = i;
        vd->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        vd->buf.memory = V4L2_MEMORY_MMAP;
        ret = ioctl(vd->fd, VIDIOC_QBUF, &vd->buf);
        if (ret < 0) {
            perror("Unable to queue buffer");
            goto fatal;
        }
    }
    return 0;

fatal:
    return -1;
}
This function performs the following steps:
(1) Open the video device
if ((vd->fd = open(vd->videodevice, O_RDWR)) == -1) {
    perror("ERROR opening V4L interface");
    return -1;
}
(2) Query the device capabilities and check that capture and the chosen I/O method are supported
memset(&vd->cap, 0, sizeof(struct v4l2_capability));
ret = ioctl(vd->fd, VIDIOC_QUERYCAP, &vd->cap);
if (ret < 0) {
    fprintf(stderr, "Error opening device %s: unable to query device.\n", vd->videodevice);
    goto fatal;
}
if ((vd->cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) == 0) {
    fprintf(stderr, "Error opening device %s: video capture not supported.\n", vd->videodevice);
    goto fatal;
}
if (vd->grabmethod) {
    if (!(vd->cap.capabilities & V4L2_CAP_STREAMING)) {
        fprintf(stderr, "%s does not support streaming i/o\n", vd->videodevice);
        goto fatal;
    }
} else {
    if (!(vd->cap.capabilities & V4L2_CAP_READWRITE)) {
        fprintf(stderr, "%s does not support read i/o\n", vd->videodevice);
        goto fatal;
    }
}
(3) Set the video format
/* set format */
memset(&vd->fmt, 0, sizeof(struct v4l2_format));
vd->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vd->fmt.fmt.pix.width = vd->width;
vd->fmt.fmt.pix.height = vd->height;
vd->fmt.fmt.pix.pixelformat = vd->formatIn;
vd->fmt.fmt.pix.field = V4L2_FIELD_ANY;
ret = ioctl(vd->fd, VIDIOC_S_FMT, &vd->fmt);
if (ret < 0) {
    perror("Unable to set format");
    goto fatal;
}
if ((vd->fmt.fmt.pix.width != vd->width) ||
    (vd->fmt.fmt.pix.height != vd->height)) {
    fprintf(stderr, " format asked unavailable get width %d height %d \n",
            vd->fmt.fmt.pix.width, vd->fmt.fmt.pix.height);
    vd->width = vd->fmt.fmt.pix.width;
    vd->height = vd->fmt.fmt.pix.height;
    /* look the format is not part of the deal ??? */
}
if (vd->fmt.fmt.pix.pixelformat != vd->formatIn) {
    char fourcc1[5] = {0, 0, 0, 0, 0};
    char fourcc2[5] = {0, 0, 0, 0, 0};
    memmove(fourcc1, (char *)&vd->formatIn, 4);
    memmove(fourcc2, (char *)&vd->fmt.fmt.pix.pixelformat, 4);
    fprintf(stderr, " requested %s but got %s format instead\n", fourcc1, fourcc2);
    vd->formatIn = vd->fmt.fmt.pix.pixelformat;
}
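The fourcc1/fourcc2 trick works because a V4L2 pixel format is simply a four-character code packed into a 32-bit integer. The following small program is not part of the original source; it only illustrates how such a code can be unpacked into a printable string on a little-endian machine (the same assumption the code above makes):

#include <stdio.h>
#include <string.h>
#include <linux/videodev2.h>

/* Illustration only: unpack a V4L2 FOURCC into a printable string
 * (valid on little-endian targets such as x86 and the mini2440's ARM core). */
static void print_fourcc(unsigned int fmt)
{
    char fourcc[5] = {0};
    memcpy(fourcc, &fmt, 4);
    printf("0x%08x = '%s'\n", fmt, fourcc);
}

int main(void)
{
    print_fourcc(V4L2_PIX_FMT_MJPEG);   /* prints 'MJPG' */
    print_fourcc(V4L2_PIX_FMT_YUYV);    /* prints 'YUYV' */
    return 0;
}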
(4) Set the frame rate
/* set framerate */
struct v4l2_streamparm *setfps;
setfps = (struct v4l2_streamparm *)calloc(1, sizeof(struct v4l2_streamparm));
memset(setfps, 0, sizeof(struct v4l2_streamparm));
setfps->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
setfps->parm.capture.timeperframe.numerator = 1;
setfps->parm.capture.timeperframe.denominator = vd->fps;
ret = ioctl(vd->fd, VIDIOC_S_PARM, setfps);
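Note that the VIDIOC_S_PARM return value is not checked and the heap-allocated setfps structure is never freed in this listing. A driver is also allowed to adjust the requested interval, so an application can read back the frame rate that was actually applied with the standard VIDIOC_G_PARM ioctl. The snippet below is not part of the original source, only a minimal sketch assuming vd->fd is the open capture device used above:

/* Sketch: read back the frame rate the driver actually applied (illustrative only). */
struct v4l2_streamparm getfps;
memset(&getfps, 0, sizeof(getfps));
getfps.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
if (ioctl(vd->fd, VIDIOC_G_PARM, &getfps) == 0 &&
    (getfps.parm.capture.capability & V4L2_CAP_TIMEPERFRAME)) {
    fprintf(stderr, "driver frame interval: %u/%u s\n",
            getfps.parm.capture.timeperframe.numerator,
            getfps.parm.capture.timeperframe.denominator);
}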
(5) Request, map and queue the capture buffers
/* request buffers */
memset(&vd->rb, 0, sizeof(struct v4l2_requestbuffers));
vd->rb.count = NB_BUFFER;
vd->rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
vd->rb.memory = V4L2_MEMORY_MMAP;
ret = ioctl(vd->fd, VIDIOC_REQBUFS, &vd->rb);
if (ret < 0) {
    perror("Unable to allocate buffers");
    goto fatal;
}

/* map the buffers */
for (i = 0; i < NB_BUFFER; i++) {
    memset(&vd->buf, 0, sizeof(struct v4l2_buffer));
    vd->buf.index = i;
    vd->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    vd->buf.memory = V4L2_MEMORY_MMAP;
    ret = ioctl(vd->fd, VIDIOC_QUERYBUF, &vd->buf);
    if (ret < 0) {
        perror("Unable to query buffer");
        goto fatal;
    }
    if (debug)
        fprintf(stderr, "length: %u offset: %u\n", vd->buf.length, vd->buf.m.offset);
    vd->mem[i] = mmap(0 /* start anywhere */, vd->buf.length, PROT_READ,
                      MAP_SHARED, vd->fd, vd->buf.m.offset);
    if (vd->mem[i] == MAP_FAILED) {
        perror("Unable to map buffer");
        goto fatal;
    }
    if (debug)
        fprintf(stderr, "Buffer mapped at address %p.\n", vd->mem[i]);
}

/* Queue the buffers. */
for (i = 0; i < NB_BUFFER; ++i) {
    memset(&vd->buf, 0, sizeof(struct v4l2_buffer));
    vd->buf.index = i;
    vd->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    vd->buf.memory = V4L2_MEMORY_MMAP;
    ret = ioctl(vd->fd, VIDIOC_QBUF, &vd->buf);
    if (ret < 0) {
        perror("Unable to queue buffer");
        goto fatal;
    }
}
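For completeness, the mmap streaming I/O set up here has to be torn down when capture stops. The sketch below is not quoted from mjpg-streamer; it only illustrates the usual counterpart calls (stop streaming, unmap the driver buffers, close the device), assuming the plugin's struct vdIn and NB_BUFFER and that all buffers share the length reported by VIDIOC_QUERYBUF:

/* Illustrative teardown for the mmap streaming I/O above (sketch, not from mjpg-streamer). */
static void stop_and_unmap(struct vdIn *vd, size_t buf_length)
{
    int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    int i;

    ioctl(vd->fd, VIDIOC_STREAMOFF, &type);   /* stop the capture engine */
    for (i = 0; i < NB_BUFFER; i++)
        munmap(vd->mem[i], buf_length);       /* release the mmap'ed driver buffers */
    close(vd->fd);
}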
3 Video capture
input_run creates the capture thread as shown below:
int input_run(void) {
    pglobal->buf = malloc(videoIn->framesizeIn);
    if (pglobal->buf == NULL) {
        fprintf(stderr, "could not allocate memory\n");
        exit(EXIT_FAILURE);
    }
    pthread_create(&cam, 0, cam_thread, NULL);
    pthread_detach(cam);
    return 0;
}
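The thread started here fills pglobal->buf and broadcasts on pglobal->db_update (see cam_thread below). For context, a consumer such as an output plugin typically waits on that condition variable while holding the pglobal->db mutex. The following loop is only an illustrative sketch of that producer/consumer pattern, not code copied from mjpg-streamer's output plugins:

/* Illustrative consumer loop (sketch; assumes pglobal points to the shared globals). */
unsigned char *frame = malloc(256 * 1024);    /* assumed large enough for one JPEG */
int frame_size = 0;

while (!pglobal->stop) {
    pthread_mutex_lock(&pglobal->db);
    pthread_cond_wait(&pglobal->db_update, &pglobal->db);  /* sleep until cam_thread broadcasts */
    frame_size = pglobal->size;
    memcpy(frame, pglobal->buf, frame_size);               /* take a private copy of the JPEG */
    pthread_mutex_unlock(&pglobal->db);
    /* ... hand frame/frame_size to a client ... */
}
free(frame);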
cam_thread is defined as follows:
void *cam_thread(void *arg)
{
    /* set cleanup handler to cleanup allocated ressources */
    pthread_cleanup_push(cam_cleanup, NULL);

    while (!pglobal->stop) {
        /* grab a frame */
        if (uvcGrab(videoIn) < 0) {
            IPRINT("Error grabbing frames\n");
            exit(EXIT_FAILURE);
        }
        DBG("received frame of size: %d\n", videoIn->buf.bytesused);

        /*
         * Workaround for broken, corrupted frames:
         * Under low light conditions corrupted frames may get captured.
         * The good thing is such frames are quite small compared to the regular pictures.
         * For example a VGA (640x480) webcam picture is normally >= 8kByte large,
         * corrupted frames are smaller.
         */
        if (videoIn->buf.bytesused < minimum_size) {
            DBG("dropping too small frame, assuming it as broken\n");
            continue;
        }

        /* copy JPG picture to global buffer */
        pthread_mutex_lock(&pglobal->db);

        /*
         * If capturing in YUV mode convert to JPEG now.
         * This compression requires many CPU cycles, so try to avoid YUV format.
         * Getting JPEGs straight from the webcam, is one of the major advantages of
         * Linux-UVC compatible devices.
         */
        if (videoIn->formatIn != V4L2_PIX_FMT_MJPEG) {
            DBG("compressing frame\n");
            pglobal->size = compress_yuyv_to_jpeg(videoIn, pglobal->buf, videoIn->framesizeIn, gquality, videoIn->formatIn);
        } else {
            DBG("copying frame\n");
            pglobal->size = memcpy_picture(pglobal->buf, videoIn->tmpbuffer, videoIn->buf.bytesused);
        }

#if 0
        /* motion detection can be done just by comparing the picture size, but it is not very accurate!! */
        if ((prev_size - global->size) * (prev_size - global->size) > 4 * 1024 * 1024) {
            DBG("motion detected (delta: %d kB)\n", (prev_size - global->size) / 1024);
        }
        prev_size = global->size;
#endif

        /* signal fresh_frame */
        pthread_cond_broadcast(&pglobal->db_update);
        pthread_mutex_unlock(&pglobal->db);

        DBG("waiting for next frame\n");

        /* only use usleep if the fps is below 5, otherwise the overhead is too long */
        if (videoIn->fps < 5) {
            usleep(1000 * 1000 / videoIn->fps);
        }
    }

    DBG("leaving input thread, calling cleanup function now\n");
    pthread_cleanup_pop(1);

    return NULL;
}
This function performs the following tasks:
(1) Frame grabbing
while (!pglobal->stop) {
    /* grab a frame */
    if (uvcGrab(videoIn) < 0) {
        IPRINT("Error grabbing frames\n");
        exit(EXIT_FAILURE);
    }
uvcGrab is implemented as follows:
int uvcGrab(struct vdIn *vd)
{
#define HEADERFRAME1 0xaf
    int ret;

    if (!vd->isstreaming)
        if (video_enable(vd))
            goto err;

    memset(&vd->buf, 0, sizeof(struct v4l2_buffer));
    vd->buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    vd->buf.memory = V4L2_MEMORY_MMAP;
    ret = ioctl(vd->fd, VIDIOC_DQBUF, &vd->buf);
    if (ret < 0) {
        perror("Unable to dequeue buffer");
        goto err;
    }

    switch (vd->formatIn) {
    case V4L2_PIX_FMT_MJPEG:
        if (vd->buf.bytesused <= HEADERFRAME1) {
            /* Prevent crash on empty image */
            fprintf(stderr, "Ignoring empty buffer ...\n");
            return 0;
        }
        /*
        memcpy(vd->tmpbuffer, vd->mem[vd->buf.index], vd->buf.bytesused);
        memcpy(vd->tmpbuffer, vd->mem[vd->buf.index], HEADERFRAME1);
        memcpy(vd->tmpbuffer + HEADERFRAME1, dht_data, sizeof(dht_data));
        memcpy(vd->tmpbuffer + HEADERFRAME1 + sizeof(dht_data),
               vd->mem[vd->buf.index] + HEADERFRAME1,
               (vd->buf.bytesused - HEADERFRAME1));
        */
        memcpy(vd->tmpbuffer, vd->mem[vd->buf.index], vd->buf.bytesused);
        if (debug)
            fprintf(stderr, "bytes in used %d \n", vd->buf.bytesused);
        break;
    case V4L2_PIX_FMT_YUYV:
    default:
        if (vd->buf.bytesused > vd->framesizeIn)
            memcpy(vd->framebuffer, vd->mem[vd->buf.index], (size_t) vd->framesizeIn);
        else
            memcpy(vd->framebuffer, vd->mem[vd->buf.index], (size_t) vd->buf.bytesused);
        break;
        /* goto err; */
    }

    ret = ioctl(vd->fd, VIDIOC_QBUF, &vd->buf);
    if (ret < 0) {
        perror("Unable to requeue buffer");
        goto err;
    }
    return 0;

err:
    vd->signalquit = 0;
    return -1;
}
uvcGrab performs the following steps:
(a) Enable streaming on the video device
if (!vd->isstreaming)
    if (video_enable(vd))
        goto err;
which essentially comes down to the following ioctl:
ret = ioctl(vd->fd, VIDIOC_STREAMON, &type);
if (ret < 0) {
    perror("Unable to start capture");
    return ret;
}
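The article only quotes the core ioctl; put in context, the surrounding video_enable helper looks roughly like the sketch below (reconstructed from the calls shown above, not a verbatim quote from v4l2uvc.c). The important detail is that it records vd->isstreaming, so uvcGrab only turns streaming on once:

/* Sketch of video_enable (reconstruction, not a verbatim listing). */
static int video_enable(struct vdIn *vd)
{
    int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    int ret;

    ret = ioctl(vd->fd, VIDIOC_STREAMON, &type);   /* start the capture engine */
    if (ret < 0) {
        perror("Unable to start capture");
        return ret;
    }
    vd->isstreaming = 1;   /* so uvcGrab does not enable streaming again */
    return 0;
}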
(b) Dequeue a captured frame
ret = ioctl(vd->fd, VIDIOC_DQBUF, &vd->buf);
if (ret < 0) {
    perror("Unable to dequeue buffer");
    goto err;
}
Note: VIDIOC_DQBUF fills vd->buf with the metadata of the captured frame (buffer index, bytesused, and so on); the pixel data itself sits in the mmap'ed buffer vd->mem[vd->buf.index], from which uvcGrab copies it into vd->tmpbuffer (MJPEG) or vd->framebuffer (YUYV).
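In other words, after a successful dequeue the frame can be addressed like this (illustrative snippet, not part of the original source):

/* Illustrative: locating the captured frame after VIDIOC_DQBUF. */
unsigned char *frame = vd->mem[vd->buf.index];  /* start of the mmap'ed frame data */
size_t length = vd->buf.bytesused;              /* number of valid bytes in this frame */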
(2) Compressing or copying the frame
if (videoIn->formatIn != V4L2_PIX_FMT_MJPEG) {
    DBG("compressing frame\n");
    pglobal->size = compress_yuyv_to_jpeg(videoIn, pglobal->buf, videoIn->framesizeIn, gquality, videoIn->formatIn);
} else {
    DBG("copying frame\n");
    pglobal->size = memcpy_picture(pglobal->buf, videoIn->tmpbuffer, videoIn->buf.bytesused);
}
This step is analyzed in detail in a later article of this series.
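To give a rough idea of what compress_yuyv_to_jpeg has to do, the sketch below shows YUYV-to-JPEG compression with libjpeg. It is not mjpg-streamer's actual implementation (which writes straight into the caller-supplied buffer through its own destination manager); this sketch instead uses jpeg_mem_dest(), which requires libjpeg 8 or libjpeg-turbo, and assumes an even frame width:

#include <stdio.h>
#include <stdlib.h>
#include <jpeglib.h>

/* Minimal sketch of YUYV -> JPEG compression (illustrative only, not mjpg-streamer code).
 * On entry *jpeg_out must be NULL; libjpeg allocates the output buffer, which the
 * caller frees. Returns the size of the encoded JPEG in bytes. */
static unsigned long yuyv_to_jpeg(const unsigned char *yuyv, int width, int height,
                                  int quality, unsigned char **jpeg_out)
{
    struct jpeg_compress_struct cinfo;
    struct jpeg_error_mgr jerr;
    unsigned long jpeg_size = 0;
    unsigned char *row = malloc(width * 3);    /* one row of packed Y Cb Cr samples */

    cinfo.err = jpeg_std_error(&jerr);
    jpeg_create_compress(&cinfo);
    jpeg_mem_dest(&cinfo, jpeg_out, &jpeg_size);

    cinfo.image_width = width;
    cinfo.image_height = height;
    cinfo.input_components = 3;
    cinfo.in_color_space = JCS_YCbCr;          /* feed YCbCr directly, no RGB round trip */
    jpeg_set_defaults(&cinfo);
    jpeg_set_quality(&cinfo, quality, TRUE);
    jpeg_start_compress(&cinfo, TRUE);

    while (cinfo.next_scanline < cinfo.image_height) {
        const unsigned char *src = yuyv + cinfo.next_scanline * width * 2;
        /* YUYV packs two pixels as Y0 U Y1 V; both pixels share U and V */
        for (int x = 0; x < width; x += 2) {
            row[x * 3 + 0] = src[x * 2 + 0];   /* Y0 */
            row[x * 3 + 1] = src[x * 2 + 1];   /* U  */
            row[x * 3 + 2] = src[x * 2 + 3];   /* V  */
            row[x * 3 + 3] = src[x * 2 + 2];   /* Y1 */
            row[x * 3 + 4] = src[x * 2 + 1];   /* U  */
            row[x * 3 + 5] = src[x * 2 + 3];   /* V  */
        }
        JSAMPROW rows[1] = { row };
        jpeg_write_scanlines(&cinfo, rows, 1);
    }

    jpeg_finish_compress(&cinfo);
    jpeg_destroy_compress(&cinfo);
    free(row);
    return jpeg_size;
}

On the mini2440 this conversion is expensive, which is exactly why the code comments recommend cameras that deliver MJPEG directly.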
4 Frame storage
The grabbed frame ends up inside the vdIn structure (vd->tmpbuffer or vd->framebuffer, with vd->buf describing it). How does it get into the global buffer pglobal->buf that the output plugins read?
(1) input_init initializes the global pointer
pglobal = param->global;
param->global points to the global structure defined in main() of mjpg-streamer-mini2440-read-only/mjpg_streamer.c, so the plugin and the main program share the same state.
(2) input_run allocates the shared buffer
pglobal->buf = malloc(videoIn->framesizeIn);
if (pglobal->buf == NULL) {
    fprintf(stderr, "could not allocate memory\n");
    exit(EXIT_FAILURE);
}
(3) cam_thread grabs frames
while (!pglobal->stop) {
    /* grab a frame */
    if (uvcGrab(videoIn) < 0) {
        IPRINT("Error grabbing frames\n");
        exit(EXIT_FAILURE);
    }
The grabbed data is stored through the videoIn structure:
int uvcGrab(struct vdIn *vd) {
    ...
    ret = ioctl(vd->fd, VIDIOC_DQBUF, &vd->buf);
    if (ret < 0) {
        perror("Unable to dequeue buffer");
        goto err;
    }
    ...
}
(4) How the videoIn frame reaches pglobal->buf
if (videoIn->formatIn != V4L2_PIX_FMT_MJPEG) {
    DBG("compressing frame\n");
    pglobal->size = compress_yuyv_to_jpeg(videoIn, pglobal->buf, videoIn->framesizeIn, gquality, videoIn->formatIn);
} else {
    DBG("copying frame\n");
    pglobal->size = memcpy_picture(pglobal->buf, videoIn->tmpbuffer, videoIn->buf.bytesused);
}
In the end, the frame held in the videoIn structure (vd->framebuffer for YUYV, vd->tmpbuffer for MJPEG) is compressed or copied into pglobal->buf, pglobal->size records its length, and waiting output plugins are woken via pthread_cond_broadcast(&pglobal->db_update).
【Source Download】
http://download.csdn.net/detail/tandesir/4915905
Please credit the source when reposting; this material is for learning and exchange only and must not be used for commercial purposes.
Copyright @ http://blog.csdn.net/tandesir