3.1 Analysis of mxc_v4l2_capture.c --- the probe function

mxc_v4l2_capture.c provides the interface functions that applications use, so the analysis starts with this file:

(1) A driver is best read starting from its entry function:

module_init(camera_init);

static __init int camera_init(void) 
{ 
	u8 err = 0; 

	pr_debug("In MVC:camera_init\n"); 

	/* Register the device driver structure. */ 
	err = platform_driver_register(&mxc_v4l2_driver); 
	if (err != 0) { 
		pr_err("ERROR: v4l2 capture:camera_init: " 
			"platform_driver_register failed.\n"); 
		return err; 
	} 

	return err; 
}

It contains just one important call: platform_driver_register(&mxc_v4l2_driver). This registers the mxc_v4l2_driver driver on the platform bus; when a matching device appears on that bus, the probe function of the mxc_v4l2_driver structure, mxc_v4l2_probe, is called. The mxc_v4l2_driver structure looks like this:

static struct platform_driver mxc_v4l2_driver = { 
	.driver = { 
		   .name = "mxc_v4l2_capture", 
		   .owner = THIS_MODULE, 
		   .of_match_table = mxc_v4l2_dt_ids, 
		   }, 
	.id_table = imx_v4l2_devtype, 
	.probe = mxc_v4l2_probe, 
	.remove = mxc_v4l2_remove, 
	.suspend = mxc_v4l2_suspend, 
	.resume = mxc_v4l2_resume, 
	.shutdown = NULL, 
};


(2) The mxc_v4l2_probe function

static int mxc_v4l2_probe(struct platform_device *pdev) 
{ 
	/* Create cam and initialize it. */ 
	cam_data *cam = kmalloc(sizeof(cam_data), GFP_KERNEL); 
	if (cam == NULL) { 
		pr_err("ERROR: v4l2 capture: failed to register camera\n"); 
		return -1; 
	} 

	init_camera_struct(cam, pdev); 
	pdev->dev.release = camera_platform_release; 

	/* Set up the v4l2 device and register it*/ 
	cam->self->priv = cam; 
	v4l2_int_device_register(cam->self); 
 
	/* register v4l video device */ 
	if (video_register_device(cam->video_dev, VFL_TYPE_GRABBER, video_nr) 
		< 0) { 
		kfree(cam); 
		cam = NULL; 
		pr_err("ERROR: v4l2 capture: video_register_device failed\n"); 
		return -1; 
	} 
	pr_debug("   Video device registered: %s #%d\n", 
		 cam->video_dev->name, cam->video_dev->minor); 

	if (device_create_file(&cam->video_dev->dev, 
			&dev_attr_fsl_v4l2_capture_property)) 
		dev_err(&pdev->dev, "Error on creating sysfs file" 
			" for capture\n"); 

	if (device_create_file(&cam->video_dev->dev, 
			&dev_attr_fsl_v4l2_overlay_property)) 
		dev_err(&pdev->dev, "Error on creating sysfs file" 
			" for overlay\n"); 

	if (device_create_file(&cam->video_dev->dev, 
			&dev_attr_fsl_csi_property)) 
		dev_err(&pdev->dev, "Error on creating sysfs file" 
			" for csi number\n"); 

	return 0; 
}

This function first allocates memory for cam_data *cam and then calls init_camera_struct(cam, pdev); to initialize the cam structure.


2.1 The init_camera_struct function

static int init_camera_struct(cam_data *cam, struct platform_device *pdev) 
{ 
	const struct of_device_id *of_id = 
			of_match_device(mxc_v4l2_dt_ids, &pdev->dev); 
	struct device_node *np = pdev->dev.of_node; 
	int ipu_id, csi_id, mclk_source; 
	int ret = 0; 
	struct v4l2_device *v4l2_dev; 

	pr_debug("In MVC: init_camera_struct\n"); 

	ret = of_property_read_u32(np, "ipu_id", &ipu_id); 
	if (ret) { 
		dev_err(&pdev->dev, "ipu_id missing or invalid\n"); 
		return ret; 
	} 

	ret = of_property_read_u32(np, "csi_id", &csi_id); 
	if (ret) { 
		dev_err(&pdev->dev, "csi_id missing or invalid\n"); 
		return ret; 
	} 

	ret = of_property_read_u32(np, "mclk_source", &mclk_source); 
	if (ret) { 
		dev_err(&pdev->dev, "sensor mclk missing or invalid\n"); 
		return ret; 
	} 

	/* Default everything to 0 */ 
	memset(cam, 0, sizeof(cam_data)); 

	/* get devtype to distinguish if the cpu is imx5 or imx6 
	 * IMX5_V4L2 specify the cpu is imx5 
	 * IMX6_V4L2 specify the cpu is imx6q or imx6sdl 
	 */ 
	if (of_id) 
		pdev->id_entry = of_id->data; 
	cam->devtype = pdev->id_entry->driver_data; 

	cam->ipu = ipu_get_soc(ipu_id); 
	if (cam->ipu == NULL) { 
		pr_err("ERROR: v4l2 capture: failed to get ipu\n"); 
		return -EINVAL; 
	} else if (cam->ipu == ERR_PTR(-ENODEV)) { 
		pr_err("ERROR: v4l2 capture: get invalid ipu\n"); 
		return -ENODEV; 
	} 

	init_MUTEX(&cam->param_lock); 
	init_MUTEX(&cam->busy_lock); 

	cam->video_dev = video_device_alloc(); 
	if (cam->video_dev == NULL) 
		return -ENODEV; 

	*(cam->video_dev) = mxc_v4l_template; 

	video_set_drvdata(cam->video_dev, cam); 
	dev_set_drvdata(&pdev->dev, (void *)cam); 
	cam->video_dev->minor = -1; 

	v4l2_dev = kzalloc(sizeof(*v4l2_dev), GFP_KERNEL); 
	if (!v4l2_dev) { 
		dev_err(&pdev->dev, "failed to allocate v4l2_dev structure\n"); 
		video_device_release(cam->video_dev); 
		return -ENOMEM; 
	} 

	if (v4l2_device_register(&pdev->dev, v4l2_dev) < 0) { 
		dev_err(&pdev->dev, "register v4l2 device failed\n"); 
		video_device_release(cam->video_dev); 
		kfree(v4l2_dev); 
		return -ENODEV; 
	} 
	cam->video_dev->v4l2_dev = v4l2_dev; 

	init_waitqueue_head(&cam->enc_queue); 
	init_waitqueue_head(&cam->still_queue); 

	/* setup cropping */ 
	cam->crop_bounds.left = 0; 
	cam->crop_bounds.width = 640; 
	cam->crop_bounds.top = 0; 
	cam->crop_bounds.height = 480; 
	cam->crop_current = cam->crop_defrect = cam->crop_bounds; 
	ipu_csi_set_window_size(cam->ipu, cam->crop_current.width, 
				cam->crop_current.height, cam->csi); 
	ipu_csi_set_window_pos(cam->ipu, cam->crop_current.left, 
				cam->crop_current.top, cam->csi); 
	cam->streamparm.parm.capture.capturemode = 0; 

	cam->standard.index = 0; 
	cam->standard.id = V4L2_STD_UNKNOWN; 
	cam->standard.frameperiod.denominator = 30; 
	cam->standard.frameperiod.numerator = 1; 
	cam->standard.framelines = 480; 
	cam->standard_autodetect = true; 
	cam->streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; 
	cam->streamparm.parm.capture.timeperframe = cam->standard.frameperiod; 
	cam->streamparm.parm.capture.capability = V4L2_CAP_TIMEPERFRAME; 
	cam->overlay_on = false; 
	cam->capture_on = false; 
	cam->v4l2_fb.flags = V4L2_FBUF_FLAG_OVERLAY; 

	cam->v2f.fmt.pix.sizeimage = 352 * 288 * 3 / 2; 
	cam->v2f.fmt.pix.bytesperline = 288 * 3 / 2; 
	cam->v2f.fmt.pix.width = 288; 
	cam->v2f.fmt.pix.height = 352; 
	cam->v2f.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420; 
	cam->win.w.width = 160; 
	cam->win.w.height = 160; 
	cam->win.w.left = 0; 
	cam->win.w.top = 0; 

	cam->ipu_id = ipu_id; 
	cam->csi = csi_id; 
	cam->mclk_source = mclk_source; 
	cam->mclk_on[cam->mclk_source] = false; 

	cam->enc_callback = camera_callback; 
	init_waitqueue_head(&cam->power_queue); 
	spin_lock_init(&cam->queue_int_lock); 
	spin_lock_init(&cam->dqueue_int_lock); 

	cam->self = kmalloc(sizeof(struct v4l2_int_device), GFP_KERNEL); 
	cam->self->module = THIS_MODULE; 
	sprintf(cam->self->name, "mxc_v4l2_cap%d", cam->csi); 
	cam->self->type = v4l2_int_type_master; 
	cam->self->u.master = &mxc_v4l2_master; 

	return 0; 
}

2.1.1 First, of_match_device takes the of_node of pdev->dev and finds the best-matching of_device_id entry in the mxc_v4l2_dt_ids table, assigning it to of_id. Here of_id points to the entry

	{
		.compatible = "fsl,imx6q-v4l2-capture",
		.data = &imx_v4l2_devtype[IMX6_V4L2],
	},
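For reference, the two tables involved in this matching are typically declared along the following lines in mxc_v4l2_capture.c (a sketch reconstructed from the fragment above; the exact entry names may differ between BSP versions):

enum {
	IMX5_V4L2,
	IMX6_V4L2,
};

static const struct platform_device_id imx_v4l2_devtype[] = {
	{ .name = "v4l2-capture-imx5", .driver_data = IMX5_V4L2, },
	{ .name = "v4l2-capture-imx6", .driver_data = IMX6_V4L2, },
	{ /* sentinel */ }
};

static const struct of_device_id mxc_v4l2_dt_ids[] = {
	{
		.compatible = "fsl,imx6q-v4l2-capture",
		.data = &imx_v4l2_devtype[IMX6_V4L2],
	},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxc_v4l2_dt_ids);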


2.1.2 Three of_property_read_u32 calls then read the "ipu_id", "csi_id" and "mclk_source" properties from pdev->dev.of_node and store them in ipu_id, csi_id and mclk_source respectively. After that, memset zeroes the whole cam_data structure pointed to by cam.

Next, pdev->id_entry = of_id->data; sets the id_entry member of pdev to of_id->data, which here means pdev->id_entry = &imx_v4l2_devtype[IMX6_V4L2].

Then cam->devtype = pdev->id_entry->driver_data; since id_entry was assigned in the previous step, this sets cam->devtype to IMX6_V4L2.


2.1.3 Next comes cam->ipu = ipu_get_soc(ipu_id);. The ipu_get_soc function is defined in ipu_common.c, as shown below:

struct ipu_soc *ipu_get_soc(int id) 
{ 
	if (id >= MXC_IPU_MAX_NUM) 
		return ERR_PTR(-ENODEV); 
	else if (!ipu_array[id].online) 
		return ERR_PTR(-ENODEV); 
	else 
		return &(ipu_array[id]); 
} 
EXPORT_SYMBOL_GPL(ipu_get_soc);

The ipu_array referenced here is a global array defined earlier in the same file:

static struct ipu_soc ipu_array[MXC_IPU_MAX_NUM];
#define MXC_IPU_MAX_NUM		2

So there can currently be at most two IPUs, and ipu_get_soc simply looks up the requested one in ipu_array by ipu_id. It first checks the id: if it is not smaller than the maximum number of IPUs, it returns an error pointer immediately. It then checks ipu_array[id].online, a bool field of struct ipu_soc indicating whether this IPU has been brought up and is available; if the IPU is not online, ERR_PTR(-ENODEV) is returned as well. If both checks pass, the function returns the address of the matching ipu_soc structure. Looking at cam_data, the member this is stored in is declared as void *ipu, while ipu_get_soc actually returns a struct ipu_soc *. In this case the return value is &(ipu_array[id]).

ipu_get_soc therefore returns either a valid pointer on success or ERR_PTR(-ENODEV) on failure, and init_camera_struct checks the return value as follows:

	cam->ipu = ipu_get_soc(ipu_id); 
	if (cam->ipu == NULL) { 
		pr_err("ERROR: v4l2 capture: failed to get ipu\n"); 
		return -EINVAL; 
	} else if (cam->ipu == ERR_PTR(-ENODEV)) { 
		pr_err("ERROR: v4l2 capture: get invalid ipu\n"); 
		return -ENODEV; 
	}
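As a side note, kernel code usually checks such ERR_PTR-style return values with the IS_ERR()/PTR_ERR() helpers from <linux/err.h>; an equivalent check could be sketched as:

	cam->ipu = ipu_get_soc(ipu_id);
	if (IS_ERR_OR_NULL(cam->ipu)) {
		pr_err("ERROR: v4l2 capture: failed to get ipu\n");
		/* NULL -> -EINVAL, ERR_PTR(-ENODEV) -> -ENODEV */
		return cam->ipu ? PTR_ERR(cam->ipu) : -EINVAL;
	}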


2.1.4 Next, the param_lock and busy_lock semaphores are initialized (via init_MUTEX) and a video_device is allocated for cam->video_dev; then *(cam->video_dev) = mxc_v4l_template; copies the mxc_v4l_template structure into it. mxc_v4l_template looks like this:

static struct video_device mxc_v4l_template = { 
	.name = "Mxc Camera", 
	.fops = &mxc_v4l_fops, 
	.release = video_device_release, 
};
static struct v4l2_file_operations mxc_v4l_fops = { 
	.owner = THIS_MODULE, 
	.open = mxc_v4l_open, 
	.release = mxc_v4l_close, 
	.read = mxc_v4l_read, 
	.ioctl = mxc_v4l_ioctl, 
	.mmap = mxc_mmap, 
	.poll = mxc_poll, 
};

This step provides the file interface for user space: when an application calls open, read, poll and so on, the corresponding mxc_v4l_open, mxc_v4l_read, mxc_poll, etc. are invoked. All of these are implemented in mxc_v4l2_capture.c and will be analyzed later.
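To see the whole path from user space, a minimal application fragment like the one below ends up in mxc_v4l_open and mxc_v4l_ioctl via the v4l2 core (the /dev/video0 node name is an assumption; the actual number depends on video_nr):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	int fd = open("/dev/video0", O_RDWR);		/* -> v4l2_open -> mxc_v4l_open */

	if (fd < 0)
		return 1;
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)	/* -> v4l2_ioctl -> mxc_v4l_ioctl */
		printf("card: %s\n", cap.card);
	close(fd);					/* -> v4l2_release -> mxc_v4l_close */
	return 0;
}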


2.1.5 Next, the two calls

video_set_drvdata(cam->video_dev, cam); 
dev_set_drvdata(&pdev->dev, (void *)cam);

store the cam structure as the private (driver) data of cam->video_dev and of &pdev->dev respectively; that is, cam->video_dev->dev.p->driver_data and pdev->dev.p->driver_data both end up pointing to cam.

The two helpers themselves are not analyzed in detail here.
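The point of stashing cam in both places is that later callbacks can fetch it back from whichever handle they are given; a rough sketch of the usual retrieval pattern:

	/* inside a file operation: from the struct file to the video_device to cam */
	struct video_device *vdev = video_devdata(file);
	cam_data *cam = video_get_drvdata(vdev);

	/* inside a platform callback such as remove or suspend */
	cam_data *cam2 = (cam_data *)platform_get_drvdata(pdev);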


2.1.6 Then cam->video_dev->minor is set to -1. struct video_device carries the following comment about this field:

/*'minor' is set to -1 if the registration failed */

Since we are still only initializing things in init_camera_struct, the field is set to this "not yet registered" initial value here; it receives a real minor number when the device is registered.


2.1.7 Next, memory for the v4l2_dev structure is allocated and zeroed, and v4l2_device_register(&pdev->dev, v4l2_dev) is called. This function initializes several members of v4l2_dev and associates it with &pdev->dev; afterwards cam->video_dev->v4l2_dev = v4l2_dev; attaches v4l2_dev to the cam->video_dev structure.


2.1.8 The two statements

init_waitqueue_head(&cam->enc_queue); 
init_waitqueue_head(&cam->still_queue);

initialize the two wait-queue heads in the cam structure. The queues themselves will be analyzed when they are used; a generic sketch of the typical usage follows below.
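As a reminder of how such wait queues are generally used (a generic sketch, not this driver's exact code; the enc_counter field is used purely for illustration): one side sleeps on the queue until a condition becomes true, the other side updates the condition and wakes the queue.

	/* waiting side, e.g. a dequeue path waiting for a finished frame */
	wait_event_interruptible(cam->enc_queue, cam->enc_counter != 0);
	cam->enc_counter--;

	/* completion side, e.g. the frame-done callback */
	cam->enc_counter++;
	wake_up_interruptible(&cam->enc_queue);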


2.1.9 After that, initial values are assigned to a number of other fields of the cam_data structure:

        cam->crop_bounds.left = 0; 
	cam->crop_bounds.width = 640; 
	cam->crop_bounds.top = 0; 
	cam->crop_bounds.height = 480;

The first thing set is crop_bounds, i.e. the size and position of the cropping rectangle. It is of type struct v4l2_rect:

struct v4l2_rect { 
	__s32   left; 
	__s32   top; 
	__u32   width; 
	__u32   height; 
};

Why does this structure use exactly these four values? If you want to crop an arbitrary rectangular region of the picture, knowing the coordinates of its top-left corner plus its width and height is enough: the positions of the other three corners can be computed from them, as illustrated in the sketch below.
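A trivial sketch of that computation:

	struct v4l2_rect r = { .left = 0, .top = 0, .width = 640, .height = 480 };
	__s32 right  = r.left + r.width  - 1;	/* x of the right edge:  639 */
	__s32 bottom = r.top  + r.height - 1;	/* y of the bottom edge: 479 */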

Then cam->crop_current = cam->crop_defrect = cam->crop_bounds; copies the same rectangle into the two similar members crop_current and crop_defrect, so all three are initialized to a region with its top-left corner at (0, 0) and a size of 640 × 480.

After that,

	ipu_csi_set_window_size(cam->ipu, cam->crop_current.width,
				cam->crop_current.height, cam->csi);

sets the size of the capture window. ipu_csi_set_window_size is defined in ipu_capture.c; a quick look shows that its core is the following write:

ipu_csi_write(ipu, csi, (width - 1) | (height - 1) << 16, CSI_ACT_FRM_SIZE);

	---> writel(value, ipu->csi_reg[csi] + offset);

where value = (width - 1) | (height - 1) << 16 and offset = CSI_ACT_FRM_SIZE. In other words, based on cam->ipu and cam->csi the value (width - 1) | (height - 1) << 16 is written into a CSI register. (Note that at this point cam->csi is still 0 from the earlier memset; the assignment cam->csi = csi_id only happens further down.) The base address of the CSI register block was already obtained when ipu_get_soc fetched the ipu, and CSI_ACT_FRM_SIZE is the offset from that base address; it is defined in drivers/mxc/ipu3/ipu_regs.h.
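For the 640 × 480 window configured here, the value written to CSI_ACT_FRM_SIZE works out as:

	/* width = 640, height = 480 */
	u32 value = (640 - 1) | ((480 - 1) << 16);	/* 0x27F | (0x1DF << 16) = 0x01DF027F */
	/* low half-word: active frame width - 1; high half-word: active frame height - 1 */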

Next comes

	ipu_csi_set_window_pos(cam->ipu, cam->crop_current.left,
				cam->crop_current.top, cam->csi);

which is also defined in ipu_capture.c and, based on the ipu and csi values, programs the window position into the corresponding low-level register.

So the four initial crop_bounds values set above are pushed down into the hardware registers by ipu_csi_set_window_size and ipu_csi_set_window_pos.


2.1.10 The code then continues to fill in initial values for further cam_data members: streamparm, standard, standard_autodetect, overlay_on, capture_on, v2f and win. These are not analyzed here; they will be discussed if and when they are used.

Then a few more fields are set:

	cam->ipu_id = ipu_id; 
	cam->csi = csi_id; 
	cam->mclk_source = mclk_source; 
	cam->mclk_on[cam->mclk_source] = false;

A more important one is the following:

cam->enc_callback = camera_callback; sets the enc_callback callback in the cam_data structure. camera_callback is defined in this same file and belongs to the interrupt handling path: during video capture, when a buffer has been filled an interrupt is raised, and the interrupt handler eventually calls this function to process it.


2.1.11 Finally, the self member of cam_data, which is of type struct v4l2_int_device, is set up:

	cam->self = kmalloc(sizeof(struct v4l2_int_device), GFP_KERNEL); 
	cam->self->module = THIS_MODULE; 
	sprintf(cam->self->name, "mxc_v4l2_cap%d", cam->csi); 
	cam->self->type = v4l2_int_type_master; 
	cam->self->u.master = &mxc_v4l2_master;

Memory is allocated first, module is set to THIS_MODULE, the name is formatted as "mxc_v4l2_cap%d" based on the csi number, and the type and u.master fields of self are filled in (type is v4l2_int_type_master, u.master points to mxc_v4l2_master). Later in probe, this cam->self structure is registered onto the int_list list as a master device.


With that, the cam_data structure is fully initialized. It really is a huge structure.


2.2 Back in the probe function, the next line is:

pdev->dev.release = camera_platform_release;

This installs a release callback in the device structure. The function itself does nothing: the driver core complains if a device has no release callback, so an empty stub is provided, presumably along the lines sketched below.


2.3 Next come

cam->self->priv = cam;

v4l2_int_device_register(cam->self);

The self member of cam_data is of type struct v4l2_int_device. First, cam is stored as the private data of self. Many of the functions that later operate on cam->self receive only a struct v4l2_int_device *self as their formal parameter; when they need the enclosing cam_data structure, they recover it from self->priv, as sketched below. Then v4l2_int_device_register is called to register the v4l2_int_device.
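A sketch of that pattern, with a hypothetical handler name:

/* hypothetical master-side handler: only the v4l2_int_device is passed in */
static int some_master_handler(struct v4l2_int_device *s)
{
	cam_data *cam = s->priv;	/* recover the enclosing cam_data */

	pr_debug("handler for ipu %d / csi %d\n", cam->ipu_id, cam->csi);
	return 0;
}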

This function is defined in v4l2-int-device.c, as shown below:

int v4l2_int_device_register(struct v4l2_int_device *d) 
{ 
	if (d->type == v4l2_int_type_slave) 
		sort(d->u.slave->ioctls, d->u.slave->num_ioctls,  /* sort by ioctl number to speed up lookups */ 
		     sizeof(struct v4l2_int_ioctl_desc), 
		     &ioctl_sort_cmp, NULL); 
	mutex_lock(&mutex); 
	list_add(&d->head, &int_list); /* both masters and slaves are added to int_list */ 
	v4l2_int_device_try_attach_all(); /* each registration triggers a master/slave matching pass */ 
	mutex_unlock(&mutex); 

	return 0; 
} 
EXPORT_SYMBOL_GPL(v4l2_int_device_register);

The registration of masters and slaves in this v4l2 int-device framework was analyzed in detail in the previous section, so it is not repeated here.


2.4 Next comes the core call of the probe function: video_register_device. It is a thin inline wrapper defined in v4l2-dev.h that forwards to __video_register_device (the closely related no-warn variant is shown below):

static inline int __must_check video_register_device_no_warn( 
		struct video_device *vdev, int type, int nr) 
{ 
	return __video_register_device(vdev, type, nr, 0, vdev->fops->owner); 
}

__video_register_devicev4l2-dev.c中定义:(就直接在代码中注释了)

int __video_register_device(struct video_device *vdev, int type, int nr, 
		int warn_if_nr_in_use, struct module *owner) 
{ 
	int i = 0; 
	int ret; 
	int minor_offset = 0; 
	int minor_cnt = VIDEO_NUM_DEVICES; 
	const char *name_base; 

	/* A minor value of -1 marks this video device as never 
	   having been registered */ 
	vdev->minor = -1; 

	/* the release callback MUST be present */ 
	if (WARN_ON(!vdev->release)) 
		return -EINVAL; 

/* If no release callback was provided, an error is returned right away. Where was it initialized? In init_camera_struct(cam, pdev) above: the assignment *(cam->video_dev) = mxc_v4l_template; supplied it, since mxc_v4l_template contains

.release = video_device_release */


/* the v4l2_dev pointer MUST be present */ 
	if (WARN_ON(!vdev->v4l2_dev)) 
		return -EINVAL;

/* The v4l2_dev pointer must also be provided; it too was set up in init_camera_struct. */

	/* v4l2_fh support */ 
	spin_lock_init(&vdev->fh_lock); 
	INIT_LIST_HEAD(&vdev->fh_list); 

	/* Part 1: check device type */ 
	switch (type) { 
	case VFL_TYPE_GRABBER: 
		name_base = "video"; 
		break; 
	case VFL_TYPE_VBI: 
		name_base = "vbi"; 
		break; 
	case VFL_TYPE_RADIO: 
		name_base = "radio"; 
		break; 
	case VFL_TYPE_SUBDEV: 
		name_base = "v4l-subdev"; 
		break; 
	default: 
		printk(KERN_ERR "%s called with unknown type: %d\n", 
		       __func__, type); 
		return -EINVAL; 
	} 

/* The type argument passed in determines the device name base seen under /dev. */

	vdev->vfl_type = type; 
	vdev->cdev = NULL; 
	if (vdev->v4l2_dev) { 
		if (vdev->v4l2_dev->dev) 
			vdev->parent = vdev->v4l2_dev->dev; 
		if (vdev->ctrl_handler == NULL) 
			vdev->ctrl_handler = vdev->v4l2_dev->ctrl_handler; 
		/* If the prio state pointer is NULL, then use the v4l2_device 
		   prio state. */ 
		if (vdev->prio == NULL) 
			vdev->prio = &vdev->v4l2_dev->prio; 
	} 

/* Initialize vdev's parent device and ctrl handler. */

	/* Part 2: find a free minor, device node number and device index. */ 
#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES 
	/* Keep the ranges for the first four types for historical 
	 * reasons. 
	 * Newer devices (not yet in place) should use the range 
	 * of 128-191 and just pick the first free minor there 
	 * (new style). */ 
	switch (type) { 
	case VFL_TYPE_GRABBER: 
		minor_offset = 0; 
		minor_cnt = 64; 
		break; 
	case VFL_TYPE_RADIO: 
		minor_offset = 64; 
		minor_cnt = 64; 
		break; 
	case VFL_TYPE_VBI: 
		minor_offset = 224; 
		minor_cnt = 32; 
		break; 
	default: 
		minor_offset = 128; 
		minor_cnt = 64; 
		break; 
	} 
#endif 

	/* Pick a device node number */ 
	mutex_lock(&videodev_lock); 
	nr = devnode_find(vdev, nr == -1 ? 0 : nr, minor_cnt); 
	if (nr == minor_cnt) 
		nr = devnode_find(vdev, 0, minor_cnt); 
	if (nr == minor_cnt) { 
		printk(KERN_ERR "could not get a free device node number\n"); 
		mutex_unlock(&videodev_lock); 
		return -ENFILE; 
	} 
#ifdef CONFIG_VIDEO_FIXED_MINOR_RANGES 
	/* 1-on-1 mapping of device node number to minor number */ 
	i = nr; 
#else 
	/* The device node number and minor numbers are independent, so 
	   we just find the first free minor number. */ 
	for (i = 0; i < VIDEO_NUM_DEVICES; i++) 
		if (video_device[i] == NULL) 
			break; 
	if (i == VIDEO_NUM_DEVICES) { 
		mutex_unlock(&videodev_lock); 
		printk(KERN_ERR "could not get a free minor\n"); 
		return -ENFILE; 
	} 
#endif 
	vdev->minor = i + minor_offset; 
	vdev->num = nr; 
	devnode_set(vdev); 

	/* Should not happen since we thought this minor was free */ 
	WARN_ON(video_device[vdev->minor] != NULL); 
	vdev->index = get_index(vdev); 
	video_device[vdev->minor] = vdev;
	mutex_unlock(&videodev_lock); 

	if (vdev->ioctl_ops) 
		determine_valid_ioctls(vdev);

/* Part 2 above determines the device node number and the minor number of the device. */

	/* Part 3: Initialize the character device */ 
	vdev->cdev = cdev_alloc(); 
	if (vdev->cdev == NULL) { 
		ret = -ENOMEM; 
		goto cleanup; 
	} 

/* Here the character device is allocated with cdev_alloc and its parameters are set up; this shows that underneath it is an ordinary character device driver. Why a character device? That becomes clear from the v4l2 framework. */

	vdev->cdev->ops = &v4l2_fops; 

/*cdev结构体里面的ops指向了v4l2_fops这个结构体,这个v4l2_fops结构体也是在v4l2-dev.c这个文件中。它是一个file_operations操作函数集,

static const struct file_operations v4l2_fops = { 
	.owner = THIS_MODULE, 
	.read = v4l2_read, 
	.write = v4l2_write, 
	.open = v4l2_open, 
	.get_unmapped_area = v4l2_get_unmapped_area, 
	.mmap = v4l2_mmap, 
	.unlocked_ioctl = v4l2_ioctl, 
#ifdef CONFIG_COMPAT 
	.compat_ioctl = v4l2_compat_ioctl32, 
#endif 
	.release = v4l2_release, 
	.poll = v4l2_poll, 
	.llseek = no_llseek, 
};

Looking closely at those functions, one finds that they ultimately call back into the functions provided by cam->video_dev, i.e. the mxc_v4l_fops referenced by the mxc_v4l_template structure.

*/

	vdev->cdev->owner = owner; 
	ret = cdev_add(vdev->cdev, MKDEV(VIDEO_MAJOR, vdev->minor), 1); 
	if (ret < 0) { 
		printk(KERN_ERR "%s: cdev_add failed\n", __func__); 
		kfree(vdev->cdev); 
		vdev->cdev = NULL; 
		goto cleanup; 
	} 


	/* Part 4: register the device with sysfs */ 
	vdev->dev.class = &video_class; 
	vdev->dev.devt = MKDEV(VIDEO_MAJOR, vdev->minor); 
	if (vdev->parent) 
		vdev->dev.parent = vdev->parent; 
	dev_set_name(&vdev->dev, "%s%d", name_base, vdev->num); 
	ret = device_register(&vdev->dev); 
	if (ret < 0) { 
		printk(KERN_ERR "%s: device_register failed\n", __func__); 
		goto cleanup; 
	} 
	/* Register the release callback that will be called when the last 
	   reference to the device goes away. */ 
	vdev->dev.release = v4l2_device_release; 

	if (nr != -1 && nr != vdev->num && warn_if_nr_in_use) 
		printk(KERN_WARNING "%s: requested %s%d, got %s\n", __func__, 
			name_base, nr, video_device_node_name(vdev)); 

	/* Increase v4l2_device refcount */ 
	if (vdev->v4l2_dev) 
		v4l2_device_get(vdev->v4l2_dev); 
/* Register the device with sysfs: the device node is created under the video class. */

#if defined(CONFIG_MEDIA_CONTROLLER) 
	/* Part 5: Register the entity. */ 
	if (vdev->v4l2_dev && vdev->v4l2_dev->mdev && 
	    vdev->vfl_type != VFL_TYPE_SUBDEV) { 
		vdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L; 
		vdev->entity.name = vdev->name; 
		vdev->entity.info.v4l.major = VIDEO_MAJOR; 
		vdev->entity.info.v4l.minor = vdev->minor; 
		ret = media_device_register_entity(vdev->v4l2_dev->mdev, 
			&vdev->entity); 
		if (ret < 0) 
			printk(KERN_WARNING 
			       "%s: media_device_register_entity failed\n", 
			       __func__); 
	} 
#endif 

/* Register the media entity. This step is not mandatory: it only runs when CONFIG_MEDIA_CONTROLLER is enabled. The media_entity structure used here will be analyzed later. */

	/* Part 6: Activate this minor. The char device can now be used. */ 
	set_bit(V4L2_FL_REGISTERED, &vdev->flags); 

/* Set the V4L2_FL_REGISTERED flag. */

	mutex_lock(&videodev_lock); 
	video_device[vdev->minor] = vdev; 

/* Store the fully set-up video_device structure vdev into the video_device array, indexed by minor number. That array was defined earlier as: static struct video_device *video_device[VIDEO_NUM_DEVICES]; */

	mutex_unlock(&videodev_lock); 

	return 0; 

cleanup: 
	mutex_lock(&videodev_lock); 
	if (vdev->cdev) 
		cdev_del(vdev->cdev); 
	devnode_clear(vdev); 
	mutex_unlock(&videodev_lock); 
	/* Mark this video device as never having been registered. */ 
	vdev->minor = -1; 
	return ret; 
} 
EXPORT_SYMBOL(__video_register_device);
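This also explains why the driver's own fops still get called: the v4l2 core entry points registered above (v4l2_open and friends) look up the video_device that owns the minor number and forward the call to vdev->fops. A heavily simplified sketch of that dispatch (not the exact kernel source):

static int v4l2_open(struct inode *inode, struct file *filp)
{
	struct video_device *vdev;
	int ret = -ENODEV;

	mutex_lock(&videodev_lock);
	vdev = video_devdata(filp);		/* essentially video_device[iminor(inode)] */
	mutex_unlock(&videodev_lock);
	/* ... refcounting and "is it still registered" checks omitted ... */

	if (vdev && vdev->fops->open)
		ret = vdev->fops->open(filp);	/* -> mxc_v4l_open for this driver */

	return ret;
}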


2.5 Finally, device_create_file is called three times to create the sysfs attribute files for dev_attr_fsl_v4l2_capture_property, dev_attr_fsl_v4l2_overlay_property and dev_attr_fsl_csi_property under the video device's directory in /sys/class/.
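Each of these attributes is declared with DEVICE_ATTR and backed by a show function. A rough sketch of what one of them might look like (the function name and output format are assumptions, not the driver's actual code):

static ssize_t show_csi_property(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct video_device *video_dev = container_of(dev,
						      struct video_device, dev);
	cam_data *cam = video_get_drvdata(video_dev);

	return sprintf(buf, "ipu%d_csi%d\n", cam->ipu_id, cam->csi);
}
static DEVICE_ATTR(fsl_csi_property, S_IRUGO, show_csi_property, NULL);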


With that, the analysis of the mxc_v4l2_probe function is complete.
