Block Device Driver Study Notes (2): An Example Template

 

The vmem_disk driver

This driver falls roughly into three parts: module load/unload, the implementation of the methods in the block_device_operations structure, and the I/O request handling functions.

The first two parts are fairly easy to understand, so only the last part is posted here.
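The code below keeps referring to dev->data, dev->size and q->queuedata. The device structure itself is not posted, but judging from how those fields are used it is roughly of the following shape (the field names and types are assumptions, not the author's actual definition):

struct vmem_disk_dev {
	unsigned long size;            /* device size in bytes */
	u8 *data;                      /* memory buffer backing the "disk" */
	spinlock_t lock;               /* protects the request queue */
	struct request_queue *queue;   /* the device request queue */
	struct gendisk *gd;            /* the gendisk structure */
};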

 

static void vmem_disk_transfer(struct vmem_disk_dev *dev, unsigned long sector,
		unsigned long nsect, char *buffer, int write)
{
	unsigned long offset = sector * KERNEL_SECTOR_SIZE;
	unsigned long nbytes = nsect * KERNEL_SECTOR_SIZE;

	if ((offset + nbytes) > dev->size) {
		printk(KERN_NOTICE "Beyond-end write (%ld %ld)\n", offset, nbytes);
		return;
	}
	if (write)
		memcpy(dev->data + offset, buffer, nbytes);
	else
		memcpy(buffer, dev->data + offset, nbytes);
}

 

//The code above performs the actual device "I/O". memcpy() is used because this disk is backed by ordinary memory (dev->data), so transferring sectors is nothing more than copying bytes between that buffer and the caller's buffer.
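KERNEL_SECTOR_SIZE is not defined in the posted code; following the usual LDD3 convention it is presumably just the kernel's fixed 512-byte sector, something like:

#define KERNEL_SECTOR_SIZE 512  /* the kernel always addresses block devices in 512-byte sectors */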

static void vmem_disk_request(struct request_queue *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		struct vmem_disk_dev *dev = req->rq_disk->private_data;

		if (!blk_fs_request(req)) {
			printk(KERN_NOTICE "Skip non-fs request\n");
			end_request(req, 0);
			continue;
		}
		vmem_disk_transfer(dev, req->sector, req->current_nr_sectors,
				req->buffer, rq_data_dir(req));
		end_request(req, 1);
	}
}

 

/* The function above is the handler bound to the request queue when request_mode == RM_SIMPLE. Every parameter passed to vmem_disk_transfer comes straight from the struct request; elv_next_request() pulls the next request to be processed off the queue. */
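The module-load code is not posted, but in this mode the binding is presumably the classic blk_init_queue() call, roughly as follows (the queue and lock field names are assumptions, and the error label is only illustrative):

	/* RM_SIMPLE: a normal request queue whose handler is vmem_disk_request */
	spin_lock_init(&dev->lock);
	dev->queue = blk_init_queue(vmem_disk_request, &dev->lock);
	if (dev->queue == NULL)
		goto out_vfree;   /* error path in the (unposted) init code */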

 

static int vmem_disk_xfer_bio(struct vmem_disk_dev *dev, struct bio *bio)
{
	int i;
	struct bio_vec *bvec;
	sector_t sector = bio->bi_sector;

	/* walk every segment of this bio and copy it in or out */
	bio_for_each_segment(bvec, bio, i) {
		char *buffer = __bio_kmap_atomic(bio, i, KM_USER0);
		vmem_disk_transfer(dev, sector, bio_cur_sectors(bio),
				buffer, bio_data_dir(bio) == WRITE);
		sector += bio_cur_sectors(bio);
		__bio_kunmap_atomic(bio, KM_USER0);
	}
	return 0;
}

 

/* The function above is called by the "make request" function; in effect it produces the same result that handling a real request would. The whole point of binding a make-request function is to bypass requests: requests are what the block layer builds out of the bios submitted from above, so here the bio alone has to supply the addresses for the transfer. __bio_kmap_atomic() returns the kernel virtual address of the i-th segment of the bio; it is a plain byte pointer (char *) so that it can be handed straight to memcpy(). All parameters of the subsequent vmem_disk_transfer call are taken from the bio. */
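For reference, in 2.6-era kernels __bio_kmap_atomic() is roughly the following macro (from include/linux/bio.h of that period, quoted from memory): kmap_atomic() maps the segment's page, and adding bv_offset moves to the start of the data within that page, which is why the result is usable as a byte pointer:

#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +	\
	 bio_iovec_idx((bio), (idx))->bv_offset)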

 

static int vmem_disk_xfer_request(struct vmem_disk_dev *dev, struct request *req)
{
	struct req_iterator iter;
	int nsect = 0;
	struct bio_vec *bvec;

	/* walk every segment of every bio belonging to this request */
	rq_for_each_segment(bvec, req, iter) {
		char *buffer = __bio_kmap_atomic(iter.bio, iter.i, KM_USER0);
		sector_t sector = iter.bio->bi_sector;

		vmem_disk_transfer(dev, sector, bio_cur_sectors(iter.bio),
				buffer, bio_data_dir(iter.bio) == WRITE);
		sector += bio_cur_sectors(iter.bio);
		__bio_kunmap_atomic(iter.bio, KM_USER0);
		nsect += iter.bio->bi_size / KERNEL_SECTOR_SIZE;
	}
	return nsect;
}

 

/* The flow above is very similar to vmem_disk_xfer_bio; the difference is the extra struct req_iterator, through which everything is accessed as iter.bio and iter.i. This function is used when request_mode == RM_FULL. It is more capable not because of the structure itself, but because rq_for_each_segment() walks every segment of every bio chained onto the request, whereas vmem_disk_xfer_bio only ever handles a single bio. */
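In kernels of that era (roughly 2.6.24 to 2.6.30) the iterator is defined in include/linux/blkdev.h approximately as below (quoted from memory): the outer __rq_for_each_bio loop visits every bio attached to the request, and the inner bio_for_each_segment visits every segment of that bio.

struct req_iterator {
	int i;
	struct bio *bio;
};

#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)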

static void vmem_disk_full_request(struct request_queue *q)
{
	struct request *req;
	int sectors_xferred;
	struct vmem_disk_dev *dev = q->queuedata;

	while ((req = elv_next_request(q)) != NULL) {
		if (!blk_fs_request(req)) {
			printk(KERN_NOTICE "Skip non-fs request\n");
			end_request(req, 0);
			continue;
		}
		sectors_xferred = vmem_disk_xfer_request(dev, req);
		end_request(req, 1);
	}
}

 

static int vmem_disk_make_request(struct request_queue *q, struct bio *bio)
{
	struct vmem_disk_dev *dev = q->queuedata;
	int status;

	status = vmem_disk_xfer_bio(dev, bio);
	bio_endio(bio, status);
	return 0;
}
