前言:
本来最初的目的只是为了研究内核中nand的驱动,但好奇心太强,忍不住往上层追溯,然后再往下层跟踪。这个过程中,
特别是在block层、yaffs文件系统和用户空间中相关的分析,其实仅仅只是找到了它们之间的关键联系点而已,没有深
入详细分析。
* linux2.6.29
* pxa935
* Hynix NAND 512MB 1.8V 16-bit
* 李枝果/lizgo 2010-11-8 [email protected]
* 由于本人水平有限,望读者阅读时三思,同时也指正我的错误,谢谢!
话说MTD子系统向上层提供了几种在用户空间可以直接使用的接口:Raw char device、Raw block device、FTL、NFTL、
JFFS(2)。前文中主要讨论的yaffs文件系统没有包含在其中,因为yaffs是直接建立在MTD原始设备层之上的。在nand驱动
注册的时候,会在probe函数中将nand的每个分区通过mtd块设备层、通用磁盘层、block层向内核注册成一个block设备。
另外挂载yaffs文件系统的时候呢,就会通过设备节点找到block层中对应的block_device结构体,最后在填充超级块的时候
直接通过主次设备号在mtd_table[]中找到对应的mtd_info结构体。而且在yaffs文件系统层封装的读写等函数中,也是将
找到的mtd_info结构体传入,直接利用了mtd_info结构体中的相关读写函数指针来进行底层的读写。
本文主要讨论的接口是Raw block device,也就是传说中的mtdblock,本文中称为mtdblock翻译层,
主要集中在文件drivers/mtd/mtdblock.c
一、init_mtdblock()
/*
 * Operation set of the mtdblock translation layer.
 * major 31 is MTD_BLOCK_MAJOR; part_bits == 0 means one minor number per
 * device (no sub-partitioning of an mtdblock device); the logical sector
 * size presented to the block layer is 512 bytes.
 */
static struct mtd_blktrans_ops mtdblock_tr = {
.name = "mtdblock",
.major = 31,
.part_bits = 0,
.blksize = 512,
.open = mtdblock_open,
.flush = mtdblock_flush,
.release = mtdblock_release,
.readsect = mtdblock_readsect,
.writesect = mtdblock_writesect,
.add_mtd = mtdblock_add_mtd,
.remove_dev = mtdblock_remove_dev,
.owner = THIS_MODULE,
};
/*
 * Module init for the mtdblock driver: register the mtdblock
 * translation layer with the MTD block-translation core.
 */
static int __init init_mtdblock(void)
{
	int err = register_mtd_blktrans(&mtdblock_tr);

	return err;
}
/*
 * Register a block-translation layer (mtdblock, FTL, NFTL, ...) with the
 * MTD core: register the MTD user notifier (first caller only), reserve a
 * block major number, set up the layer's shared request queue and worker
 * thread, then attach every MTD device already present in mtd_table[].
 * Returns 0 on success or a negative errno; every failure path undoes the
 * work done so far before dropping mtd_table_mutex.
 */
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
int ret, i;
/* Register the notifier if/when the first device type is
registered, to prevent the link/init ordering from fucking
us over. */
// The notifier is invoked whenever a partition is dynamically added or
// removed, so every translation layer that uses it can react.
// (The notifier itself is discussed in a later section.)
if (!blktrans_notifier.list.next)
register_mtd_user(&blktrans_notifier);
tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
if (!tr->blkcore_priv)
return -ENOMEM;
mutex_lock(&mtd_table_mutex);
ret = register_blkdev(tr->major, tr->name); // reserve the block major (major_names[])
// the device now shows up in /proc/devices
if (ret) {
printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
tr->name, tr->major, ret);
kfree(tr->blkcore_priv);
mutex_unlock(&mtd_table_mutex);
return ret;
}
spin_lock_init(&tr->blkcore_priv->queue_lock); // initialise the request-queue lock
tr->blkcore_priv->rq = blk_init_queue(mtd_blktrans_request, &tr->blkcore_priv->queue_lock);
// allocate the request queue and install mtd_blktrans_request() as its handler
if (!tr->blkcore_priv->rq) {
unregister_blkdev(tr->major, tr->name);
kfree(tr->blkcore_priv);
mutex_unlock(&mtd_table_mutex);
return -ENOMEM;
}
tr->blkcore_priv->rq->queuedata = tr; // stash this layer's ops (&mtdblock_tr here)
blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
if (tr->discard)
blk_queue_set_discard(tr->blkcore_priv->rq,
blktrans_discard_request);
tr->blkshift = ffs(tr->blksize) - 1;
tr->blkcore_priv->thread = kthread_run(mtd_blktrans_thread, tr,
"%sd", tr->name); // spawn the worker kernel thread mtd_blktrans_thread
if (IS_ERR(tr->blkcore_priv->thread)) {
blk_cleanup_queue(tr->blkcore_priv->rq);
unregister_blkdev(tr->major, tr->name);
kfree(tr->blkcore_priv);
mutex_unlock(&mtd_table_mutex);
return PTR_ERR(tr->blkcore_priv->thread);
}
INIT_LIST_HEAD(&tr->devs); // list of all devices that use this translation layer
list_add(&tr->list, &blktrans_majors);
// all registered translation layers are chained on blktrans_majors
for (i=0; i<MAX_MTD_DEVICES; i++) {
if (mtd_table[i] && mtd_table[i]->type != MTD_ABSENT)
tr->add_mtd(tr, mtd_table[i]);
}
// invoke this layer's add_mtd() for every MTD device already registered
mutex_unlock(&mtd_table_mutex);
return 0;
}
使用函数blk_init_queue初始化一个请求队列和安装一个请求处理函数mtd_blktrans_request():
/*
 * Request-queue callback installed by register_mtd_blktrans().  The real
 * request processing happens in the per-layer worker thread, so all this
 * has to do is wake that thread.
 */
static void mtd_blktrans_request(struct request_queue *rq)
{
	struct mtd_blktrans_ops *ops;

	ops = rq->queuedata; /* the translation layer owning this queue */
	wake_up_process(ops->blkcore_priv->thread);
}
接下来就创建一个内核线程:mtd_blktrans_thread()
/*
 * Worker thread of a block-translation layer: repeatedly pull the next
 * request off the shared queue, dispatch it through do_blktrans_request(),
 * and sleep whenever the queue is empty (woken by mtd_blktrans_request()).
 */
static int mtd_blktrans_thread(void *arg)
{
struct mtd_blktrans_ops *tr = arg;
struct request_queue *rq = tr->blkcore_priv->rq;
/* we might get involved when memory gets low, so use PF_MEMALLOC */
current->flags |= PF_MEMALLOC;
spin_lock_irq(rq->queue_lock);
while (!kthread_should_stop()) {
struct request *req;
struct mtd_blktrans_dev *dev;
int res = 0;
req = elv_next_request(rq); // fetch the first unfinished request from the queue
if (!req) { // queue is empty: put the thread to sleep until woken
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(rq->queue_lock);
schedule();
spin_lock_irq(rq->queue_lock);
continue;
}
/*
 * One request queue manages many requests, but each request targets
 * exactly one gendisk, so every request is pointed at its disk when
 * it is created.  That link is established in
 * __make_request()->init_request_from_bio()->blk_rq_bio_prep():
 *     rq->rq_disk = bio->bi_bdev->bd_disk
 */
dev = req->rq_disk->private_data;
tr = dev->tr;
spin_unlock_irq(rq->queue_lock);
mutex_lock(&dev->lock);
res = do_blktrans_request(tr, dev, req);
/*
 * do_blktrans_request() inspects the transfer direction and calls
 *     tr->readsect(dev, block, buf)  or
 *     tr->writesect(dev, block, buf)
 * to move the data.
 */
// res != 0 means the request completed successfully, res == 0 means it
// failed; end_request() interprets it as the "uptodate" flag
mutex_unlock(&dev->lock);
spin_lock_irq(rq->queue_lock);
end_request(req, res);
}
spin_unlock_irq(rq->queue_lock);
return 0;
}
二、用户通知器
结构体定义:
/*
 * MTD user notifier: add()/remove() are called whenever an MTD device is
 * added to or removed from mtd_table[]; list chains all notifiers on the
 * global mtd_notifiers list.  Both hooks take the mtd_info in question.
 */
struct mtd_notifier {
void (*add)(struct mtd_info *mtd);
void (*remove)(struct mtd_info *mtd);
struct list_head list;
};
有两个方法和一个链表挂钩,参数均为mtd_info指针。
在drivers/mtd/mtd_blkdevs.c中定义了下面的块翻译层通知器
/* The block-translation notifier, shared by the FTL, NFTL and mtdblock
 * translation layers (defined in drivers/mtd/mtd_blkdevs.c). */
static struct mtd_notifier blktrans_notifier = {
.add = blktrans_notify_add,
.remove = blktrans_notify_remove,
};
该通知器被FTL、NFTL、mtdblock翻译层使用。
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
...
/* Register the notifier if/when the first device type is
registered, to prevent the link/init ordering from fucking
us over. */
if (!blktrans_notifier.list.next)
register_mtd_user(&blktrans_notifier); // 只有第一个翻译层注册的时候该函数才会调用
...
}
/*
 * Register an MTD user notifier: add it to the global mtd_notifiers list
 * and immediately replay its add() hook for every MTD device already in
 * mtd_table[].  Takes mtd_table_mutex itself.
 */
void register_mtd_user (struct mtd_notifier *new)
{
int i;
mutex_lock(&mtd_table_mutex);
list_add(&new->list, &mtd_notifiers);
// add this new notifier to the global mtd_notifiers list (other user
// notifiers may already be on it)
__module_get(THIS_MODULE); // bump the module reference count
for (i=0; i< MAX_MTD_DEVICES; i++)
if (mtd_table[i])
new->add(mtd_table[i]);
// call the new notifier's add() -- blktrans_notify_add() here -- for every
// device already in mtd_table[]; on the first registration this work is
// repeated once more later by register_mtd_blktrans() itself
mutex_unlock(&mtd_table_mutex);
}
/*
 * Notifier add() hook: a new MTD device has appeared, so offer it to
 * every registered block-translation layer.
 */
static void blktrans_notify_add(struct mtd_info *mtd)
{
struct mtd_blktrans_ops *tr;
if (mtd->type == MTD_ABSENT)
return;
list_for_each_entry(tr, &blktrans_majors, list)
tr->add_mtd(tr, mtd);
// blktrans_majors chains the ops structure of every translation layer;
// each layer runs its own add_mtd() hook on the same mtd_info (these
// hooks differ per layer -- for mtdblock it is mtdblock_add_mtd())
}
在系统启动的时候,register_mtd_blktrans(&mtdblock_tr)执行的时候,mtd_table数组中是空的,所以就不会执行到翻译层
的add_mtd函数上来,那么又是在什么时候调用了翻译层的add_mtd()函数的呢?请看下面
三、将分区向上层注册成block device。
// pxa3xx_nand.c
在注册nand驱动的时候:
pxa3xx_nand_init()
--> platform_driver_register()
--> ...经过注册和设备匹配后调用probe()函数
--> pxa3xx_nand_probe()
--> ...
--> add_mtd_partitions(monahans_mtd, pdata->parts, pdata->nr_parts)
--> add_one_partition()
--> add_mtd_device()
--> list_for_each_entry(not, &mtd_notifiers, list)
not->add(mtd);
// 这里就是调用mtd_notifiers链表中所有用户通知器的add函数,以mtdblock的用户通知
// 器为例,那么就是调用函数 mtdblock_add_mtd()。这就是证实了当添加一个分区的时候
// 用户通知器的add函数被调用,那么当移除分区的时候,remove函数就会被调用,只是
// 我们这里没有移除的分区动作。
代码如下:
static int pxa3xx_nand_probe(struct platform_device *pdev)
{
...
return add_mtd_partitions(monahans_mtd, pdata->parts, pdata->nr_parts);
...
}
/*
 * Create and register @nbparts MTD partitions of @master as described by
 * the @parts array.  Partition offsets that use MTDPART_OFS_APPEND are
 * resolved against cur_offset, which tracks the end of the previous
 * partition.  Returns 0 on success, -ENOMEM if a partition could not be
 * allocated.
 */
int add_mtd_partitions(struct mtd_info *master,
const struct mtd_partition *parts,
int nbparts)
{
struct mtd_part *slave;
uint64_t cur_offset = 0;
int i;
printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
for (i = 0; i < nbparts; i++) {
slave = add_one_partition(master, parts + i, i, cur_offset);
if (!slave)
return -ENOMEM;
cur_offset = slave->offset + slave->mtd.size;
}
return 0;
}
/*
 * Allocate and set up one mtd_part slave describing @part of @master:
 * inherit the master's operations through the part_* wrappers, resolve
 * and sanity-check the partition's offset and size, pick the erase size,
 * count bad blocks, and finally (unless part->mtdp asks the caller to
 * register it) register the slave as an MTD device in its own right.
 * Returns the new slave, or NULL on allocation failure.
 */
static struct mtd_part *add_one_partition(struct mtd_info *master,
const struct mtd_partition *part, int partno,
uint64_t cur_offset)
{
struct mtd_part *slave;
/* allocate the partition structure */
slave = kzalloc(sizeof(*slave), GFP_KERNEL);
if (!slave) {
printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
master->name);
del_mtd_partitions(master);
return NULL;
}
list_add(&slave->list, &mtd_partitions);
// mtd_partitions (a file-local "static LIST_HEAD(mtd_partitions)") tracks
// every partition known to the raw MTD layer
/* set up the MTD object for this partition */
slave->mtd.type = master->type;
slave->mtd.flags = master->flags & ~part->mask_flags;
slave->mtd.size = part->size; // partition size
slave->mtd.writesize = master->writesize;
slave->mtd.oobsize = master->oobsize;
slave->mtd.oobavail = master->oobavail;
slave->mtd.subpage_sft = master->subpage_sft;
slave->mtd.name = part->name; // partition name
slave->mtd.owner = master->owner;
slave->mtd.read = part_read; // partition read/write wrappers
slave->mtd.write = part_write;
if (master->panic_write)
slave->mtd.panic_write = part_panic_write;
if (master->point && master->unpoint) {
slave->mtd.point = part_point;
slave->mtd.unpoint = part_unpoint;
}
if (master->read_oob)
slave->mtd.read_oob = part_read_oob;
if (master->write_oob)
slave->mtd.write_oob = part_write_oob;
if (master->read_user_prot_reg)
slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
if (master->read_fact_prot_reg)
slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
if (master->write_user_prot_reg)
slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
if (master->lock_user_prot_reg)
slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
if (master->get_user_prot_info)
slave->mtd.get_user_prot_info = part_get_user_prot_info;
if (master->get_fact_prot_info)
slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
if (master->sync)
slave->mtd.sync = part_sync;
if (!partno && master->suspend && master->resume) {
slave->mtd.suspend = part_suspend;
slave->mtd.resume = part_resume;
}
if (master->writev)
slave->mtd.writev = part_writev;
if (master->lock)
slave->mtd.lock = part_lock;
if (master->unlock)
slave->mtd.unlock = part_unlock;
if (master->block_isbad)
slave->mtd.block_isbad = part_block_isbad;
if (master->block_markbad)
slave->mtd.block_markbad = part_block_markbad;
slave->mtd.erase = part_erase;
slave->master = master; // the master device this partition belongs to
slave->offset = part->offset; // partition offset within the master
slave->index = partno; // partition index
if (slave->offset == MTDPART_OFS_APPEND)
slave->offset = cur_offset;
if (slave->offset == MTDPART_OFS_NXTBLK) {
slave->offset = cur_offset;
if (mtd_mod_by_eb(cur_offset, master) != 0) {
/* Round up to next erasesize */
slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
printk(KERN_NOTICE "Moving partition %d: "
"0x%012llx -> 0x%012llx\n", partno,
(unsigned long long)cur_offset, (unsigned long long)slave->offset);
}
}
if (slave->mtd.size == MTDPART_SIZ_FULL)
slave->mtd.size = master->size - slave->offset;
printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
// print the partition-table entry
/* let's do some sanity checks */
if (slave->offset >= master->size) {
/* let's register it anyway to preserve ordering */
slave->offset = 0;
slave->mtd.size = 0;
printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
part->name);
goto out_register;
}
if (slave->offset + slave->mtd.size > master->size) {
slave->mtd.size = master->size - slave->offset;
printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
part->name, master->name, (unsigned long long)slave->mtd.size);
}
if (master->numeraseregions > 1) {
/* Deal with variable erase size stuff */
int i, max = master->numeraseregions;
u64 end = slave->offset + slave->mtd.size;
struct mtd_erase_region_info *regions = master->eraseregions;
/* Find the first erase regions which is part of this
 * partition. */
for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
;
/* The loop searched for the region _behind_ the first one */
i--;
/* Pick biggest erasesize */
for (; i < max && regions[i].offset < end; i++) {
if (slave->mtd.erasesize < regions[i].erasesize) {
slave->mtd.erasesize = regions[i].erasesize;
}
}
BUG_ON(slave->mtd.erasesize == 0);
} else {
/* Single erase size */
slave->mtd.erasesize = master->erasesize; // uniform erase size inherited from the master
}
if ((slave->mtd.flags & MTD_WRITEABLE) &&
mtd_mod_by_eb(slave->offset, &slave->mtd)) {
/* Doesn't start on a boundary of major erase size */
/* FIXME: Let it be writable if it is on a boundary of
 * _minor_ erase size though */
slave->mtd.flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
part->name);
}
if ((slave->mtd.flags & MTD_WRITEABLE) &&
mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
slave->mtd.flags &= ~MTD_WRITEABLE;
printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
part->name);
}
slave->mtd.ecclayout = master->ecclayout;
if (master->block_isbad) {
uint64_t offs = 0;
while (offs < slave->mtd.size) {
if (master->block_isbad(master,
offs + slave->offset))
slave->mtd.ecc_stats.badblocks++; // count bad blocks inside the partition
offs += slave->mtd.erasesize;
}
}
out_register:
if (part->mtdp) {
/* store the object pointer (caller may or may not register it) */
*part->mtdp = &slave->mtd;
slave->registered = 0;
} else {
/* register our partition */
add_mtd_device(&slave->mtd); // important: enter the slave into mtd_table[]
// the slave becomes a raw MTD device of its own; mtd_table[] is the link
// between the raw-MTD layer and the MTD device layer (returns 0 on success)
slave->registered = 1;
}
return slave;
}
/*
 * Register @mtd in the first free slot of mtd_table[], precompute its
 * erase/write size shifts and masks, unlock power-up-locked chips, then
 * notify every registered MTD user.  Returns 0 on success, 1 if the
 * table is full.
 */
int add_mtd_device(struct mtd_info *mtd)
{
int i;
BUG_ON(mtd->writesize == 0);
mutex_lock(&mtd_table_mutex);
for (i=0; i < MAX_MTD_DEVICES; i++)
if (!mtd_table[i]) {
struct mtd_notifier *not;
mtd_table[i] = mtd; // mtd_table[] is what ties the raw-MTD layer to the MTD block-device layer
mtd->index = i; // remember our slot index in mtd_table[]
mtd->usecount = 0;
if (is_power_of_2(mtd->erasesize))
mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
else
mtd->erasesize_shift = 0;
if (is_power_of_2(mtd->writesize))
mtd->writesize_shift = ffs(mtd->writesize) - 1;
else
mtd->writesize_shift = 0;
mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
/* Some chips always power up locked. Unlock them now */
if ((mtd->flags & MTD_WRITEABLE)
&& (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
if (mtd->unlock(mtd, 0, mtd->size))
printk(KERN_WARNING
"%s: unlock failed, "
"writes may not work\n",
mtd->name);
}
DEBUG(0, "mtd: Giving out device %d to %s\n",i, mtd->name);
/* No need to get a refcount on the module containing
the notifier, since we hold the mtd_table_mutex */
list_for_each_entry(not, &mtd_notifiers, list)
not->add(mtd); // whenever a raw MTD device is added, walk every user
// notifier and call its add() hook, which registers the device further up
// with the gendisk and block layers; for the mtdblock translation layer
// that hook ends up in mtdblock_add_mtd()
mutex_unlock(&mtd_table_mutex);
/* We _know_ we aren't being removed, because
our caller is still holding us here. So none
of this try_ nonsense, and no bitching about it
either. :) */
__module_get(THIS_MODULE);
return 0;
}
mutex_unlock(&mtd_table_mutex);
return 1;
}
//////////////////////////////////////////////////////////////////////////////////////////////
/*
 * A device as seen by a block-translation layer: links the raw MTD
 * device (mtd) with its layer's operation set (tr) and, through
 * blkcore_priv, with the gendisk registered for it.
 */
struct mtd_blktrans_dev {
struct mtd_blktrans_ops *tr;
struct list_head list;
struct mtd_info *mtd;
struct mutex lock;
int devnum; // for mtdblock this equals the mtd_table[] index
unsigned long size; // device size in 512-byte sectors
int readonly;
void *blkcore_priv; /* gendisk in 2.5, devfs_handle in 2.4 */
};
/*
 * add_mtd hook of the mtdblock translation layer: wrap the raw MTD
 * device @mtd in an mtd_blktrans_dev and register it with the block
 * layer via add_mtd_blktrans_dev().
 *
 * Fix: add_mtd_blktrans_dev() can fail (-EBUSY when the devnum is
 * already taken, -ENOMEM when gendisk allocation fails); the original
 * code ignored its return value and leaked the allocation on failure.
 */
static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return;
// this is where the translation-layer device is tied to the raw MTD
// device registered in mtd_table[]
dev->mtd = mtd;
dev->devnum = mtd->index; // slot index in mtd_table[]
dev->size = mtd->size >> 9; // size expressed in 512-byte sectors
dev->tr = tr; // the mtdblock operation set
if (!(mtd->flags & MTD_WRITEABLE))
dev->readonly = 1;
if (add_mtd_blktrans_dev(dev))
kfree(dev); // registration failed: release our allocation instead of leaking it
}
/*
 * Register one device with its translation layer: pick a device number
 * (first free one if new->devnum == -1), link it into tr->devs in devnum
 * order, allocate and set up a gendisk, and hand it to the block layer.
 * Returns 0 on success or a negative errno.  Must be called with
 * mtd_table_mutex already held (BUG()s otherwise).
 */
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
struct mtd_blktrans_ops *tr = new->tr;
struct mtd_blktrans_dev *d;
int last_devnum = -1;
struct gendisk *gd;
if (mutex_trylock(&mtd_table_mutex)) {
mutex_unlock(&mtd_table_mutex);
BUG();
}
list_for_each_entry(d, &tr->devs, list) {
if (new->devnum == -1) {
/* Use first free number */
if (d->devnum != last_devnum+1) {
/* Found a free devnum. Plug it in here */
new->devnum = last_devnum+1;
list_add_tail(&new->list, &d->list);
goto added;
}
} else if (d->devnum == new->devnum) {
/* Required number taken */
return -EBUSY;
// NOTE(review): the caller does no error checking on this return, so
// the mtd_blktrans_dev it allocated is leaked here -- arguably a kernel
// bug, though it can only trigger when partitions are added or removed
// dynamically so that a devnum collision becomes possible
} else if (d->devnum > new->devnum) {
/* Required number was free */
list_add_tail(&new->list, &d->list);
goto added;
}
last_devnum = d->devnum;
}
if (new->devnum == -1)
new->devnum = last_devnum+1;
if ((new->devnum << tr->part_bits) > 256) {
return -EBUSY;
}
list_add_tail(&new->list, &tr->devs);
// the translation layer's ops structure keeps a list of all its devices
added:
mutex_init(&new->lock);
if (!tr->writesect)
new->readonly = 1;
gd = alloc_disk(1 << tr->part_bits); // note 1
// allocate and pre-initialise a gendisk structure
if (!gd) {
list_del(&new->list);
return -ENOMEM;
}
gd->major = tr->major; // 31 for mtdblock
gd->first_minor = (new->devnum) << tr->part_bits; // the mtd_table[] index
gd->fops = &mtd_blktrans_ops; // block-device file operations
if (tr->part_bits) // 0 for mtdblock
if (new->devnum < 26)
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%c", tr->name, 'a' + new->devnum);
else
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%c%c", tr->name,
'a' - 1 + new->devnum / 26,
'a' + new->devnum % 26);
else
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%d", tr->name, new->devnum);
// gd->disk_name = mtdblock{0 - 31}
/* 2.5 has capacity in units of 512 bytes while still
having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
set_capacity(gd, (new->size * tr->blksize) >> 9);
gd->private_data = new;
new->blkcore_priv = gd; // gendisk and mtd_blktrans_dev point at each other
gd->queue = tr->blkcore_priv->rq; // the layer's shared request queue
if (new->readonly)
set_disk_ro(gd, 1);
add_disk(gd); // hand the gendisk to the upper block layer, note2
return 0;
}
/** note1 gd = alloc_disk(1) **/
/* Allocate a gendisk with @minors minor numbers on no particular NUMA
 * node (node_id == -1). */
struct gendisk *alloc_disk(int minors)
{
return alloc_disk_node(minors, -1); // note1-1
}
/**** note1-1 alloc_disk_node(1, -1) ****/
/*
 * Allocate and initialise a gendisk on NUMA node @node_id (-1 = no
 * preference): zeroed allocation, part0 statistics, partition table,
 * device-model initialisation and media-change notification work.
 * Returns NULL on any allocation failure.
 */
struct gendisk *alloc_disk_node(int minors, int node_id)
{
struct gendisk *disk;
disk = kmalloc_node(sizeof(struct gendisk),
GFP_KERNEL | __GFP_ZERO, node_id); // node_id is the NUMA node number
if (disk) {
if (!init_part_stats(&disk->part0)) {
kfree(disk);
return NULL;
}
disk->node_id = node_id; // -1 here
if (disk_expand_part_tbl(disk, 0)) {
free_part_stats(&disk->part0);
kfree(disk);
return NULL;
}
disk->part_tbl->part[0] = &disk->part0;
disk->minors = minors; // 1 for mtdblock
rand_initialize_disk(disk);
disk_to_dev(disk)->class = &block_class; // #define disk_to_dev(disk) (&(disk)->part0.__dev)
disk_to_dev(disk)->type = &disk_type;
device_initialize(disk_to_dev(disk));
INIT_WORK(&disk->async_notify,
media_change_notify_thread);
}
return disk;
}
/**** note1-1 alloc_disk_node(1, -1) ****/
/** note1 gd = alloc_disk(1) **/
/** note2 add_disk(gd) **/
/*
 * Make @disk visible to the system: allocate its dev_t, register the
 * block region, add the device to the device model via register_disk(),
 * register its request queue, and hook up the backing_dev_info in sysfs.
 */
void add_disk(struct gendisk *disk)
{
struct backing_dev_info *bdi;
dev_t devt;
int retval;
/* minors == 0 indicates to use ext devt from part0 and should
 * be accompanied with EXT_DEVT flag. Make sure all
 * parameters make sense.
 */
WARN_ON(disk->minors && !(disk->major || disk->first_minor));
WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT));
disk->flags |= GENHD_FL_UP;
retval = blk_alloc_devt(&disk->part0, &devt); // note2-1
if (retval) {
WARN_ON(1);
return;
}
disk_to_dev(disk)->devt = devt; // i.e. disk->part0.__dev.devt = devt
// the combined major/minor device number
/* ->major and ->first_minor aren't supposed to be
 * dereferenced from here on, but set them just in case.
 */
disk->major = MAJOR(devt); // 31 for mtdblock
disk->first_minor = MINOR(devt); // {0 - 31}
blk_register_region(disk_devt(disk), disk->minors, NULL,
exact_match, exact_lock, disk);
register_disk(disk); // note2-2
blk_register_queue(disk);
bdi = &disk->queue->backing_dev_info;
bdi_register_dev(bdi, disk_devt(disk));
retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj,
"bdi");
WARN_ON(retval);
}
/**** note2-1 blk_alloc_devt() ****/
/*
 * Allocate a dev_t for partition @part.  Partitions inside the disk's
 * consecutive minor range get MKDEV(major, first_minor + partno);
 * anything beyond the range falls back to an extended dev_t from the
 * ext_devt_idr allocator.  Returns 0 on success or a negative errno.
 */
int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
{
struct gendisk *disk = part_to_disk(part);
int idx, rc;
// for mtdblock: part->partno = 0, disk->minors = 1, disk->major = 31,
// disk->first_minor = {0 - 31}
/* in consecutive minor range? */
if (part->partno < disk->minors) {
*devt = MKDEV(disk->major, disk->first_minor + part->partno); // result: 31<<20 | {0 - 31}
return 0;
}
/* allocate ext devt */ // fall back to the extended dev_t allocator
do {
if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
return -ENOMEM;
rc = idr_get_new(&ext_devt_idr, part, &idx);
} while (rc == -EAGAIN);
if (rc)
return rc;
if (idx > MAX_EXT_DEVT) {
idr_remove(&ext_devt_idr, idx);
return -EBUSY;
}
*devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
return 0;
}
/**** note2-1 blk_alloc_devt() ****/
/**** note2-2 register_disk(disk) ****/
/*
 * Add @disk to the device model: name it, add it to sysfs, create the
 * holders/slaves directories, open it once read-only to trigger partition
 * scanning, then announce the disk and its partitions via uevents.
 */
void register_disk(struct gendisk *disk)
{
struct device *ddev = disk_to_dev(disk);
struct block_device *bdev;
struct disk_part_iter piter;
struct hd_struct *part;
int err;
ddev->parent = disk->driverfs_dev;
dev_set_name(ddev, disk->disk_name); // "mtdblock{0 - 31}" in our case
/* delay uevents, until we scanned partition table */
ddev->uevent_suppress = 1;
if (device_add(ddev)) // hook the device into the system device tree
return;
#ifndef CONFIG_SYSFS_DEPRECATED
err = sysfs_create_link(block_depr, &ddev->kobj,
kobject_name(&ddev->kobj));
if (err) {
device_del(ddev);
return;
}
#endif
disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
/* No minors to use for partitions */
if (!disk_partitionable(disk))
goto exit;
/* No such device (e.g., media were just removed) */
if (!get_capacity(disk))
goto exit;
bdev = bdget_disk(disk, 0); // note2-2-1
if (!bdev)
goto exit;
bdev->bd_invalidated = 1;
err = blkdev_get(bdev, FMODE_READ); // open once to validate and scan the device
if (err < 0)
goto exit;
blkdev_put(bdev, FMODE_READ);
exit:
/* announce disk after possible partitions are created */
ddev->uevent_suppress = 0;
kobject_uevent(&ddev->kobj, KOBJ_ADD);
/* announce possible partitions */
disk_part_iter_init(&piter, disk, 0);
while ((part = disk_part_iter_next(&piter)))
kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD);
disk_part_iter_exit(&piter);
}
/****** note2-2-1 bdev = bdget_disk(disk, 0) ******/
/*
 * Look up partition @partno of @disk and return the corresponding
 * block_device (allocating its bdev inode if necessary), or NULL.
 */
struct block_device *bdget_disk(struct gendisk *disk, int partno)
{
struct hd_struct *part;
struct block_device *bdev = NULL;
part = disk_get_part(disk, partno);
if (part)
bdev = bdget(part_devt(part)); // note 2-2-1-1
// map the partition's dev_t to its block_device structure
disk_put_part(part);
return bdev;
}
/******** note2-2-1-1 bdev = bdget(part_devt(part)) ********/
// fs/block_dev.c
/* A block_device and its backing inode on the blockdev pseudo-fs,
 * allocated as a single object so each can be located from the other. */
struct bdev_inode {
struct block_device bdev;
struct inode vfs_inode;
};
/* Map a blockdev-fs inode back to its containing bdev_inode. */
static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
return container_of(inode, struct bdev_inode, vfs_inode);
}
/* Return the block_device embedded alongside @inode. */
inline struct block_device *I_BDEV(struct inode *inode)
{
return &BDEV_I(inode)->bdev;
}
/*
 * Hash a device number for the bdev inode cache: simply the sum of its
 * major and minor parts.
 */
static inline unsigned long hash(dev_t dev)
{
	unsigned long major_part = MAJOR(dev);
	unsigned long minor_part = MINOR(dev);

	return major_part + minor_part;
}
static int bdev_test(struct inode *inode, void *data)
{
return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data;
}
static int bdev_set(struct inode *inode, void *data)
{
BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data;
return 0;
}
/* Every block_device in the system, linked through bdev->bd_list. */
static LIST_HEAD(all_bdevs);
/*
 * Find or create the block_device for device number @dev, backed by an
 * inode on the internal blockdev pseudo-filesystem; returns NULL on
 * allocation failure.
 */
struct block_device *bdget(dev_t dev)
{
struct block_device *bdev;
struct inode *inode;
inode = iget5_locked(blockdev_superblock, hash(dev),
bdev_test, bdev_set, &dev);
// iget5_locked() ends up calling bdev_set(), which stores dev into
// BDEV_I(inode)->bdev.bd_dev
if (!inode)
return NULL;
bdev = &BDEV_I(inode)->bdev;
if (inode->i_state & I_NEW) {
bdev->bd_contains = NULL;
bdev->bd_inode = inode; // link the inode into the block_device
bdev->bd_block_size = (1 << inode->i_blkbits);
bdev->bd_part_count = 0;
bdev->bd_invalidated = 0;
inode->i_mode = S_IFBLK; // this inode stands for a block device node
inode->i_rdev = dev; // record the device number
inode->i_bdev = bdev; // and the block_device itself
/*
 * Where is this registration data used when mount'ing a filesystem on
 * the device (e.g. yaffs2 on a NAND partition, as in the previous
 * article)?
 *
 * get_sb_bdev()
 *   --> open_bdev_exclusive()
 *       --> lookup_bdev()
 *           --> kern_path()
 *           --> bdev = bd_acquire(inode)
 * which merely fetches the block_device pointer.  The device number
 * stored above via iget5_locked()->bdev_set() is then copied into the
 * superblock at mount time:
 *
 * get_sb_bdev()
 *   --> sget()
 *       --> set_bdev_super()
 *           --> s->s_bdev = data;             (the bdev obtained by open_bdev_exclusive())
 *           --> s->s_dev = s->s_bdev->bd_dev; (the device number)
 *
 * Finally the yaffs2 superblock-fill function
 * yaffs_internal_read_super() uses it like this:
 *     if (MAJOR(sb->s_dev) != MTD_BLOCK_MAJOR)
 *         return NULL;                        (not an mtd device)
 *     mtd = get_mtd_device(NULL, MINOR(sb->s_dev));  (fetch the matching mtd_info)
 */
inode->i_data.a_ops = &def_blk_aops;
mapping_set_gfp_mask(&inode->i_data, GFP_USER);
inode->i_data.backing_dev_info = &default_backing_dev_info;
spin_lock(&bdev_lock);
list_add(&bdev->bd_list, &all_bdevs);
spin_unlock(&bdev_lock);
unlock_new_inode(inode);
}
return bdev; // hand back the block_device pointer
}
/******** note2-2-1-1 bdev = bdget(part_devt(part)) ********/
/****** note2-2-1 bdev = bdget_disk(disk, 0) ******/
/**** note2-2 register_disk(disk) ****/
/** note2 add_disk(gd) **/
四、一点补充
/*
 * Per-device cache state of the mtdblock translation layer.  mtdblks[]
 * holds one pointer per MTD block device; entry i pairs with
 * mtd_table[i], so at most 32 MTD block devices are supported.
 */
static struct mtdblk_dev {
struct mtd_info *mtd; // the underlying raw MTD device
int count; // open reference count
struct mutex cache_mutex;
unsigned char *cache_data;
unsigned long cache_offset;
unsigned int cache_size;
enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
} *mtdblks[MAX_MTD_DEVICES]; // #define MAX_MTD_DEVICES 32
/*
** 这里是定义了一个指针数组,其中的每一个指针均指向一个struct mtdblk_dev
** 的类型的对象,每一个struct mtdblk_dev类型的对象都是一个MTD块设备,
** 这里的每一个指针指向的MTD块设备和mtd_table[]中元素指向的每一个
** struct mtd_info一一对应。
** 另外,可以看出linux最多支持32个MTD块设备
关于mtdblks[]指针数组,我这里是以yaffs文件系统为例,所以是不会使用到这个指针数组的,因为yaffs是建立在mtd原始设备层上,在其封装的函数内直接使用了mtd_info结构体内的函数,而没有经过mtdblock翻译层。如果我们的系统中使用的是mtd的其他接口,比如block device,那么就会使用到这个指针数组,在哪里使用呢?
mtdblock.c文件中定义了mtdblock翻译层的操作集:
/* The mtdblock translation layer's operation set, repeated here from
 * section one for reference. */
static struct mtd_blktrans_ops mtdblock_tr = {
.name = "mtdblock",
.major = 31,
.part_bits = 0,
.blksize = 512,
.open = mtdblock_open,
.flush = mtdblock_flush,
.release = mtdblock_release,
.readsect = mtdblock_readsect,
.writesect = mtdblock_writesect,
.add_mtd = mtdblock_add_mtd,
.remove_dev = mtdblock_remove_dev,
.owner = THIS_MODULE,
};
这些mtdblks[]的指针在哪里赋值的呢?请看函数mtdblock_open()中:
/*
 * open() hook of the mtdblock translation layer: create (or reuse) the
 * per-device mtdblk_dev cache object for this device.
 * @mbd is the device registered through the translation layer at init
 * time; mbd->mtd was set in mtdblock_add_mtd() and is the underlying raw
 * MTD device.  Returns 0 on success or -ENOMEM.
 * NOTE(review): mtdblks[] is read and written here without an explicit
 * lock -- presumably serialised by the caller; confirm before relying on it.
 */
static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
struct mtdblk_dev *mtdblk;
struct mtd_info *mtd = mbd->mtd;
int dev = mbd->devnum;
DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
if (mtdblks[dev]) {
mtdblks[dev]->count++;
return 0;
} // already open: just bump the reference count
/* OK, it's not open. Create cache info for it */
mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL); // otherwise allocate one
if (!mtdblk)
return -ENOMEM;
mtdblk->count = 1; // first opener
mtdblk->mtd = mtd; // the crucial link to the raw MTD device
mutex_init(&mtdblk->cache_mutex);
mtdblk->cache_state = STATE_EMPTY;
if ( !(mtdblk->mtd->flags & MTD_NO_ERASE) && mtdblk->mtd->erasesize) {
mtdblk->cache_size = mtdblk->mtd->erasesize;
mtdblk->cache_data = NULL;
}
mtdblks[dev] = mtdblk; // publish in the mtdblks[] slot for this devnum
DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
return 0;
}