The MTD subsystem: the device layer


   The device layer is the bridge between the filesystem and the flash. On top of the MTD raw layer it offers two ways for upper layers to reach the flash: an MTD character device and an MTD block device. The character device registers a file_operations set with the kernel and provides raw character access to the flash (read, write and ioctl) through the /dev/mtd* nodes. The block device defines an mtdblock_tr structure that describes the MTD block translation layer and is accessed through /dev/mtdblock*. The rest of this article looks at how both are implemented.

1. The MTD character device

  The MTD character device lives mainly in drivers/mtd/mtdchar.c. Its flow is fairly simple: it registers a character device with the kernel (major number 90) and supplies the file_operations set mtd_fops. The registration code is:

int __init init_mtdchar(void)
{
    int ret;

    ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
                   "mtd", &mtd_fops);
    if (ret < 0) {
        pr_err("Can't allocate major number %d for MTD\n",
               MTD_CHAR_MAJOR);
        return ret;
    }

    return ret;
}

It is a single call to __register_chrdev, the completely standard character device registration interface: it claims major number MTD_CHAR_MAJOR (90) and attaches mtd_fops to it. (The /dev/mtd* nodes themselves, one per MTD partition, are created by udev/mdev from the class devices that the MTD core registers.) When the system accesses the flash as a character device through /dev/mtd*, the matching functions in this operations set are called to move data into or out of the flash.

static const struct file_operations mtd_fops = {
    .owner          = THIS_MODULE,
    .llseek         = mtdchar_lseek,
    .read           = mtdchar_read,
    .write          = mtdchar_write,
    .unlocked_ioctl = mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl   = mtdchar_compat_ioctl,
#endif
    .open           = mtdchar_open,
    .release        = mtdchar_close,
    .mmap           = mtdchar_mmap,
#ifndef CONFIG_MMU
    .get_unmapped_area = mtdchar_get_unmapped_area,
    .mmap_capabilities = mtdchar_mmap_capabilities,
#endif
};

So the character device layer really just implements the standard file interface: user space can drive the flash directly with the read/write system calls. Following the read path (sketched below), the request eventually lands in the MTD raw layer, calling the mtd_info interfaces that match the flash type in use.
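A condensed sketch of mtdchar_read() (heavily abridged: the OOB/raw read modes, ECC statistics and some buffer-management details of the real function are omitted, so treat it as an illustration rather than the exact kernel source) shows how the character device hands the request to the raw layer through mtd_read():

/* Condensed sketch of mtdchar_read() from drivers/mtd/mtdchar.c.
 * The real function also handles OOB/raw read modes and ECC error
 * accounting; here only the plain read path is kept. */
static ssize_t mtdchar_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
{
    struct mtd_file_info *mfi = file->private_data;
    struct mtd_info *mtd = mfi->mtd;        /* the raw-layer object */
    size_t retlen, len, total_retlen = 0;
    char *kbuf;
    int ret = 0;

    if (*ppos + count > mtd->size)          /* clamp to the device size */
        count = mtd->size - *ppos;
    if (!count)
        return 0;

    kbuf = kmalloc(min_t(size_t, count, mtd->erasesize), GFP_KERNEL);
    if (!kbuf)
        return -ENOMEM;

    while (count) {
        len = min_t(size_t, count, mtd->erasesize);

        /* hand the request to the raw layer; this dispatches to the
         * flash driver through the mtd_info operations */
        ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
        if (ret && !mtd_is_bitflip_or_eccerr(ret))
            break;

        if (copy_to_user(buf, kbuf, retlen)) {
            ret = -EFAULT;
            break;
        }

        *ppos += retlen;
        total_retlen += retlen;
        count -= retlen;
        buf += retlen;
    }

    kfree(kbuf);
    return total_retlen ? total_retlen : ret;
}

mtdchar_write() is symmetric, using copy_from_user() and mtd_write().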

Before flash can be (re)written it has to be erased first, so besides read/write the character device also needs other commands: erase, query the flash geometry, access the OOB/ECC area, and so on. These are exposed through ioctl (see the example below), and their implementation likewise ends up in the mtd_info interfaces.
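As a minimal userspace illustration (the device path /dev/mtd0 and the bare-bones error handling are only for the example), erasing the first erase block through the character device uses the MEMGETINFO and MEMERASE ioctls from <mtd/mtd-user.h>:

/* Minimal example: query the flash geometry, then erase the first
 * erase block of /dev/mtd0 through the MTD character device. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/mtd-user.h>

int main(void)
{
    int fd = open("/dev/mtd0", O_RDWR);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    struct mtd_info_user info;
    if (ioctl(fd, MEMGETINFO, &info) < 0) {      /* size, erasesize, ... */
        perror("MEMGETINFO");
        return 1;
    }

    struct erase_info_user ei = { .start = 0, .length = info.erasesize };
    if (ioctl(fd, MEMERASE, &ei) < 0)            /* erase before writing */
        perror("MEMERASE");

    close(fd);
    return 0;
}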

2. The MTD block device layer

   The MTD block device code is split across a few files: mtd_blkdevs.c implements the common block translation layer, mtdblock.c builds the cached (write-back) mtdblock device on top of it, and mtdblock_ro.c is the read-only variant without caching. First, let's see how an MTD block device gets registered:

static int __init init_mtdblock(void)
{
    return register_mtd_blktrans(&mtdblock_tr);
}

It simply calls register_mtd_blktrans, passing in mtdblock_tr, the block translation operations set (sketched below). The interesting function is therefore register_mtd_blktrans: what does it actually do, and how does it make block device access possible?
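For reference, mtdblock_tr is defined in mtdblock.c roughly as follows (a sketch based on mainline; the exact member list varies between kernel versions):

static struct mtd_blktrans_ops mtdblock_tr = {
    .name        = "mtdblock",
    .major       = MTD_BLOCK_MAJOR,      /* 31 */
    .part_bits   = 0,
    .blksize     = 512,
    .open        = mtdblock_open,
    .flush       = mtdblock_flush,
    .release     = mtdblock_release,
    .readsect    = mtdblock_readsect,    /* used by do_blktrans_request() below */
    .writesect   = mtdblock_writesect,
    .add_mtd     = mtdblock_add_mtd,     /* called when an MTD device appears */
    .remove_dev  = mtdblock_remove_dev,
    .owner       = THIS_MODULE,
};

With those callbacks in mind, here is register_mtd_blktrans: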

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
    struct mtd_info *mtd;
    int ret;

    /* Register the notifier if/when the first device type is
       registered, to prevent the link/init ordering from fucking
       us over. */
    if (!blktrans_notifier.list.next)
        register_mtd_user(&blktrans_notifier);   // register the notifier once, when the first translation layer is registered


    mutex_lock(&mtd_table_mutex);

    ret = register_blkdev(tr->major, tr->name);     // register the block device major; 31 for mtdblock
    if (ret < 0) {
        printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
               tr->name, tr->major, ret);
        mutex_unlock(&mtd_table_mutex);
        return ret;
    }

    if (ret)
        tr->major = ret;

    tr->blkshift = ffs(tr->blksize) - 1;        // blkshift = log2(blksize)

    INIT_LIST_HEAD(&tr->devs);                  // initialize this translation layer's device list
    list_add(&tr->list, &blktrans_majors);      // add it to the global blktrans_majors list

    mtd_for_each_device(mtd)                    // walk every mtd_info registered in the mtd_idr tree
        if (mtd->type != MTD_ABSENT)
            tr->add_mtd(tr, mtd);               // for mtdblock this is mtdblock_add_mtd

    mutex_unlock(&mtd_table_mutex);
    return 0;
}

1. Register the blktrans notifier (only once, when the first translation layer is registered), then add the translation layer to the blktrans_majors list.

2. Register a block device via register_blkdev; mtdblock's major number is 31.

3. Walk all registered mtd_info objects and, for each valid (non-absent) MTD device, call the translation layer's add_mtd to add it.

void register_mtd_user (struct mtd_notifier *new)
{
    struct mtd_info *mtd;

    mutex_lock(&mtd_table_mutex);

    list_add(&new->list, &mtd_notifiers);       // add blktrans_notifier to the mtd_notifiers list

    __module_get(THIS_MODULE);

    mtd_for_each_device(mtd)                    // for every mtd_info already registered, call the notifier's add callback (blktrans_notify_add)
        new->add(mtd);

    mutex_unlock(&mtd_table_mutex);
}

 register_mtd_user adds the mtd_notifier to the mtd_notifiers list and then walks every mtd_info that is already registered, calling the notifier's add callback on each. Later on, whenever add_mtd_device registers a new MTD device, it walks the same list and calls the same callback (see the excerpt below), which in turn calls the matching mtd_blktrans_ops->add_mtd.
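The relevant part of add_mtd_device() in drivers/mtd/mtdcore.c is just this walk over the notifier list (excerpt only; the surrounding device registration code is omitted):

    /* excerpt from add_mtd_device(): once the new mtd_info has been
     * inserted into mtd_idr, every registered notifier is told about it;
     * for the block translation layer this lands in blktrans_notify_add() */
    struct mtd_notifier *not;

    list_for_each_entry(not, &mtd_notifiers, list)
        not->add(mtd);

For the block translation layer, not->add is blktrans_notify_add: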

static void blktrans_notify_add(struct mtd_info *mtd)
{
    struct mtd_blktrans_ops *tr;

    if (mtd->type == MTD_ABSENT)
        return;

    list_for_each_entry(tr, &blktrans_majors, list)
        tr->add_mtd(tr, mtd);
}

Either way we end up in mtdblock_add_mtd, which adds the MTD partition as a block device. Since this is the block device side, it should follow the standard block device registration flow; let's look at the code:

static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
    struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

    if (!dev)
        return;

    dev->mbd.mtd = mtd;                 // the MTD partition object
    dev->mbd.devnum = mtd->index;       // partition index, used as the device number

    dev->mbd.size = mtd->size >> 9;     // size in 512-byte sectors
    dev->mbd.tr = tr;                   // back-pointer to the translation layer (mtdblock_tr)

    if (!(mtd->flags & MTD_WRITEABLE))
        dev->mbd.readonly = 1;

    if (add_mtd_blktrans_dev(&dev->mbd))    // register the mtd_blktrans_dev with the block layer
        kfree(dev);
}

This function just allocates an mtdblk_dev (which embeds the mtd_blktrans_dev), initializes it, and then calls add_mtd_blktrans_dev; the real action happens inside add_mtd_blktrans_dev.

int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
    struct mtd_blktrans_ops *tr = new->tr;
    struct mtd_blktrans_dev *d;
    int last_devnum = -1;
    struct gendisk *gd;
    int ret;

    if (mutex_trylock(&mtd_table_mutex)) {
        mutex_unlock(&mtd_table_mutex);
        BUG();
    }

/* This loop only checks whether the requested minor number in the mtd block
   layer is still free. We saw earlier that the mtdblock major is 31, and the
   MTD device's index in the mtd table is used as its minor number, so the
   first step is to make sure that minor has not been taken already. */
    mutex_lock(&blktrans_ref_mutex);
    list_for_each_entry(d, &tr->devs, list) {
        if (new->devnum == -1) {
            /* Use first free number */
            if (d->devnum != last_devnum+1) {
                /* Found a free devnum. Plug it in here */
                new->devnum = last_devnum+1;
                list_add_tail(&new->list, &d->list);
                goto added;
            }
        } else if (d->devnum == new->devnum) {
            /* Required number taken */
            mutex_unlock(&blktrans_ref_mutex);
            return -EBUSY;
        } else if (d->devnum > new->devnum) {
            /* Required number was free */
            list_add_tail(&new->list, &d->list);
            goto added;
        }
        last_devnum = d->devnum;
    }

    ret = -EBUSY;
    if (new->devnum == -1)
        new->devnum = last_devnum+1;

    /* Check that the device and any partitions will get valid
     * minor numbers and that the disk naming code below can cope
     * with this number. */
    if (new->devnum > (MINORMASK >> tr->part_bits) ||
        (tr->part_bits && new->devnum >= 27 * 26)) {
        mutex_unlock(&blktrans_ref_mutex);
        goto error1;
    }

/* 1. A minor number has been assigned; link the mtd_blktrans_dev
      onto tr->devs (mtdblock_tr->devs) */
    list_add_tail(&new->list, &tr->devs);
 added:
    mutex_unlock(&blktrans_ref_mutex);

    mutex_init(&new->lock);
    kref_init(&new->ref);
    if (!tr->writesect)
        new->readonly = 1;

    /* Create gendisk */
    ret = -ENOMEM;
/* 2. With the mtd block device set up, it now has to be presented to the
      generic block layer: allocate a gendisk for it */
    gd = alloc_disk(1 << tr->part_bits);

    if (!gd)
        goto error2;


/* 3. Fill in the gendisk */
    new->disk = gd;
    gd->private_data = new;
    gd->major = tr->major;
    gd->first_minor = (new->devnum) << tr->part_bits;
    gd->fops = &mtd_block_ops;

    if (tr->part_bits)
        if (new->devnum < 26)
            snprintf(gd->disk_name, sizeof(gd->disk_name),
                 "%s%c", tr->name, 'a' + new->devnum);
        else
            snprintf(gd->disk_name, sizeof(gd->disk_name),
                 "%s%c%c", tr->name,
                 'a' - 1 + new->devnum / 26,
                 'a' + new->devnum % 26);
    else
        snprintf(gd->disk_name, sizeof(gd->disk_name),
             "%s%d", tr->name, new->devnum);

/* 4. Set the capacity, in 512-byte sectors */
    set_capacity(gd, (new->size * tr->blksize) >> 9);

/* 5. Initialize the request queue for this device; its I/O requests are
      dispatched from this queue by mtd_blktrans_request() */
    spin_lock_init(&new->queue_lock);
    new->rq = blk_init_queue(mtd_blktrans_request, &new->queue_lock);

    if (!new->rq)
        goto error3;

    if (tr->flush)
        blk_queue_flush(new->rq, REQ_FLUSH);

    new->rq->queuedata = new;                // important: mtd_blktrans_request() reads this back later
    blk_queue_logical_block_size(new->rq, tr->blksize);

    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq);
    queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq);

    if (tr->discard) {
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq);
        new->rq->limits.max_discard_sectors = UINT_MAX;
    }

    gd->queue = new->rq;

/* 6. Create the workqueue that will actually service this device's
      I/O requests */
    new->wq = alloc_workqueue("%s%d", 0, 0,
                  tr->name, new->mtd->index);
    if (!new->wq)
        goto error4;
    INIT_WORK(&new->work, mtd_blktrans_work);

    gd->driverfs_dev = &new->mtd->dev;

    if (new->readonly)
        set_disk_ro(gd, 1);
/* 7. Register the disk with the block layer */
    add_disk(gd);

    if (new->disk_attributes) {
        ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
                    new->disk_attributes);
        WARN_ON(ret);
    }
    return 0;
error4:
    blk_cleanup_queue(new->rq);
error3:
    put_disk(new->disk);
error2:
    list_del(&new->list);
error1:
    return ret;
}

Reading this gives a strong sense of déjà vu: it is the standard block device registration flow, namely allocate a gendisk, fill it in, then register it with add_disk. At this point the MTD device has been added to the kernel as a block device.
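One detail worth noting is the gd->fops installed above. It is a thin block_device_operations table; a sketch of mtd_block_ops from mtd_blkdevs.c (the member list varies slightly between kernel versions):

static const struct block_device_operations mtd_block_ops = {
    .owner      = THIS_MODULE,
    .open       = blktrans_open,       /* takes references on the mtd device */
    .release    = blktrans_release,
    .ioctl      = blktrans_ioctl,      /* e.g. BLKFLSBUF -> tr->flush */
    .getgeo     = blktrans_getgeo,
};

The actual data transfer does not go through these fops, though; reads and writes arrive through the request queue set up in step 5. So how are the requests serviced?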

 

static void mtd_blktrans_request(struct request_queue *rq)
{
    struct mtd_blktrans_dev *dev;
    struct request *req = NULL;

    dev = rq->queuedata;    // this is the mtd_blktrans_dev stored in add_mtd_blktrans_dev

    if (!dev)
        while ((req = blk_fetch_request(rq)) != NULL)   // no device behind the queue: fail all requests
            __blk_end_request_all(req, -ENODEV);
    else
        queue_work(dev->wq, &dev->work);                // defer the requests to the workqueue
}

 

The request function looks at dev to decide what to do: if the queue has no backing device it simply fails every pending request with -ENODEV, otherwise it defers the work to the device's workqueue. Since add_mtd_blktrans_dev already set rq->queuedata = new, dev is non-NULL here and the requests are handled by the work item.

static void mtd_blktrans_work(struct work_struct *work)
{
    struct mtd_blktrans_dev *dev =
        container_of(work, struct mtd_blktrans_dev, work);
    struct mtd_blktrans_ops *tr = dev->tr;
    struct request_queue *rq = dev->rq;
    struct request *req = NULL;
    int background_done = 0;

    spin_lock_irq(rq->queue_lock);

    while (1) {
        int res;

        dev->bg_stop = false;
  /* check whether we still hold a request, otherwise fetch the next one from the queue */
        if (!req && !(req = blk_fetch_request(rq))) {
            if (tr->background && !background_done) {
                spin_unlock_irq(rq->queue_lock);
                mutex_lock(&dev->lock);
                tr->background(dev);
                mutex_unlock(&dev->lock);
                spin_lock_irq(rq->queue_lock);
                /*
                 * Do background processing just once per idle
                 * period.
                 */
                background_done = !dev->bg_stop;
                continue;
            }
            break;
        }

        spin_unlock_irq(rq->queue_lock);

        mutex_lock(&dev->lock);
   /* process the request against the translation layer */
        res = do_blktrans_request(dev->tr, dev, req);
        mutex_unlock(&dev->lock);

        spin_lock_irq(rq->queue_lock);
/* __blk_end_request_cur() returns true while the request still has sectors left;
   once it returns false the request is finished and the next one is fetched */
        if (!__blk_end_request_cur(req, res))
            req = NULL;

        background_done = 0;
    }

    spin_unlock_irq(rq->queue_lock);
}

The work function is also quite simple: it fetches a request (or keeps working on the current one) and calls do_blktrans_request to process it. Because the block layer merges and batches I/O, a single request can cover several segments, so after each do_blktrans_request call __blk_end_request_cur is used to check whether the request is fully completed; if not, the loop keeps processing the remainder, and when the queue finally goes idle the optional tr->background callback gets one run per idle period.

static int do_blktrans_request(struct mtd_blktrans_ops *tr,
                   struct mtd_blktrans_dev *dev,
                   struct request *req)
{
    unsigned long block, nsect;
    char *buf;

    block = blk_rq_pos(req) << 9 >> tr->blkshift;       // first block to transfer (sector position converted to tr block units)
    nsect = blk_rq_cur_bytes(req) >> tr->blkshift;      // number of blocks in the current segment
    buf = bio_data(req->bio);

    if (req->cmd_type != REQ_TYPE_FS)
        return -EIO;

    if (req->cmd_flags & REQ_FLUSH)
        return tr->flush(dev);

    if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
        get_capacity(req->rq_disk))                     // bounds check against the disk capacity
        return -EIO;

    if (req->cmd_flags & REQ_DISCARD)
        return tr->discard(dev, block, nsect);

    switch(rq_data_dir(req)) {
    case READ:
        for (; nsect > 0; nsect--, block++, buf += tr->blksize)
            if (tr->readsect(dev, block, buf))              // read one block through the translation layer
                return -EIO;
        rq_flush_dcache_pages(req);                         //flush all pages in a request
        return 0;
    case WRITE:
        if (!tr->writesect)
            return -EIO;

        rq_flush_dcache_pages(req);                         //flush all pages in a request
        for (; nsect > 0; nsect--, block++, buf += tr->blksize)
            if (tr->writesect(dev, block, buf))             // write one block through the translation layer
                return -EIO;
        return 0;
    default:
        printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
        return -EIO;
    }
}

The request handling itself is that simple: after a few sanity checks, the matching readsect/writesect callback is invoked once per block and the block device read or write is complete. For mtdblock these callbacks go through an erase-block sized write-back cache before touching the mtd_info interfaces, as sketched below.
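For completeness, here is roughly where readsect/writesect land for mtdblock (condensed from mtdblock.c; do_cached_read/do_cached_write maintain a one-erase-block cache and issue mtd_read, mtd_erase and mtd_write on the underlying mtd_info):

/* Condensed from drivers/mtd/mtdblock.c: the mtdblock translation layer
 * turns 512-byte sector requests into cached reads/writes against the
 * raw mtd_info device. */
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
                             unsigned long block, char *buf)
{
    struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);

    /* read 512 bytes at block*512, served from the cache when possible */
    return do_cached_read(mtdblk, block << 9, 512, buf);
}

static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
                              unsigned long block, char *buf)
{
    struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);

    /* the write cache holds one erase block and is allocated lazily */
    if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
        mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
        if (!mtdblk->cache_data)
            return -EINTR;
    }
    return do_cached_write(mtdblk, block << 9, 512, buf);
}

This is the buffering mentioned at the start of the block device section: writes accumulate in the erase-block cache and are only flushed (erase, then program) when a write falls outside the cached block or when the device is flushed or released.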

 

