4: CARD层分析:
因为这些记忆卡都是块设备,当然需要提供块设备的驱动程序,这部分实现的就是如何把你的SD卡注册成一个块设备。先看block.c中的probe函数
MMC 块设备用如下结构表示:
/*
 * Per-card block device state: one of these is allocated per MMC/SD
 * card by mmc_blk_alloc() and stored in disk->private_data.
 */
struct mmc_blk_data {
spinlock_t lock;        /* request-queue lock, passed to mmc_init_queue() */
struct gendisk *disk;   /* the gendisk registered with the block layer */
struct mmc_queue queue; /* MMC request queue (owns the kernel thread) */
unsigned int usage;     /* reference count; set to 1 in mmc_blk_alloc() */
unsigned int read_only; /* nonzero if the card's write-protect is set */
};
我们先看 mmc_blk_alloc( )
devidx = find_first_zero_bit(dev_use, MMC_NUM_MINORS);
if (devidx >= MMC_NUM_MINORS)//这表明我们的mmc层最多支持16个card,每个card占8个分区
return ERR_PTR(-ENOSPC);
__set_bit(devidx, dev_use);
md->disk = alloc_disk(1 << MMC_SHIFT);//分配一个磁盘,8个分区
//8 partion
if (md->disk == NULL) {
ret = -ENOMEM;
goto err_kfree;
}
spin_lock_init(&md->lock);
md->usage = 1;
ret = mmc_init_queue(&md->queue, card, &md->lock);//注一
if (ret)
goto err_putdisk;
md->queue.issue_fn = mmc_blk_issue_rq;//这个函数很重要,待会详细分析
md->queue.data = md;
md->disk->major = MMC_BLOCK_MAJOR;
md->disk->first_minor = devidx << MMC_SHIFT;
md->disk->fops = &mmc_bdops;//磁盘的操作函数
md->disk->private_data = md;
md->disk->queue = md->queue.queue;
md->disk->driverfs_dev = &card->dev;
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
*
* - be set for removable media with permanent block devices
* - be unset for removable block devices with permanent media
*
* Since MMC block devices clearly fall under the second
* case, we do not set GENHD_FL_REMOVABLE. Userspace
* should use the block device creation/destruction hotplug
* messages to tell when the card is present.
*/
这个注释如何理解呢?
sprintf(md->disk->disk_name, "mmcblk%d", devidx);//这个名字将在/proc/device下出现
我们可以看到在/sys/block下有个"mmcblk0"目录
blk_queue_hardsect_size(md->queue.queue, 512);//设置硬件扇区的容量
}
注一:
mq->queue = blk_init_queue(mmc_request, lock);初始化将request函数与队列绑定
if (!mq->queue)
return -ENOMEM;
mq->queue->queuedata = mq;
mq->req = NULL;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
//命令预处理,为驱动程序在elv_next_request返回之前,提供检查和预处理请求的机制,详细见LDD3 P485
//command prepare process
blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);//
//barrier request屏障请求,防止重新排序(组合)产生的错误,设置屏障后,保证请求的数据及时写入到介质。
mq->sg = kmalloc(sizeof(struct scatterlist) *
host->max_phys_segs, GFP_KERNEL);
if (!mq->sg) {
ret = -ENOMEM;
goto cleanup_queue;
}
sg_init_table(mq->sg, host->max_phys_segs);
}
//分配scatterlist结构体
mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
最后设置了一个内核线程,线程关联的函数是mmc_queue_thread,这个很重要,我们待会分析。
接下来调用 mmc_blk_set_blksize来设置block的长度为512。
一切都准备好了以后激活磁盘:add_disk(md->disk);
最后来分析request函数:
*
* Generic MMC request handler. This is called for any queue on a
* particular host. When the host is not busy, we look for a request
* on any queue on this host, and attempt to issue it. This may
* not be the queue we were asked to process.也就是说,elv_next_request返回来的
req不一定是mq->req
*/
static void mmc_request(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
int ret;
if (!mq) {
printk(KERN_ERR "MMC: killing requests for dead queue/n");
while ((req = elv_next_request(q)) != NULL) {
do {
ret = __blk_end_request(req, -EIO,
blk_rq_cur_bytes(req));// 没有可以处理的请求,则就素这个请求
} while (ret);
}
return;
}
if (!mq->req)
wake_up_process(mq->thread);//注一
}
注一:我们发现,与LDD3中介绍的块设备编程方法不同,并没有出现任何与bio结构相关的东西。当请求获取后,我们通过什么来进行数据块的传输呢?这里是通过唤醒mq->thread线程来实现的,这个线程实际上就是mmc_queue_thread函数
/*
 * Per-queue kernel thread ("mmcqd"): repeatedly pulls the next request
 * off the block queue and hands it to mq->issue_fn (mmc_blk_issue_rq)
 * for the actual transfer.  Woken by mmc_request(), stopped via
 * kthread_stop().
 */
static int mmc_queue_thread(void *d)
{
struct mmc_queue *mq = d;
struct request_queue *q = mq->queue;
/* This thread sits on the memory-writeback path; PF_MEMALLOC lets it
 * dip into emergency reserves so it cannot deadlock waiting on memory
 * that only it could free. */
current->flags |= PF_MEMALLOC;
down(&mq->thread_sem);
do {
struct request *req = NULL;
/* Order matters: mark ourselves TASK_INTERRUPTIBLE *before* checking
 * the queue, so a wake_up_process() racing with this check is not
 * lost — it just resets us to TASK_RUNNING and schedule() returns
 * immediately. */
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
if (!blk_queue_plugged(q))
req = elv_next_request(q);
mq->req = req;
spin_unlock_irq(q->queue_lock);
if (!req) {
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
break;
}
/* Nothing queued: drop the semaphore (so mmc_queue_suspend() etc.
 * can grab it while we sleep) and wait to be woken. */
up(&mq->thread_sem);
schedule();
down(&mq->thread_sem);
continue;
}
set_current_state(TASK_RUNNING);
/* NOTE(review): the original author was unsure about this part; it
 * dispatches the request just fetched to the issue handler. */
mq->issue_fn(mq, req);//注一
} while (1);
up(&mq->thread_sem);
return 0;
}
注一:我们看看issue_fn函数做了些什么,这个函数相当复杂
我们看关键的部分:
brq.data.sg = mq->sg;
brq.data.sg_len = mmc_queue_map_sg(mq);
/*
* Adjust the sg list so it is the same size as the
* request.
*/
if (brq.data.blocks != req->nr_sectors) {
int i, data_size = brq.data.blocks << 9;
struct scatterlist *sg;
for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
data_size -= sg->length;
if (data_size <= 0) {
sg->length += data_size;
i++;
break;
}
}
brq.data.sg_len = i;
}
以上这些代码用来准备scatterlist,这是数据传输的缓冲区
mmc_wait_for_req(card->host, &brq.mrq);接下来我们向host发送请求,这个函数应该很熟悉了,它的最后一句就是 host->ops->request(host, mrq),这样就和我们驱动程序的request联系起来了,由于这次cmd->data成员不再为空,所以启动的是数据传输了。
5 实验:
将默认的平台信息作了更改,这样
.get_ro = s3cmci_get_ro,
.get_cd = s3cmci_card_present,
两个函数就有实际的作用了
/*
 * Default platform data, present so the driver can avoid a number of
 * "if (host->pdata)" checks.  Fields left at zero pick reasonable
 * defaults.
 */
static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
	.gpio_detect     = 1,
	.gpio_wprotect   = 1,
	.detect_invert   = 0,
	.wprotect_invert = 1,
};
不过还有一点不清楚的是,
host_dodma 设置为1的时候,在/sdcard 下找不到任何东西 /proc/devices中也查找不到相应的设备
从打印的信息看:
7>mmc0: clock 0Hz busmode 1 powermode 1 cs 0 Vdd 21 width 0 timing 0
<6>s3c2440-sdi s3c2440-sdi: running at 0kHz (requested: 0kHz).
<7>mmc0: clock 197753Hz busmode 1 powermode 2 cs 0 Vdd 21 width 0 timing 0
<6>s3c2440-sdi s3c2440-sdi: running at 198kHz (requested: 197kHz).
<7>mmc0: clock 197753Hz busmode 1 powermode 2 cs 1 Vdd 21 width 0 timing 0
<6>s3c2440-sdi s3c2440-sdi: running at 198kHz (requested: 197kHz).
<7>mmc0: starting CMD0 arg 00000000 flags 000000c0
<7>s3c2440-sdi s3c2440-sdi: CMD[OK] #1 op:0 arg:0x00000000 flags:0x08c0 retries:0 R0:0x00000000
<7>mmc0: req done (CMD0): 0: 00000000 00000000 00000000 00000000
发送命令基本都是成功的,为什么会这样???
6结论:到此为止,按照数据和命令流的方向,我们分析了MMC 子系统的基本结构,很多细节的地方还不是很清楚,不过至少为写驱动程序做了相应的准备了。
阅读(869) | 评论(0) | 转发(1) |