Category: LINUX
2013-12-09 22:20:07
static int __make_request(struct request_queue *q, struct bio *bio)
{
        struct request *req;
        int el_ret, nr_sectors;
        const unsigned short prio = bio_prio(bio);
        const int sync = bio_sync(bio);
        const int unplug = bio_unplug(bio);
        int rw_flags;

        nr_sectors = bio_sectors(bio);

        /*
         * low level driver can indicate that it wants pages above a
         * certain limit bounced to low memory (ie for highmem, or even
         * ISA dma in theory)
         */
        blk_queue_bounce(q, &bio);

        spin_lock_irq(q->queue_lock);

        /* For barrier IO, or when the queue is empty, there is nothing to
         * merge with: skip the merge logic and allocate a new request. */
        if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
                goto get_rq;

        /* Try to merge the incoming bio into an existing request. */
        el_ret = elv_merge(q, &req, bio);
        switch (el_ret) {
        case ELEVATOR_BACK_MERGE:
                /* Back merge: the bio is appended at the tail of req. After
                 * a successful merge, also check whether the new bio has
                 * filled the gap to the following request, so that the two
                 * requests can be merged as well. */
                BUG_ON(!rq_mergeable(req));

                if (!ll_back_merge_fn(q, req, bio))
                        break;

                trace_block_bio_backmerge(q, bio);

                req->biotail->bi_next = bio;
                req->biotail = bio;
                req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                req->ioprio = ioprio_best(req->ioprio, prio);
                if (!blk_rq_cpu_valid(req))
                        req->cpu = bio->bi_comp_cpu;
                drive_stat_acct(req, 0);
                /* If the new bio filled the gap, req is now contiguous with
                 * the following request and the two can be merged too. */
                if (!attempt_back_merge(q, req))
                        elv_merged_request(q, req, el_ret);
                goto out;

        case ELEVATOR_FRONT_MERGE:
                /* Front merge: the bio becomes the new head of req. Likewise,
                 * check afterwards whether the new bio made req contiguous
                 * with the preceding request. */
                BUG_ON(!rq_mergeable(req));

                if (!ll_front_merge_fn(q, req, bio))
                        break;

                trace_block_bio_frontmerge(q, bio);

                bio->bi_next = req->bio;
                req->bio = bio;

                /*
                 * may not be valid. if the low level driver said
                 * it didn't need a bounce buffer then it better
                 * not touch req->buffer either...
                 */
                req->buffer = bio_data(bio);
                req->current_nr_sectors = bio_cur_sectors(bio);
                req->hard_cur_sectors = req->current_nr_sectors;
                req->sector = req->hard_sector = bio->bi_sector;
                req->nr_sectors = req->hard_nr_sectors += nr_sectors;
                req->ioprio = ioprio_best(req->ioprio, prio);
                if (!blk_rq_cpu_valid(req))
                        req->cpu = bio->bi_comp_cpu;
                drive_stat_acct(req, 0);
                /* Try a further merge with the preceding request. */
                if (!attempt_front_merge(q, req))
                        elv_merged_request(q, req, el_ret);
                goto out;

        /* ELV_NO_MERGE: elevator says don't/can't merge. */
        default:
                ;
        }

get_rq:
        /*
         * This sync check and mask will be re-done in init_request_from_bio(),
         * but we need to set it earlier to expose the sync flag to the
         * rq allocator and io schedulers.
         */
        rw_flags = bio_data_dir(bio);
        if (sync)
                rw_flags |= REQ_RW_SYNC;

        /*
         * Grab a free request. This is might sleep but can not fail.
         * Returns with the queue unlocked.
         */
        /* The bio could not be merged: allocate a new request structure and
         * add it to the queue. */
        req = get_request_wait(q, rw_flags, bio);

        /*
         * After dropping the lock and possibly sleeping here, our request
         * may now be mergeable after it had proven unmergeable (above).
         * We don't worry about that case for efficiency. It won't happen
         * often, and the elevators are able to handle it.
         */
        init_request_from_bio(req, bio);

        spin_lock_irq(q->queue_lock);
        if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
            bio_flagged(bio, BIO_CPU_AFFINE))
                req->cpu = blk_cpu_to_group(smp_processor_id());
        /* Queue is empty: plug the device so requests are buffered for a
         * while before being dispatched. */
        if (!blk_queue_nonrot(q) && elv_queue_empty(q))
                blk_plug_device(q);
        /* Add the request to the queue. */
        add_request(q, req);
out:
        if (unplug || blk_queue_nonrot(q))
                __generic_unplug_device(q);
        spin_unlock_irq(q->queue_lock);
        return 0;
}

The block layer tries to collect several bios and issue them to the driver together, which is why the plug and unplug concepts were introduced. blk_plug_device plugs the device and arms a timer, and only after that delay is the device unplugged. If too many requests pile up in the meantime, the actual dispatch is also triggered early, in elv_insert:

        if (nrq >= q->unplug_thresh)
                __generic_unplug_device(q);
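To make that batching idea concrete, here is a small userspace sketch of the behaviour, not kernel code: struct sim_queue, submit(), dispatch(), timer_expired() and UNPLUG_THRESH are all made-up names for the illustration. The first request into an idle queue plugs it; buffered requests are flushed either when their count reaches the threshold (the elv_insert check quoted above) or when the plug timer fires (the timer blk_plug_device arms).

/* Userspace model of plug/unplug batching; not kernel code. */
#include <stdio.h>

#define UNPLUG_THRESH 4

struct sim_queue {
        int plugged;    /* device currently plugged? */
        int pending;    /* requests buffered while plugged */
};

static void dispatch(struct sim_queue *q, const char *why)
{
        printf("unplug (%s): dispatching %d request(s)\n", why, q->pending);
        q->pending = 0;
        q->plugged = 0;
}

static void submit(struct sim_queue *q)
{
        if (!q->plugged && q->pending == 0) {
                /* like blk_plug_device(): plug on the first request into an
                 * empty queue; the kernel also arms the unplug timer here */
                q->plugged = 1;
                printf("plug: start collecting requests\n");
        }
        q->pending++;
        /* like the check in elv_insert(): too many pending requests
         * forces an early unplug */
        if (q->pending >= UNPLUG_THRESH)
                dispatch(q, "threshold reached");
}

static void timer_expired(struct sim_queue *q)
{
        /* like the unplug timer firing after the plug delay */
        if (q->plugged && q->pending)
                dispatch(q, "timer expired");
}

int main(void)
{
        struct sim_queue q = { 0, 0 };

        submit(&q);
        submit(&q);
        timer_expired(&q);      /* only 2 pending: flushed by the timer */

        for (int i = 0; i < 5; i++)
                submit(&q);     /* the 4th request hits the threshold */
        timer_expired(&q);      /* flush the leftover request */
        return 0;
}

Running it shows flushes triggered both by the timer and by the threshold, mirroring the two unplug paths described above.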
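Going back to the two merge branches near the top of __make_request: whether a bio is a back-merge or front-merge candidate for an existing request comes down to a sector-contiguity test (the elevator additionally enforces limits through rq_mergeable() and the ll_*_merge_fn helpers). Below is a minimal model of just that test; struct io_range and try_merge() are invented for the illustration and do not match the real bio/request layout.

/* Illustrative contiguity test only; not the kernel's elv_try_merge(). */
#include <stdio.h>

struct io_range {
        unsigned long long sector;      /* first sector */
        unsigned int nr_sectors;        /* length in sectors */
};

enum merge_type { NO_MERGE, BACK_MERGE, FRONT_MERGE };

static enum merge_type try_merge(const struct io_range *rq,
                                 const struct io_range *bio)
{
        if (rq->sector + rq->nr_sectors == bio->sector)
                return BACK_MERGE;      /* bio continues the request's tail */
        if (bio->sector + bio->nr_sectors == rq->sector)
                return FRONT_MERGE;     /* bio ends where the request begins */
        return NO_MERGE;
}

int main(void)
{
        struct io_range rq   = { 100, 8 };      /* sectors 100..107 */
        struct io_range tail = { 108, 8 };      /* back-merge candidate */
        struct io_range head = {  92, 8 };      /* front-merge candidate */
        struct io_range far  = { 500, 8 };      /* cannot be merged */

        printf("%d %d %d\n", try_merge(&rq, &tail),
               try_merge(&rq, &head), try_merge(&rq, &far));
        return 0;
}

The listing above also shows why the back-merge branch is cheaper: it only appends at req->biotail and bumps the sector counts, while a front merge must additionally rewrite req->sector, req->buffer and the current-sector bookkeeping to describe the new head bio.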