X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=block%2Fll_rw_blk.c;h=38c293b987b77212bec48450dd1529d71a3f582f;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=1ce88cf8ff91c47897358439524974f220a5b5a4;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 1ce88cf8f..38c293b98 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c @@ -10,7 +10,6 @@ /* * This handles all read/write requests to block devices */ -#include #include #include #include @@ -26,39 +25,38 @@ #include #include #include +#include #include #include +#include +#include /* * for max sense size */ #include -static void blk_unplug_work(void *data); +static void blk_unplug_work(struct work_struct *work); static void blk_unplug_timeout(unsigned long data); static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io); static void init_request_from_bio(struct request *req, struct bio *bio); static int __make_request(request_queue_t *q, struct bio *bio); +static struct io_context *current_io_context(gfp_t gfp_flags, int node); /* * For the allocated request tables */ -static kmem_cache_t *request_cachep; +static struct kmem_cache *request_cachep; /* * For queue allocation */ -static kmem_cache_t *requestq_cachep; +static struct kmem_cache *requestq_cachep; /* * For io context allocations */ -static kmem_cache_t *iocontext_cachep; - -static wait_queue_head_t congestion_wqh[2] = { - __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]), - __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1]) - }; +static struct kmem_cache *iocontext_cachep; /* * Controlling structure to kblockd @@ -111,35 +109,6 @@ static void blk_queue_congestion_threshold(struct request_queue *q) q->nr_congestion_off = nr; } -/* - * A queue has just exitted congestion. Note this in the global counter of - * congested queues, and wake up anyone who was waiting for requests to be - * put back. - */ -static void clear_queue_congested(request_queue_t *q, int rw) -{ - enum bdi_state bit; - wait_queue_head_t *wqh = &congestion_wqh[rw]; - - bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested; - clear_bit(bit, &q->backing_dev_info.state); - smp_mb__after_clear_bit(); - if (waitqueue_active(wqh)) - wake_up(wqh); -} - -/* - * A queue has just entered congestion. Flag that in the queue's VM-visible - * state flags and increment the global gounter of congested queues. - */ -static void set_queue_congested(request_queue_t *q, int rw) -{ - enum bdi_state bit; - - bit = (rw == WRITE) ? 
BDI_write_congested : BDI_read_congested; - set_bit(bit, &q->backing_dev_info.state); -} - /** * blk_get_backing_dev_info - get the address of a queue's backing_dev_info * @bdev: device @@ -158,17 +127,8 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) ret = &q->backing_dev_info; return ret; } - EXPORT_SYMBOL(blk_get_backing_dev_info); -void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data) -{ - q->activity_fn = fn; - q->activity_data = data; -} - -EXPORT_SYMBOL(blk_queue_activity_fn); - /** * blk_queue_prep_rq - set a prepare_request function for queue * @q: queue @@ -262,7 +222,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) if (q->unplug_delay == 0) q->unplug_delay = 1; - INIT_WORK(&q->unplug_work, blk_unplug_work, q); + INIT_WORK(&q->unplug_work, blk_unplug_work); q->unplug_timer.function = blk_unplug_timeout; q->unplug_timer.data = (unsigned long)q; @@ -271,25 +231,23 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn) * by default assume old behaviour and bounce for any highmem page */ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); - - blk_queue_activity_fn(q, NULL, NULL); } EXPORT_SYMBOL(blk_queue_make_request); -static inline void rq_init(request_queue_t *q, struct request *rq) +static void rq_init(request_queue_t *q, struct request *rq) { INIT_LIST_HEAD(&rq->queuelist); INIT_LIST_HEAD(&rq->donelist); rq->errors = 0; - rq->rq_status = RQ_ACTIVE; rq->bio = rq->biotail = NULL; + INIT_HLIST_NODE(&rq->hash); + RB_CLEAR_NODE(&rq->rb_node); rq->ioprio = 0; rq->buffer = NULL; rq->ref_count = 1; rq->q = q; - rq->waiting = NULL; rq->special = NULL; rq->data_len = 0; rq->data = NULL; @@ -382,8 +340,8 @@ unsigned blk_ordered_req_seq(struct request *rq) if (rq == &q->post_flush_rq) return QUEUE_ORDSEQ_POSTFLUSH; - if ((rq->flags & REQ_ORDERED_COLOR) == - (q->orig_bar_rq->flags & REQ_ORDERED_COLOR)) + if ((rq->cmd_flags & REQ_ORDERED_COLOR) == + (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR)) return QUEUE_ORDSEQ_DRAIN; else return QUEUE_ORDSEQ_DONE; @@ -446,11 +404,11 @@ static void queue_flush(request_queue_t *q, unsigned which) end_io = post_flush_end_io; } + rq->cmd_flags = REQ_HARDBARRIER; rq_init(q, rq); - rq->flags = REQ_HARDBARRIER; rq->elevator_private = NULL; + rq->elevator_private2 = NULL; rq->rq_disk = q->bar_rq.rq_disk; - rq->rl = NULL; rq->end_io = end_io; q->prepare_flush_fn(q, rq); @@ -471,11 +429,13 @@ static inline struct request *start_ordered(request_queue_t *q, blkdev_dequeue_request(rq); q->orig_bar_rq = rq; rq = &q->bar_rq; + rq->cmd_flags = 0; rq_init(q, rq); - rq->flags = bio_data_dir(q->orig_bar_rq->bio); - rq->flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0; + if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) + rq->cmd_flags |= REQ_RW; + rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? 
REQ_FUA : 0; rq->elevator_private = NULL; - rq->rl = NULL; + rq->elevator_private2 = NULL; init_request_from_bio(rq, q->orig_bar_rq->bio); rq->end_io = bar_end_io; @@ -587,8 +547,8 @@ static int flush_dry_bio_endio(struct bio *bio, unsigned int bytes, int error) return 0; } -static inline int ordered_bio_endio(struct request *rq, struct bio *bio, - unsigned int nbytes, int error) +static int ordered_bio_endio(struct request *rq, struct bio *bio, + unsigned int nbytes, int error) { request_queue_t *q = rq->q; bio_end_io_t *endio; @@ -784,6 +744,8 @@ void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b) t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments); t->max_segment_size = min(t->max_segment_size,b->max_segment_size); t->hardsect_size = max(t->hardsect_size,b->hardsect_size); + if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) + clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags); } EXPORT_SYMBOL(blk_queue_stack_limits); @@ -835,32 +797,24 @@ EXPORT_SYMBOL(blk_queue_dma_alignment); **/ struct request *blk_queue_find_tag(request_queue_t *q, int tag) { - struct blk_queue_tag *bqt = q->queue_tags; - - if (unlikely(bqt == NULL || tag >= bqt->real_max_depth)) - return NULL; - - return bqt->tag_index[tag]; + return blk_map_queue_find_tag(q->queue_tags, tag); } EXPORT_SYMBOL(blk_queue_find_tag); /** - * __blk_queue_free_tags - release tag maintenance info - * @q: the request queue for the device + * __blk_free_tags - release a given set of tag maintenance info + * @bqt: the tag map to free * - * Notes: - * blk_cleanup_queue() will take care of calling this function, if tagging - * has been used. So there's no need to call this directly. - **/ -static void __blk_queue_free_tags(request_queue_t *q) + * Tries to free the specified @bqt@. Returns true if it was + * actually freed and false if there are still references using it + */ +static int __blk_free_tags(struct blk_queue_tag *bqt) { - struct blk_queue_tag *bqt = q->queue_tags; - - if (!bqt) - return; + int retval; - if (atomic_dec_and_test(&bqt->refcnt)) { + retval = atomic_dec_and_test(&bqt->refcnt); + if (retval) { BUG_ON(bqt->busy); BUG_ON(!list_empty(&bqt->busy_list)); @@ -871,12 +825,49 @@ static void __blk_queue_free_tags(request_queue_t *q) bqt->tag_map = NULL; kfree(bqt); + } + return retval; +} + +/** + * __blk_queue_free_tags - release tag maintenance info + * @q: the request queue for the device + * + * Notes: + * blk_cleanup_queue() will take care of calling this function, if tagging + * has been used. So there's no need to call this directly. + **/ +static void __blk_queue_free_tags(request_queue_t *q) +{ + struct blk_queue_tag *bqt = q->queue_tags; + + if (!bqt) + return; + + __blk_free_tags(bqt); + q->queue_tags = NULL; q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED); } + +/** + * blk_free_tags - release a given set of tag maintenance info + * @bqt: the tag map to free + * + * For externally managed @bqt@ frees the map. Callers of this + * function must guarantee to have released all the queues that + * might have been using this tag map. 
+ */ +void blk_free_tags(struct blk_queue_tag *bqt) +{ + if (unlikely(!__blk_free_tags(bqt))) + BUG(); +} +EXPORT_SYMBOL(blk_free_tags); + /** * blk_queue_free_tags - release tag maintenance info * @q: the request queue for the device @@ -899,23 +890,21 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth) unsigned long *tag_map; int nr_ulongs; - if (depth > q->nr_requests * 2) { + if (q && depth > q->nr_requests * 2) { depth = q->nr_requests * 2; printk(KERN_ERR "%s: adjusted depth to %d\n", __FUNCTION__, depth); } - tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC); + tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC); if (!tag_index) goto fail; nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG; - tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); + tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); if (!tag_map) goto fail; - memset(tag_index, 0, depth * sizeof(struct request *)); - memset(tag_map, 0, nr_ulongs * sizeof(unsigned long)); tags->real_max_depth = depth; tags->max_depth = depth; tags->tag_index = tag_index; @@ -927,6 +916,38 @@ fail: return -ENOMEM; } +static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q, + int depth) +{ + struct blk_queue_tag *tags; + + tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC); + if (!tags) + goto fail; + + if (init_tag_map(q, tags, depth)) + goto fail; + + INIT_LIST_HEAD(&tags->busy_list); + tags->busy = 0; + atomic_set(&tags->refcnt, 1); + return tags; +fail: + kfree(tags); + return NULL; +} + +/** + * blk_init_tags - initialize the tag info for an external tag map + * @depth: the maximum queue depth supported + * @tags: the tag to use + **/ +struct blk_queue_tag *blk_init_tags(int depth) +{ + return __blk_queue_init_tags(NULL, depth); +} +EXPORT_SYMBOL(blk_init_tags); + /** * blk_queue_init_tags - initialize the queue tag info * @q: the request queue for the device @@ -941,16 +962,10 @@ int blk_queue_init_tags(request_queue_t *q, int depth, BUG_ON(tags && q->queue_tags && tags != q->queue_tags); if (!tags && !q->queue_tags) { - tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC); - if (!tags) - goto fail; + tags = __blk_queue_init_tags(q, depth); - if (init_tag_map(q, tags, depth)) + if (!tags) goto fail; - - INIT_LIST_HEAD(&tags->busy_list); - tags->busy = 0; - atomic_set(&tags->refcnt, 1); } else if (q->queue_tags) { if ((rc = blk_queue_resize_tags(q, depth))) return rc; @@ -1001,6 +1016,13 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth) return 0; } + /* + * Currently cannot replace a shared tag map with a new + * one, so error out if this is the case + */ + if (atomic_read(&bqt->refcnt) != 1) + return -EBUSY; + /* * save the old state info, so we can copy it back */ @@ -1057,7 +1079,7 @@ void blk_queue_end_tag(request_queue_t *q, struct request *rq) } list_del_init(&rq->queuelist); - rq->flags &= ~REQ_QUEUED; + rq->cmd_flags &= ~REQ_QUEUED; rq->tag = -1; if (unlikely(bqt->tag_index[tag] == NULL)) @@ -1093,7 +1115,7 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq) struct blk_queue_tag *bqt = q->queue_tags; int tag; - if (unlikely((rq->flags & REQ_QUEUED))) { + if (unlikely((rq->cmd_flags & REQ_QUEUED))) { printk(KERN_ERR "%s: request %p for device [%s] already tagged %d", __FUNCTION__, rq, @@ -1101,13 +1123,18 @@ int blk_queue_start_tag(request_queue_t *q, struct request *rq) BUG(); } - tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth); - if (tag >= bqt->max_depth) - return 1; + /* + * 
Protect against shared tag maps, as we may not have exclusive + * access to the tag map. + */ + do { + tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth); + if (tag >= bqt->max_depth) + return 1; - __set_bit(tag, bqt->tag_map); + } while (test_and_set_bit(tag, bqt->tag_map)); - rq->flags |= REQ_QUEUED; + rq->cmd_flags |= REQ_QUEUED; rq->tag = tag; bqt->tag_index[tag] = rq; blkdev_dequeue_request(rq); @@ -1143,65 +1170,31 @@ void blk_queue_invalidate_tags(request_queue_t *q) printk(KERN_ERR "%s: bad tag found on list\n", __FUNCTION__); list_del_init(&rq->queuelist); - rq->flags &= ~REQ_QUEUED; + rq->cmd_flags &= ~REQ_QUEUED; } else blk_queue_end_tag(q, rq); - rq->flags &= ~REQ_STARTED; + rq->cmd_flags &= ~REQ_STARTED; __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0); } } EXPORT_SYMBOL(blk_queue_invalidate_tags); -static const char * const rq_flags[] = { - "REQ_RW", - "REQ_FAILFAST", - "REQ_SORTED", - "REQ_SOFTBARRIER", - "REQ_HARDBARRIER", - "REQ_FUA", - "REQ_CMD", - "REQ_NOMERGE", - "REQ_STARTED", - "REQ_DONTPREP", - "REQ_QUEUED", - "REQ_ELVPRIV", - "REQ_PC", - "REQ_BLOCK_PC", - "REQ_SENSE", - "REQ_FAILED", - "REQ_QUIET", - "REQ_SPECIAL", - "REQ_DRIVE_CMD", - "REQ_DRIVE_TASK", - "REQ_DRIVE_TASKFILE", - "REQ_PREEMPT", - "REQ_PM_SUSPEND", - "REQ_PM_RESUME", - "REQ_PM_SHUTDOWN", - "REQ_ORDERED_COLOR", -}; - void blk_dump_rq_flags(struct request *rq, char *msg) { int bit; - printk("%s: dev %s: flags = ", msg, - rq->rq_disk ? rq->rq_disk->disk_name : "?"); - bit = 0; - do { - if (rq->flags & (1 << bit)) - printk("%s ", rq_flags[bit]); - bit++; - } while (bit < __REQ_NR_BITS); + printk("%s: dev %s: type=%x, flags=%x\n", msg, + rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, + rq->cmd_flags); printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector, rq->nr_sectors, rq->current_nr_sectors); printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len); - if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) { + if (blk_pc_request(rq)) { printk("cdb: "); for (bit = 0; bit < sizeof(rq->cmd); bit++) printk("%02x ", rq->cmd[bit]); @@ -1271,7 +1264,7 @@ new_hw_segment: bio->bi_hw_segments = nr_hw_segs; bio->bi_flags |= (1 << BIO_SEG_VALID); } - +EXPORT_SYMBOL(blk_recount_segments); static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio, struct bio *nxt) @@ -1374,7 +1367,7 @@ static inline int ll_new_mergeable(request_queue_t *q, int nr_phys_segs = bio_phys_segments(q, bio); if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { - req->flags |= REQ_NOMERGE; + req->cmd_flags |= REQ_NOMERGE; if (req == q->last_merge) q->last_merge = NULL; return 0; @@ -1397,7 +1390,7 @@ static inline int ll_new_hw_segment(request_queue_t *q, if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { - req->flags |= REQ_NOMERGE; + req->cmd_flags |= REQ_NOMERGE; if (req == q->last_merge) q->last_merge = NULL; return 0; @@ -1412,8 +1405,7 @@ static inline int ll_new_hw_segment(request_queue_t *q, return 1; } -static int ll_back_merge_fn(request_queue_t *q, struct request *req, - struct bio *bio) +int ll_back_merge_fn(request_queue_t *q, struct request *req, struct bio *bio) { unsigned short max_sectors; int len; @@ -1424,7 +1416,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req, max_sectors = q->max_sectors; if (req->nr_sectors + bio_sectors(bio) > max_sectors) { - req->flags |= REQ_NOMERGE; + req->cmd_flags |= REQ_NOMERGE; if (req == 
q->last_merge) q->last_merge = NULL; return 0; @@ -1449,6 +1441,7 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req, return ll_new_hw_segment(q, req, bio); } +EXPORT_SYMBOL(ll_back_merge_fn); static int ll_front_merge_fn(request_queue_t *q, struct request *req, struct bio *bio) @@ -1463,7 +1456,7 @@ static int ll_front_merge_fn(request_queue_t *q, struct request *req, if (req->nr_sectors + bio_sectors(bio) > max_sectors) { - req->flags |= REQ_NOMERGE; + req->cmd_flags |= REQ_NOMERGE; if (req == q->last_merge) q->last_merge = NULL; return 0; @@ -1553,11 +1546,13 @@ void blk_plug_device(request_queue_t *q) * don't plug a stopped queue, it must be paired with blk_start_queue() * which will restart the queueing */ - if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)) + if (blk_queue_stopped(q)) return; - if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) + if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) { mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); + blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG); + } } EXPORT_SYMBOL(blk_plug_device); @@ -1584,7 +1579,7 @@ EXPORT_SYMBOL(blk_remove_plug); */ void __generic_unplug_device(request_queue_t *q) { - if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))) + if (unlikely(blk_queue_stopped(q))) return; if (!blk_remove_plug(q)) @@ -1621,13 +1616,20 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi, /* * devices don't necessarily have an ->unplug_fn defined */ - if (q->unplug_fn) + if (q->unplug_fn) { + blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, + q->rq.count[READ] + q->rq.count[WRITE]); + q->unplug_fn(q); + } } -static void blk_unplug_work(void *data) +static void blk_unplug_work(struct work_struct *work) { - request_queue_t *q = data; + request_queue_t *q = container_of(work, request_queue_t, unplug_work); + + blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL, + q->rq.count[READ] + q->rq.count[WRITE]); q->unplug_fn(q); } @@ -1636,6 +1638,9 @@ static void blk_unplug_timeout(unsigned long data) { request_queue_t *q = (request_queue_t *)data; + blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL, + q->rq.count[READ] + q->rq.count[WRITE]); + kblockd_schedule_work(&q->unplug_work); } @@ -1650,6 +1655,8 @@ static void blk_unplug_timeout(unsigned long data) **/ void blk_start_queue(request_queue_t *q) { + WARN_ON(!irqs_disabled()); + clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); /* @@ -1740,7 +1747,7 @@ EXPORT_SYMBOL(blk_run_queue); /** * blk_cleanup_queue: - release a &request_queue_t when it is no longer needed - * @q: the request queue to be released + * @kobj: the kobj belonging of the request queue to be released * * Description: * blk_cleanup_queue is the pair to blk_init_queue() or @@ -1753,16 +1760,11 @@ EXPORT_SYMBOL(blk_run_queue); * Hopefully the low level driver will have finished any * outstanding requests first... 
**/ -void blk_cleanup_queue(request_queue_t * q) +static void blk_release_queue(struct kobject *kobj) { + request_queue_t *q = container_of(kobj, struct request_queue, kobj); struct request_list *rl = &q->rq; - if (!atomic_dec_and_test(&q->refcnt)) - return; - - if (q->elevator) - elevator_exit(q->elevator); - blk_sync_queue(q); if (rl->rq_pool) @@ -1771,9 +1773,29 @@ void blk_cleanup_queue(request_queue_t * q) if (q->queue_tags) __blk_queue_free_tags(q); + blk_trace_shutdown(q); + kmem_cache_free(requestq_cachep, q); } +void blk_put_queue(request_queue_t *q) +{ + kobject_put(&q->kobj); +} +EXPORT_SYMBOL(blk_put_queue); + +void blk_cleanup_queue(request_queue_t * q) +{ + mutex_lock(&q->sysfs_lock); + set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); + mutex_unlock(&q->sysfs_lock); + + if (q->elevator) + elevator_exit(q->elevator); + + blk_put_queue(q); +} + EXPORT_SYMBOL(blk_cleanup_queue); static int blk_init_free_list(request_queue_t *q) @@ -1801,6 +1823,8 @@ request_queue_t *blk_alloc_queue(gfp_t gfp_mask) } EXPORT_SYMBOL(blk_alloc_queue); +static struct kobj_type queue_ktype; + request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) { request_queue_t *q; @@ -1811,11 +1835,16 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) memset(q, 0, sizeof(*q)); init_timer(&q->unplug_timer); - atomic_set(&q->refcnt, 1); + + snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue"); + q->kobj.ktype = &queue_ktype; + kobject_init(&q->kobj); q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; q->backing_dev_info.unplug_io_data = q; + mutex_init(&q->sysfs_lock); + return q; } EXPORT_SYMBOL(blk_alloc_queue_node); @@ -1842,7 +1871,8 @@ EXPORT_SYMBOL(blk_alloc_queue_node); * get dealt with eventually. * * The queue spin lock must be held while manipulating the requests on the - * request queue. + * request queue; this lock will be taken also from interrupt context, so irq + * disabling is needed for it. * * Function returns a pointer to the initialized request queue, or NULL if * it didn't succeed. 
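As an aside to the refcounting change above: with blk_cleanup_queue() now only marking the queue dead and dropping a kobject reference, a driver's setup and teardown keep their familiar shape. Below is a minimal sketch against this kernel's API, not part of the patch; the "mydev" names and the empty dispatch loop are invented for illustration, and a real driver would also set queue limits and register a gendisk.

	#include <linux/init.h>
	#include <linux/blkdev.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(mydev_lock);	/* becomes q->queue_lock; taken from irq context */
	static request_queue_t *mydev_queue;

	/* invoked with mydev_lock held and interrupts disabled */
	static void mydev_request_fn(request_queue_t *q)
	{
		struct request *rq;

		while ((rq = elv_next_request(q)) != NULL) {
			blkdev_dequeue_request(rq);
			/* ... start the transfer; complete via end_that_request_*() ... */
		}
	}

	static int __init mydev_init(void)
	{
		mydev_queue = blk_init_queue(mydev_request_fn, &mydev_lock);
		if (!mydev_queue)
			return -ENOMEM;
		return 0;
	}

	static void __exit mydev_exit(void)
	{
		/* marks the queue DEAD, exits the elevator, drops the last reference */
		blk_cleanup_queue(mydev_queue);
	}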
@@ -1867,8 +1897,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) return NULL; q->node = node_id; - if (blk_init_free_list(q)) - goto out_init; + if (blk_init_free_list(q)) { + kmem_cache_free(requestq_cachep, q); + return NULL; + } /* * if caller didn't supply a lock, they get per-queue locking with @@ -1880,9 +1912,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) } q->request_fn = rfn; - q->back_merge_fn = ll_back_merge_fn; - q->front_merge_fn = ll_front_merge_fn; - q->merge_requests_fn = ll_merge_requests_fn; q->prep_rq_fn = NULL; q->unplug_fn = generic_unplug_device; q->queue_flags = (1 << QUEUE_FLAG_CLUSTER); @@ -1904,9 +1933,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) return q; } - blk_cleanup_queue(q); -out_init: - kmem_cache_free(requestq_cachep, q); + blk_put_queue(q); return NULL; } EXPORT_SYMBOL(blk_init_queue_node); @@ -1914,7 +1941,7 @@ EXPORT_SYMBOL(blk_init_queue_node); int blk_get_queue(request_queue_t *q) { if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { - atomic_inc(&q->refcnt); + kobject_get(&q->kobj); return 0; } @@ -1925,14 +1952,13 @@ EXPORT_SYMBOL(blk_get_queue); static inline void blk_free_request(request_queue_t *q, struct request *rq) { - if (rq->flags & REQ_ELVPRIV) + if (rq->cmd_flags & REQ_ELVPRIV) elv_put_request(q, rq); mempool_free(rq, q->rq.rq_pool); } -static inline struct request * -blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, - int priv, gfp_t gfp_mask) +static struct request * +blk_alloc_request(request_queue_t *q, int rw, int priv, gfp_t gfp_mask) { struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); @@ -1940,17 +1966,17 @@ blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, return NULL; /* - * first three bits are identical in rq->flags and bio->bi_rw, + * first three bits are identical in rq->cmd_flags and bio->bi_rw, * see bio.h and blkdev.h */ - rq->flags = rw; + rq->cmd_flags = rw | REQ_ALLOCED; if (priv) { - if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) { + if (unlikely(elv_set_request(q, rq, gfp_mask))) { mempool_free(rq, q->rq.rq_pool); return NULL; } - rq->flags |= REQ_ELVPRIV; + rq->cmd_flags |= REQ_ELVPRIV; } return rq; @@ -1995,7 +2021,7 @@ static void __freed_request(request_queue_t *q, int rw) struct request_list *rl = &q->rq; if (rl->count[rw] < queue_congestion_off_threshold(q)) - clear_queue_congested(q, rw); + blk_clear_queue_congested(q, rw); if (rl->count[rw] + 1 <= q->nr_requests) { if (waitqueue_active(&rl->wait[rw])) @@ -2029,21 +2055,22 @@ static void freed_request(request_queue_t *q, int rw, int priv) * Returns NULL on failure, with queue_lock held. * Returns !NULL on success, with queue_lock *not held*. */ -static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, - gfp_t gfp_mask) +static struct request *get_request(request_queue_t *q, int rw_flags, + struct bio *bio, gfp_t gfp_mask) { struct request *rq = NULL; struct request_list *rl = &q->rq; struct io_context *ioc = NULL; + const int rw = rw_flags & 0x01; int may_queue, priv; - may_queue = elv_may_queue(q, rw, bio); + may_queue = elv_may_queue(q, rw_flags); if (may_queue == ELV_MQUEUE_NO) goto rq_starved; if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) { if (rl->count[rw]+1 >= q->nr_requests) { - ioc = current_io_context(GFP_ATOMIC); + ioc = current_io_context(GFP_ATOMIC, q->node); /* * The queue will fill after this allocation, so set * it as full, and mark this process as "batching". 
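For context on the allocation path being reworked in get_request() above, this is how a caller typically obtains and fires a request in this kernel. A hedged sketch, not from the patch: the TEST UNIT READY command, the 10-second timeout, and the function name are invented, while blk_get_request() and blk_execute_rq() are used with the signatures visible elsewhere in this diff.

	#include <linux/blkdev.h>

	static int send_test_unit_ready(request_queue_t *q, struct gendisk *disk)
	{
		struct request *rq;
		int err;

		/* with a __GFP_WAIT mask this sleeps in get_request_wait() if needed */
		rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!rq)
			return -ENOMEM;

		rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* replaces the old REQ_BLOCK_PC flag */
		rq->cmd[0] = 0x00;			/* SCSI TEST UNIT READY */
		rq->cmd_len = 6;
		rq->timeout = 10 * HZ;

		/* completes through blk_end_sync_rq(), via rq->end_io_data as shown below */
		err = blk_execute_rq(q, disk, rq, 0);
		blk_put_request(rq);
		return err;
	}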
@@ -2065,7 +2092,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, } } } - set_queue_congested(q, rw); + blk_set_queue_congested(q, rw); } /* @@ -2085,7 +2112,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, spin_unlock_irq(q->queue_lock); - rq = blk_alloc_request(q, rw, bio, priv, gfp_mask); + rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); if (unlikely(!rq)) { /* * Allocation failed presumably due to memory. Undo anything @@ -2121,7 +2148,8 @@ rq_starved: ioc->nr_batch_requests--; rq_init(q, rq); - rq->rl = rl; + + blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ); out: return rq; } @@ -2132,12 +2160,13 @@ out: * * Called with q->queue_lock held, and returns with it unlocked. */ -static struct request *get_request_wait(request_queue_t *q, int rw, +static struct request *get_request_wait(request_queue_t *q, int rw_flags, struct bio *bio) { + const int rw = rw_flags & 0x01; struct request *rq; - rq = get_request(q, rw, bio, GFP_NOIO); + rq = get_request(q, rw_flags, bio, GFP_NOIO); while (!rq) { DEFINE_WAIT(wait); struct request_list *rl = &q->rq; @@ -2145,11 +2174,13 @@ static struct request *get_request_wait(request_queue_t *q, int rw, prepare_to_wait_exclusive(&rl->wait[rw], &wait, TASK_UNINTERRUPTIBLE); - rq = get_request(q, rw, bio, GFP_NOIO); + rq = get_request(q, rw_flags, bio, GFP_NOIO); if (!rq) { struct io_context *ioc; + blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ); + __generic_unplug_device(q); spin_unlock_irq(q->queue_lock); io_schedule(); @@ -2160,7 +2191,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw, * up to a big batch of them for a small period time. * See ioc_batching, ioc_set_batching */ - ioc = current_io_context(GFP_NOIO); + ioc = current_io_context(GFP_NOIO, q->node); ioc_set_batching(q, ioc); spin_lock_irq(q->queue_lock); @@ -2191,6 +2222,25 @@ struct request *blk_get_request(request_queue_t *q, int rw, gfp_t gfp_mask) } EXPORT_SYMBOL(blk_get_request); +/** + * blk_start_queueing - initiate dispatch of requests to device + * @q: request queue to kick into gear + * + * This is basically a helper to remove the need to know whether a queue + * is plugged or not if someone just wants to initiate dispatch of requests + * for this queue. + * + * The queue lock must be held with interrupts disabled. 
+ */ +void blk_start_queueing(request_queue_t *q) +{ + if (!blk_queue_plugged(q)) + q->request_fn(q); + else + __generic_unplug_device(q); +} +EXPORT_SYMBOL(blk_start_queueing); + /** * blk_requeue_request - put a request back on queue * @q: request queue where request should be inserted @@ -2203,6 +2253,8 @@ EXPORT_SYMBOL(blk_get_request); */ void blk_requeue_request(request_queue_t *q, struct request *rq) { + blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); + if (blk_rq_tagged(rq)) blk_queue_end_tag(q, rq); @@ -2241,7 +2293,8 @@ void blk_insert_request(request_queue_t *q, struct request *rq, * must not attempt merges on this) and that it acts as a soft * barrier */ - rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER; + rq->cmd_type = REQ_TYPE_SPECIAL; + rq->cmd_flags |= REQ_SOFTBARRIER; rq->special = data; @@ -2255,16 +2308,79 @@ void blk_insert_request(request_queue_t *q, struct request *rq, drive_stat_acct(rq, rq->nr_sectors, 1); __elv_add_request(q, rq, where, 0); - - if (blk_queue_plugged(q)) - __generic_unplug_device(q); - else - q->request_fn(q); + blk_start_queueing(q); spin_unlock_irqrestore(q->queue_lock, flags); } EXPORT_SYMBOL(blk_insert_request); +static int __blk_rq_unmap_user(struct bio *bio) +{ + int ret = 0; + + if (bio) { + if (bio_flagged(bio, BIO_USER_MAPPED)) + bio_unmap_user(bio); + else + ret = bio_uncopy_user(bio); + } + + return ret; +} + +static int __blk_rq_map_user(request_queue_t *q, struct request *rq, + void __user *ubuf, unsigned int len) +{ + unsigned long uaddr; + struct bio *bio, *orig_bio; + int reading, ret; + + reading = rq_data_dir(rq) == READ; + + /* + * if alignment requirement is satisfied, map in user pages for + * direct dma. else, set up kernel bounce buffers + */ + uaddr = (unsigned long) ubuf; + if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) + bio = bio_map_user(q, NULL, uaddr, len, reading); + else + bio = bio_copy_user(q, uaddr, len, reading); + + if (IS_ERR(bio)) + return PTR_ERR(bio); + + orig_bio = bio; + blk_queue_bounce(q, &bio); + + /* + * We link the bounce buffer in and could have to traverse it + * later so we have to get a ref to prevent it from being freed + */ + bio_get(bio); + + if (!rq->bio) + blk_rq_bio_prep(q, rq, bio); + else if (!ll_back_merge_fn(q, rq, bio)) { + ret = -EINVAL; + goto unmap_bio; + } else { + rq->biotail->bi_next = bio; + rq->biotail = bio; + + rq->data_len += bio->bi_size; + } + + return bio->bi_size; + +unmap_bio: + /* if it was boucned we must call the end io function */ + bio_endio(bio, bio->bi_size, 0); + __blk_rq_unmap_user(orig_bio); + bio_put(bio); + return ret; +} + /** * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage * @q: request queue where request should be inserted @@ -2286,42 +2402,47 @@ EXPORT_SYMBOL(blk_insert_request); * unmapping. */ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf, - unsigned int len) + unsigned long len) { - unsigned long uaddr; - struct bio *bio; - int reading; + unsigned long bytes_read = 0; + struct bio *bio = NULL; + int ret; if (len > (q->max_hw_sectors << 9)) return -EINVAL; if (!len || !ubuf) return -EINVAL; - reading = rq_data_dir(rq) == READ; + while (bytes_read != len) { + unsigned long map_len, end, start; - /* - * if alignment requirement is satisfied, map in user pages for - * direct dma. 
else, set up kernel bounce buffers - */ - uaddr = (unsigned long) ubuf; - if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) - bio = bio_map_user(q, NULL, uaddr, len, reading); - else - bio = bio_copy_user(q, uaddr, len, reading); + map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE); + end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1) + >> PAGE_SHIFT; + start = (unsigned long)ubuf >> PAGE_SHIFT; - if (!IS_ERR(bio)) { - rq->bio = rq->biotail = bio; - blk_rq_bio_prep(q, rq, bio); + /* + * A bad offset could cause us to require BIO_MAX_PAGES + 1 + * pages. If this happens we just lower the requested + * mapping len by a page so that we can fit + */ + if (end - start > BIO_MAX_PAGES) + map_len -= PAGE_SIZE; - rq->buffer = rq->data = NULL; - rq->data_len = len; - return 0; + ret = __blk_rq_map_user(q, rq, ubuf, map_len); + if (ret < 0) + goto unmap_rq; + if (!bio) + bio = rq->bio; + bytes_read += ret; + ubuf += ret; } - /* - * bio is the err-ptr - */ - return PTR_ERR(bio); + rq->buffer = rq->data = NULL; + return 0; +unmap_rq: + blk_rq_unmap_user(bio); + return ret; } EXPORT_SYMBOL(blk_rq_map_user); @@ -2332,6 +2453,7 @@ EXPORT_SYMBOL(blk_rq_map_user); * @rq: request to map data to * @iov: pointer to the iovec * @iov_count: number of elements in the iovec + * @len: I/O byte count * * Description: * Data will be mapped directly for zero copy io, if possible. Otherwise @@ -2347,7 +2469,7 @@ EXPORT_SYMBOL(blk_rq_map_user); * unmapping. */ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq, - struct sg_iovec *iov, int iov_count) + struct sg_iovec *iov, int iov_count, unsigned int len) { struct bio *bio; @@ -2361,10 +2483,15 @@ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq, if (IS_ERR(bio)) return PTR_ERR(bio); - rq->bio = rq->biotail = bio; + if (bio->bi_size != len) { + bio_endio(bio, bio->bi_size, 0); + bio_unmap_user(bio); + return -EINVAL; + } + + bio_get(bio); blk_rq_bio_prep(q, rq, bio); rq->buffer = rq->data = NULL; - rq->data_len = bio->bi_size; return 0; } @@ -2372,24 +2499,33 @@ EXPORT_SYMBOL(blk_rq_map_user_iov); /** * blk_rq_unmap_user - unmap a request with user data - * @bio: bio to be unmapped - * @ulen: length of user buffer + * @bio: start of bio list * * Description: - * Unmap a bio previously mapped by blk_rq_map_user(). + * Unmap a rq previously mapped by blk_rq_map_user(). The caller must + * supply the original rq->bio from the blk_rq_map_user() return, since + * the io completion may have changed rq->bio. */ -int blk_rq_unmap_user(struct bio *bio, unsigned int ulen) +int blk_rq_unmap_user(struct bio *bio) { - int ret = 0; + struct bio *mapped_bio; + int ret = 0, ret2; - if (bio) { - if (bio_flagged(bio, BIO_USER_MAPPED)) - bio_unmap_user(bio); - else - ret = bio_uncopy_user(bio); + while (bio) { + mapped_bio = bio; + if (unlikely(bio_flagged(bio, BIO_BOUNCED))) + mapped_bio = bio->bi_private; + + ret2 = __blk_rq_unmap_user(mapped_bio); + if (ret2 && !ret) + ret = ret2; + + mapped_bio = bio; + bio = bio->bi_next; + bio_put(mapped_bio); } - return 0; + return ret; } EXPORT_SYMBOL(blk_rq_unmap_user); @@ -2419,11 +2555,8 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf, if (rq_data_dir(rq) == WRITE) bio->bi_rw |= (1 << BIO_RW); - rq->bio = rq->biotail = bio; blk_rq_bio_prep(q, rq, bio); - rq->buffer = rq->data = NULL; - rq->data_len = len; return 0; } @@ -2448,12 +2581,14 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk, int where = at_head ? 
ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; rq->rq_disk = bd_disk; - rq->flags |= REQ_NOMERGE; + rq->cmd_flags |= REQ_NOMERGE; rq->end_io = done; - elv_add_request(q, rq, where, 1); - generic_unplug_device(q); + WARN_ON(irqs_disabled()); + spin_lock_irq(q->queue_lock); + __elv_add_request(q, rq, where, 1); + __generic_unplug_device(q); + spin_unlock_irq(q->queue_lock); } - EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); /** @@ -2470,7 +2605,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk, struct request *rq, int at_head) { - DECLARE_COMPLETION(wait); + DECLARE_COMPLETION_ONSTACK(wait); char sense[SCSI_SENSE_BUFFERSIZE]; int err = 0; @@ -2486,10 +2621,9 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk, rq->sense_len = 0; } - rq->waiting = &wait; + rq->end_io_data = &wait; blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq); wait_for_completion(&wait); - rq->waiting = NULL; if (rq->errors) err = -EIO; @@ -2551,9 +2685,6 @@ static inline void add_request(request_queue_t * q, struct request * req) { drive_stat_acct(req, req->nr_sectors, 1); - if (q->activity_fn) - q->activity_fn(q->activity_data, rq_data_dir(req)); - /* * elevator indicated where it wants this request to be * inserted at elevator_merge time @@ -2598,8 +2729,6 @@ EXPORT_SYMBOL_GPL(disk_round_stats); */ void __blk_put_request(request_queue_t *q, struct request *req) { - struct request_list *rl = req->rl; - if (unlikely(!q)) return; if (unlikely(--req->ref_count)) @@ -2607,18 +2736,16 @@ void __blk_put_request(request_queue_t *q, struct request *req) elv_completed_request(q, req); - req->rq_status = RQ_INACTIVE; - req->rl = NULL; - /* * Request may not have originated from ll_rw_blk. if not, * it didn't come out of our reserved rq pools */ - if (rl) { + if (req->cmd_flags & REQ_ALLOCED) { int rw = rq_data_dir(req); - int priv = req->flags & REQ_ELVPRIV; + int priv = req->cmd_flags & REQ_ELVPRIV; BUG_ON(!list_empty(&req->queuelist)); + BUG_ON(!hlist_unhashed(&req->hash)); blk_free_request(q, req); freed_request(q, rw, priv); @@ -2652,9 +2779,9 @@ EXPORT_SYMBOL(blk_put_request); */ void blk_end_sync_rq(struct request *rq, int error) { - struct completion *waiting = rq->waiting; + struct completion *waiting = rq->end_io_data; - rq->waiting = NULL; + rq->end_io_data = NULL; __blk_put_request(rq->q, rq); /* @@ -2665,29 +2792,6 @@ void blk_end_sync_rq(struct request *rq, int error) } EXPORT_SYMBOL(blk_end_sync_rq); -/** - * blk_congestion_wait - wait for a queue to become uncongested - * @rw: READ or WRITE - * @timeout: timeout in jiffies - * - * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion. - * If no queues are congested then just wait for the next request to be - * returned. 
- */ -long blk_congestion_wait(int rw, long timeout) -{ - long ret; - DEFINE_WAIT(wait); - wait_queue_head_t *wqh = &congestion_wqh[rw]; - - prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE); - ret = io_schedule_timeout(timeout); - finish_wait(wqh, &wait); - return ret; -} - -EXPORT_SYMBOL(blk_congestion_wait); - /* * Has to be called with the request spinlock acquired */ @@ -2698,14 +2802,14 @@ static int attempt_merge(request_queue_t *q, struct request *req, return 0; /* - * not contigious + * not contiguous */ if (req->sector + req->nr_sectors != next->sector) return 0; if (rq_data_dir(req) != rq_data_dir(next) || req->rq_disk != next->rq_disk - || next->waiting || next->special) + || next->special) return 0; /* @@ -2714,7 +2818,7 @@ static int attempt_merge(request_queue_t *q, struct request *req, * will have updated segment counts, update sector * counts here. */ - if (!q->merge_requests_fn(q, req, next)) + if (!ll_merge_requests_fn(q, req, next)) return 0; /* @@ -2766,19 +2870,24 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq) static void init_request_from_bio(struct request *req, struct bio *bio) { - req->flags |= REQ_CMD; + req->cmd_type = REQ_TYPE_FS; /* * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST) */ if (bio_rw_ahead(bio) || bio_failfast(bio)) - req->flags |= REQ_FAILFAST; + req->cmd_flags |= REQ_FAILFAST; /* * REQ_BARRIER implies no merging, but lets make it explicit */ if (unlikely(bio_barrier(bio))) - req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE); + req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE); + + if (bio_sync(bio)) + req->cmd_flags |= REQ_RW_SYNC; + if (bio_rw_meta(bio)) + req->cmd_flags |= REQ_RW_META; req->errors = 0; req->hard_sector = req->sector = bio->bi_sector; @@ -2787,7 +2896,6 @@ static void init_request_from_bio(struct request *req, struct bio *bio) req->nr_phys_segments = bio_phys_segments(req->q, bio); req->nr_hw_segments = bio_hw_segments(req->q, bio); req->buffer = bio_data(bio); /* see ->buffer comment above */ - req->waiting = NULL; req->bio = req->biotail = bio; req->ioprio = bio_prio(bio); req->rq_disk = bio->bi_bdev->bd_disk; @@ -2797,17 +2905,12 @@ static void init_request_from_bio(struct request *req, struct bio *bio) static int __make_request(request_queue_t *q, struct bio *bio) { struct request *req; - int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync; - unsigned short prio; - sector_t sector; + int el_ret, nr_sectors, barrier, err; + const unsigned short prio = bio_prio(bio); + const int sync = bio_sync(bio); + int rw_flags; - sector = bio->bi_sector; nr_sectors = bio_sectors(bio); - cur_nr_sectors = bio_cur_sectors(bio); - prio = bio_prio(bio); - - rw = bio_data_dir(bio); - sync = bio_sync(bio); /* * low level driver can indicate that it wants pages above a @@ -2816,8 +2919,6 @@ static int __make_request(request_queue_t *q, struct bio *bio) */ blk_queue_bounce(q, &bio); - spin_lock_prefetch(q->queue_lock); - barrier = bio_barrier(bio); if (unlikely(barrier) && (q->next_ordered == QUEUE_ORDERED_NONE)) { err = -EOPNOTSUPP; @@ -2834,24 +2935,28 @@ static int __make_request(request_queue_t *q, struct bio *bio) case ELEVATOR_BACK_MERGE: BUG_ON(!rq_mergeable(req)); - if (!q->back_merge_fn(q, req, bio)) + if (!ll_back_merge_fn(q, req, bio)) break; + blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); + req->biotail->bi_next = bio; req->biotail = bio; req->nr_sectors = req->hard_nr_sectors += nr_sectors; req->ioprio = ioprio_best(req->ioprio, prio); drive_stat_acct(req, nr_sectors, 0); if 
(!attempt_back_merge(q, req)) - elv_merged_request(q, req); + elv_merged_request(q, req, el_ret); goto out; case ELEVATOR_FRONT_MERGE: BUG_ON(!rq_mergeable(req)); - if (!q->front_merge_fn(q, req, bio)) + if (!ll_front_merge_fn(q, req, bio)) break; + blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); + bio->bi_next = req->bio; req->bio = bio; @@ -2861,14 +2966,14 @@ static int __make_request(request_queue_t *q, struct bio *bio) * not touch req->buffer either... */ req->buffer = bio_data(bio); - req->current_nr_sectors = cur_nr_sectors; - req->hard_cur_sectors = cur_nr_sectors; - req->sector = req->hard_sector = sector; + req->current_nr_sectors = bio_cur_sectors(bio); + req->hard_cur_sectors = req->current_nr_sectors; + req->sector = req->hard_sector = bio->bi_sector; req->nr_sectors = req->hard_nr_sectors += nr_sectors; req->ioprio = ioprio_best(req->ioprio, prio); drive_stat_acct(req, nr_sectors, 0); if (!attempt_front_merge(q, req)) - elv_merged_request(q, req); + elv_merged_request(q, req, el_ret); goto out; /* ELV_NO_MERGE: elevator says don't/can't merge. */ @@ -2877,11 +2982,20 @@ static int __make_request(request_queue_t *q, struct bio *bio) } get_rq: + /* + * This sync check and mask will be re-done in init_request_from_bio(), + * but we need to set it earlier to expose the sync flag to the + * rq allocator and io schedulers. + */ + rw_flags = bio_data_dir(bio); + if (sync) + rw_flags |= REQ_RW_SYNC; + /* * Grab a free request. This is might sleep but can not fail. * Returns with the queue unlocked. */ - req = get_request_wait(q, rw, bio); + req = get_request_wait(q, rw_flags, bio); /* * After dropping the lock and possibly sleeping here, our request @@ -2940,6 +3054,42 @@ static void handle_bad_sector(struct bio *bio) set_bit(BIO_EOF, &bio->bi_flags); } +#ifdef CONFIG_FAIL_MAKE_REQUEST + +static DECLARE_FAULT_ATTR(fail_make_request); + +static int __init setup_fail_make_request(char *str) +{ + return setup_fault_attr(&fail_make_request, str); +} +__setup("fail_make_request=", setup_fail_make_request); + +static int should_fail_request(struct bio *bio) +{ + if ((bio->bi_bdev->bd_disk->flags & GENHD_FL_FAIL) || + (bio->bi_bdev->bd_part && bio->bi_bdev->bd_part->make_it_fail)) + return should_fail(&fail_make_request, bio->bi_size); + + return 0; +} + +static int __init fail_make_request_debugfs(void) +{ + return init_fault_attr_dentries(&fail_make_request, + "fail_make_request"); +} + +late_initcall(fail_make_request_debugfs); + +#else /* CONFIG_FAIL_MAKE_REQUEST */ + +static inline int should_fail_request(struct bio *bio) +{ + return 0; +} + +#endif /* CONFIG_FAIL_MAKE_REQUEST */ + /** * generic_make_request: hand a buffer to its device driver for I/O * @bio: The bio describing the location in memory and on the device. @@ -2968,7 +3118,9 @@ void generic_make_request(struct bio *bio) { request_queue_t *q; sector_t maxsector; + sector_t old_sector; int ret, nr_sectors = bio_sectors(bio); + dev_t old_dev; might_sleep(); /* Test device or partition size, when known. */ @@ -2995,6 +3147,8 @@ void generic_make_request(struct bio *bio) * NOTE: we don't repeat the blk_size check for each new device. * Stacking drivers are expected to know what they are doing. */ + old_sector = -1; + old_dev = 0; do { char b[BDEVNAME_SIZE]; @@ -3021,12 +3175,40 @@ end_io: if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) goto end_io; + if (should_fail_request(bio)) + goto end_io; + /* * If this device has partitions, remap block n * of partition p to block n+start(p) of the disk. 
*/ blk_partition_remap(bio); + if (old_sector != -1) + blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, + old_sector); + + blk_add_trace_bio(q, bio, BLK_TA_QUEUE); + + old_sector = bio->bi_sector; + old_dev = bio->bi_bdev->bd_dev; + + maxsector = bio->bi_bdev->bd_inode->i_size >> 9; + if (maxsector) { + sector_t sector = bio->bi_sector; + + if (maxsector < nr_sectors || + maxsector - nr_sectors < sector) { + /* + * This may well happen - partitions are not + * checked to make sure they are within the size + * of the whole device. + */ + handle_bad_sector(bio); + goto end_io; + } + } + ret = q->make_request_fn(q, bio); } while (ret); } @@ -3050,10 +3232,12 @@ void submit_bio(int rw, struct bio *bio) BIO_BUG_ON(!bio->bi_size); BIO_BUG_ON(!bio->bi_io_vec); bio->bi_rw |= rw; - if (rw & WRITE) - mod_page_state(pgpgout, count); - else - mod_page_state(pgpgin, count); + if (rw & WRITE) { + count_vm_events(PGPGOUT, count); + } else { + task_io_account_read(bio->bi_size); + count_vm_events(PGPGIN, count); + } if (unlikely(block_dump)) { char b[BDEVNAME_SIZE]; @@ -3146,6 +3330,8 @@ static int __end_that_request_first(struct request *req, int uptodate, int total_bytes, bio_nbytes, error, next_idx = 0; struct bio *bio; + blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE); + /* * extend uptodate bool to allow < 0 value to be direct io error */ @@ -3161,7 +3347,7 @@ static int __end_that_request_first(struct request *req, int uptodate, req->errors = 0; if (!uptodate) { - if (blk_fs_request(req) && !(req->flags & REQ_QUIET)) + if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET)) printk("end_request: I/O error, dev %s, sector %llu\n", req->rq_disk ? req->rq_disk->disk_name : "?", (unsigned long long)req->sector); @@ -3297,12 +3483,11 @@ EXPORT_SYMBOL(end_that_request_chunk); */ static void blk_done_softirq(struct softirq_action *h) { - struct list_head *cpu_list; - LIST_HEAD(local_list); + struct list_head *cpu_list, local_list; local_irq_disable(); cpu_list = &__get_cpu_var(blk_cpu_done); - list_splice_init(cpu_list, &local_list); + list_replace_init(cpu_list, &local_list); local_irq_enable(); while (!list_empty(&local_list)) { @@ -3313,8 +3498,6 @@ static void blk_done_softirq(struct softirq_action *h) } } -#ifdef CONFIG_HOTPLUG_CPU - static int blk_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { @@ -3340,15 +3523,13 @@ static struct notifier_block __devinitdata blk_cpu_notifier = { .notifier_call = blk_cpu_notify, }; -#endif /* CONFIG_HOTPLUG_CPU */ - /** * blk_complete_request - end I/O on a request * @req: the request being processed * * Description: * Ends all I/O on a request. It does not handle partial completions, - * unless the driver actually implements this in its completionc callback + * unless the driver actually implements this in its completion callback * through requeueing. Theh actual completion happens out-of-order, * through a softirq handler. The user must have registered a completion * callback through blk_queue_softirq_done(). @@ -3390,7 +3571,12 @@ void end_that_request_last(struct request *req, int uptodate) if (unlikely(laptop_mode) && blk_fs_request(req)) laptop_io_completion(); - if (disk && blk_fs_request(req)) { + /* + * Account IO completion. bar_rq isn't accounted as a normal + * IO on queueing nor completion. Accounting the containing + * request is enough. 
+ */ + if (disk && blk_fs_request(req) && req != &req->q->bar_rq) { unsigned long duration = jiffies - req->start_time; const int rw = rq_data_dir(req); @@ -3420,8 +3606,8 @@ EXPORT_SYMBOL(end_request); void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio) { - /* first three bits are identical in rq->flags and bio->bi_rw */ - rq->flags |= (bio->bi_rw & 7); + /* first two bits are identical in rq->cmd_flags and bio->bi_rw */ + rq->cmd_flags |= (bio->bi_rw & 3); rq->nr_phys_segments = bio_phys_segments(q, bio); rq->nr_hw_segments = bio_hw_segments(q, bio); @@ -3429,6 +3615,7 @@ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio) rq->hard_cur_sectors = rq->current_nr_sectors; rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); rq->buffer = bio_data(bio); + rq->data_len = bio->bi_size; rq->bio = rq->biotail = bio; } @@ -3465,13 +3652,11 @@ int __init blk_dev_init(void) iocontext_cachep = kmem_cache_create("blkdev_ioc", sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL); - for_each_cpu(i) + for_each_possible_cpu(i) INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL); -#ifdef CONFIG_HOTPLUG_CPU - register_cpu_notifier(&blk_cpu_notifier); -#endif + register_hotcpu_notifier(&blk_cpu_notifier); blk_max_low_pfn = max_low_pfn; blk_max_pfn = max_pfn; @@ -3490,10 +3675,18 @@ void put_io_context(struct io_context *ioc) BUG_ON(atomic_read(&ioc->refcount) == 0); if (atomic_dec_and_test(&ioc->refcount)) { + struct cfq_io_context *cic; + + rcu_read_lock(); if (ioc->aic && ioc->aic->dtor) ioc->aic->dtor(ioc->aic); - if (ioc->cic && ioc->cic->dtor) - ioc->cic->dtor(ioc->cic); + if (ioc->cic_root.rb_node != NULL) { + struct rb_node *n = rb_first(&ioc->cic_root); + + cic = rb_entry(n, struct cfq_io_context, rb_node); + cic->dtor(ioc); + } + rcu_read_unlock(); kmem_cache_free(iocontext_cachep, ioc); } @@ -3503,21 +3696,21 @@ EXPORT_SYMBOL(put_io_context); /* Called by the exitting task */ void exit_io_context(void) { - unsigned long flags; struct io_context *ioc; + struct cfq_io_context *cic; - local_irq_save(flags); task_lock(current); ioc = current->io_context; current->io_context = NULL; - ioc->task = NULL; task_unlock(current); - local_irq_restore(flags); + ioc->task = NULL; if (ioc->aic && ioc->aic->exit) ioc->aic->exit(ioc->aic); - if (ioc->cic && ioc->cic->exit) - ioc->cic->exit(ioc->cic); + if (ioc->cic_root.rb_node != NULL) { + cic = rb_entry(rb_first(&ioc->cic_root), struct cfq_io_context, rb_node); + cic->exit(ioc); + } put_io_context(ioc); } @@ -3530,7 +3723,7 @@ void exit_io_context(void) * but since the current task itself holds a reference, the context can be * used in general code, so long as it stays within `current` context. */ -struct io_context *current_io_context(gfp_t gfp_flags) +static struct io_context *current_io_context(gfp_t gfp_flags, int node) { struct task_struct *tsk = current; struct io_context *ret; @@ -3539,15 +3732,17 @@ struct io_context *current_io_context(gfp_t gfp_flags) if (likely(ret)) return ret; - ret = kmem_cache_alloc(iocontext_cachep, gfp_flags); + ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); if (ret) { atomic_set(&ret->refcount, 1); ret->task = current; - ret->set_ioprio = NULL; + ret->ioprio_changed = 0; ret->last_waited = jiffies; /* doesn't matter... 
*/ ret->nr_batch_requests = 0; /* because this is 0 */ ret->aic = NULL; - ret->cic = NULL; + ret->cic_root.rb_node = NULL; + /* make sure set_task_ioprio() sees the settings above */ + smp_wmb(); tsk->io_context = ret; } @@ -3561,10 +3756,10 @@ EXPORT_SYMBOL(current_io_context); * * This is always called in the context of the task which submitted the I/O. */ -struct io_context *get_io_context(gfp_t gfp_flags) +struct io_context *get_io_context(gfp_t gfp_flags, int node) { struct io_context *ret; - ret = current_io_context(gfp_flags); + ret = current_io_context(gfp_flags, node); if (likely(ret)) atomic_inc(&ret->refcount); return ret; @@ -3627,21 +3822,24 @@ static ssize_t queue_requests_store(struct request_queue *q, const char *page, size_t count) { struct request_list *rl = &q->rq; + unsigned long nr; + int ret = queue_var_store(&nr, page, count); + if (nr < BLKDEV_MIN_RQ) + nr = BLKDEV_MIN_RQ; - int ret = queue_var_store(&q->nr_requests, page, count); - if (q->nr_requests < BLKDEV_MIN_RQ) - q->nr_requests = BLKDEV_MIN_RQ; + spin_lock_irq(q->queue_lock); + q->nr_requests = nr; blk_queue_congestion_threshold(q); if (rl->count[READ] >= queue_congestion_on_threshold(q)) - set_queue_congested(q, READ); + blk_set_queue_congested(q, READ); else if (rl->count[READ] < queue_congestion_off_threshold(q)) - clear_queue_congested(q, READ); + blk_clear_queue_congested(q, READ); if (rl->count[WRITE] >= queue_congestion_on_threshold(q)) - set_queue_congested(q, WRITE); + blk_set_queue_congested(q, WRITE); else if (rl->count[WRITE] < queue_congestion_off_threshold(q)) - clear_queue_congested(q, WRITE); + blk_clear_queue_congested(q, WRITE); if (rl->count[READ] >= q->nr_requests) { blk_set_queue_full(q, READ); @@ -3656,6 +3854,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) blk_clear_queue_full(q, WRITE); wake_up(&rl->wait[WRITE]); } + spin_unlock_irq(q->queue_lock); return ret; } @@ -3673,9 +3872,6 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count) ssize_t ret = queue_var_store(&ra_kb, page, count); spin_lock_irq(q->queue_lock); - if (ra_kb > (q->max_sectors >> 1)) - ra_kb = (q->max_sectors >> 1); - q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); spin_unlock_irq(q->queue_lock); @@ -3771,13 +3967,19 @@ static ssize_t queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) { struct queue_sysfs_entry *entry = to_queue(attr); - struct request_queue *q; + request_queue_t *q = container_of(kobj, struct request_queue, kobj); + ssize_t res; - q = container_of(kobj, struct request_queue, kobj); if (!entry->show) return -EIO; - - return entry->show(q, page); + mutex_lock(&q->sysfs_lock); + if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { + mutex_unlock(&q->sysfs_lock); + return -ENOENT; + } + res = entry->show(q, page); + mutex_unlock(&q->sysfs_lock); + return res; } static ssize_t @@ -3785,13 +3987,20 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr, const char *page, size_t length) { struct queue_sysfs_entry *entry = to_queue(attr); - struct request_queue *q; + request_queue_t *q = container_of(kobj, struct request_queue, kobj); + + ssize_t res; - q = container_of(kobj, struct request_queue, kobj); if (!entry->store) return -EIO; - - return entry->store(q, page, length); + mutex_lock(&q->sysfs_lock); + if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { + mutex_unlock(&q->sysfs_lock); + return -ENOENT; + } + res = entry->store(q, page, length); + mutex_unlock(&q->sysfs_lock); + return res; } static 
struct sysfs_ops queue_sysfs_ops = { @@ -3802,6 +4011,7 @@ static struct sysfs_ops queue_sysfs_ops = { static struct kobj_type queue_ktype = { .sysfs_ops = &queue_sysfs_ops, .default_attrs = default_attrs, + .release = blk_release_queue, }; int blk_register_queue(struct gendisk *disk) @@ -3814,19 +4024,17 @@ int blk_register_queue(struct gendisk *disk) return -ENXIO; q->kobj.parent = kobject_get(&disk->kobj); - if (!q->kobj.parent) - return -EBUSY; - snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue"); - q->kobj.ktype = &queue_ktype; - - ret = kobject_register(&q->kobj); + ret = kobject_add(&q->kobj); if (ret < 0) return ret; + kobject_uevent(&q->kobj, KOBJ_ADD); + ret = elv_register_queue(q); if (ret) { - kobject_unregister(&q->kobj); + kobject_uevent(&q->kobj, KOBJ_REMOVE); + kobject_del(&q->kobj); return ret; } @@ -3840,7 +4048,8 @@ void blk_unregister_queue(struct gendisk *disk) if (q && q->request_fn) { elv_unregister_queue(q); - kobject_unregister(&q->kobj); + kobject_uevent(&q->kobj, KOBJ_REMOVE); + kobject_del(&q->kobj); kobject_put(&disk->kobj); } }
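A closing illustration of the sysfs plumbing this patch reworks: queue attributes are plain queue_sysfs_entry objects, and queue_attr_show()/queue_attr_store() above now take q->sysfs_lock and reject DEAD queues before calling the handlers, so the handlers themselves stay simple. The sketch below is not part of the patch; the "example" attribute name is invented, and queue_var_show() is the helper this file already uses for the real attributes.

	static ssize_t queue_example_show(struct request_queue *q, char *page)
	{
		/* runs under q->sysfs_lock; the queue is guaranteed not DEAD here */
		return queue_var_show(q->nr_requests, page);
	}

	static struct queue_sysfs_entry queue_example_entry = {
		.attr = { .name = "example", .mode = S_IRUGO },
		.show = queue_example_show,
	};

	/* to expose it, the entry's .attr would be added to default_attrs[] */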