X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=block%2Fll_rw_blk.c;fp=block%2Fll_rw_blk.c;h=3ecdb3476517b297c0c897edc49b58cd7a7e0895;hb=16c70f8c1b54b61c3b951b6fb220df250fe09b32;hp=4293143c93cfae1b36481f3e94d5787511890fb3;hpb=4e76c8a9fa413ccc09d3f7f664183dcce3555d57;p=linux-2.6.git

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 4293143c9..3ecdb3476 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -10,7 +10,6 @@
 /*
  * This handles all read/write requests to block devices
  */
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/backing-dev.h>
@@ -1663,6 +1662,8 @@ static void blk_unplug_timeout(unsigned long data)
  **/
 void blk_start_queue(request_queue_t *q)
 {
+	WARN_ON(!irqs_disabled());
+
 	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
 
 	/*
@@ -1878,7 +1879,8 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
  * get dealt with eventually.
  *
  * The queue spin lock must be held while manipulating the requests on the
- * request queue.
+ * request queue; this lock will be taken also from interrupt context, so irq
+ * disabling is needed for it.
  *
  * Function returns a pointer to the initialized request queue, or NULL if
  * it didn't succeed.
@@ -2514,7 +2516,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
 		   struct request *rq, int at_head)
 {
-	DECLARE_COMPLETION(wait);
+	DECLARE_COMPLETION_ONSTACK(wait);
 	char sense[SCSI_SENSE_BUFFERSIZE];
 	int err = 0;
 
@@ -2742,7 +2744,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 		return 0;
 
 	/*
-	 * not contigious
+	 * not contiguous
 	 */
 	if (req->sector + req->nr_sectors != next->sector)
 		return 0;
@@ -2824,6 +2826,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
 	if (unlikely(bio_barrier(bio)))
 		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
+	if (bio_sync(bio))
+		req->flags |= REQ_RW_SYNC;
+
 	req->errors = 0;
 	req->hard_sector = req->sector = bio->bi_sector;
 	req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
@@ -3016,6 +3021,7 @@ void generic_make_request(struct bio *bio)
 {
 	request_queue_t *q;
 	sector_t maxsector;
+	sector_t old_sector;
 	int ret, nr_sectors = bio_sectors(bio);
 	dev_t old_dev;
 
@@ -3044,7 +3050,7 @@ void generic_make_request(struct bio *bio)
 	 * NOTE: we don't repeat the blk_size check for each new device.
 	 * Stacking drivers are expected to know what they are doing.
 	 */
-	maxsector = -1;
+	old_sector = -1;
 	old_dev = 0;
 	do {
 		char b[BDEVNAME_SIZE];
@@ -3078,15 +3084,30 @@ end_io:
 		 */
 		blk_partition_remap(bio);
 
-		if (maxsector != -1)
+		if (old_sector != -1)
 			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
-					    maxsector);
+					    old_sector);
 
 		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
 
-		maxsector = bio->bi_sector;
+		old_sector = bio->bi_sector;
 		old_dev = bio->bi_bdev->bd_dev;
 
+		maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+		if (maxsector) {
+			sector_t sector = bio->bi_sector;
+
+			if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+				/*
+				 * This may well happen - partitions are not checked
+				 * to make sure they are within the size of the
+				 * whole device.
+				 */
+				handle_bad_sector(bio);
+				goto end_io;
+			}
+		}
+
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
 }
@@ -3111,9 +3132,9 @@ void submit_bio(int rw, struct bio *bio)
 	BIO_BUG_ON(!bio->bi_io_vec);
 	bio->bi_rw |= rw;
 	if (rw & WRITE)
-		mod_page_state(pgpgout, count);
+		count_vm_events(PGPGOUT, count);
 	else
-		mod_page_state(pgpgin, count);
+		count_vm_events(PGPGIN, count);
 
 	if (unlikely(block_dump)) {
 		char b[BDEVNAME_SIZE];
@@ -3359,12 +3380,11 @@ EXPORT_SYMBOL(end_that_request_chunk);
  */
static void blk_done_softirq(struct softirq_action *h)
{
-	struct list_head *cpu_list;
-	LIST_HEAD(local_list);
+	struct list_head *cpu_list, local_list;
 
 	local_irq_disable();
 	cpu_list = &__get_cpu_var(blk_cpu_done);
-	list_splice_init(cpu_list, &local_list);
+	list_replace_init(cpu_list, &local_list);
 	local_irq_enable();
 
 	while (!list_empty(&local_list)) {
@@ -3398,7 +3418,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
 }
 
 
-static struct notifier_block blk_cpu_notifier = {
+static struct notifier_block __devinitdata blk_cpu_notifier = {
 	.notifier_call	= blk_cpu_notify,
 };
 
@@ -3410,7 +3430,7 @@ static struct notifier_block blk_cpu_notifier = {
  *
  * Description:
  *     Ends all I/O on a request. It does not handle partial completions,
- *     unless the driver actually implements this in its completionc callback
+ *     unless the driver actually implements this in its completion callback
  *     through requeueing. Theh actual completion happens out-of-order,
  *     through a softirq handler. The user must have registered a completion
  *     callback through blk_queue_softirq_done().
@@ -3487,8 +3507,8 @@ EXPORT_SYMBOL(end_request);
 
 void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
 {
-	/* first three bits are identical in rq->flags and bio->bi_rw */
-	rq->flags |= (bio->bi_rw & 7);
+	/* first two bits are identical in rq->flags and bio->bi_rw */
+	rq->flags |= (bio->bi_rw & 3);
 
 	rq->nr_phys_segments = bio_phys_segments(q, bio);
 	rq->nr_hw_segments = bio_hw_segments(q, bio);
@@ -3536,9 +3556,7 @@ int __init blk_dev_init(void)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
-#ifdef CONFIG_HOTPLUG_CPU
-	register_cpu_notifier(&blk_cpu_notifier);
-#endif
+	register_hotcpu_notifier(&blk_cpu_notifier);
 
 	blk_max_low_pfn = max_low_pfn;
 	blk_max_pfn = max_pfn;
@@ -3626,6 +3644,8 @@ struct io_context *current_io_context(gfp_t gfp_flags)
 		ret->nr_batch_requests = 0; /* because this is 0 */
 		ret->aic = NULL;
 		ret->cic_root.rb_node = NULL;
+		/* make sure set_task_ioprio() sees the settings above */
+		smp_wmb();
 		tsk->io_context = ret;
 	}
 