Merge to Fedora kernel-2.6.18-1.2224_FC5 patched with stable patch-2.6.18.1-vs2.0...
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 4293143..3ecdb34 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -10,7 +10,6 @@
 /*
  * This handles all read/write requests to block devices
  */
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/backing-dev.h>
@@ -1663,6 +1662,8 @@ static void blk_unplug_timeout(unsigned long data)
  **/
 void blk_start_queue(request_queue_t *q)
 {
+       WARN_ON(!irqs_disabled());
+
        clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
 
        /*
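The WARN_ON above documents the locking contract rather than changing behaviour:
blk_start_queue() must run with interrupts disabled, normally because the caller
already holds the queue lock via spin_lock_irqsave(). A minimal sketch of a
conforming caller (the mydrv names are illustrative, not part of this patch):

        static void mydrv_resume(struct mydrv_device *dev)
        {
                unsigned long flags;

                /* the queue lock is also taken from irq context, so the
                 * irqsave variant is required; irqs_disabled() is then
                 * true inside blk_start_queue() and the WARN_ON is quiet */
                spin_lock_irqsave(dev->queue->queue_lock, flags);
                blk_start_queue(dev->queue);
                spin_unlock_irqrestore(dev->queue->queue_lock, flags);
        }
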
@@ -1878,7 +1879,8 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
  *    get dealt with eventually.
  *
  *    The queue spin lock must be held while manipulating the requests on the
- *    request queue.
+ *    request queue; this lock will be taken also from interrupt context, so irq
+ *    disabling is needed for it.
  *
  *    Function returns a pointer to the initialized request queue, or NULL if
  *    it didn't succeed.
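The added wording makes a long-standing rule explicit: the lock handed to
blk_init_queue() is the same one the driver's interrupt handler takes, so
acquiring it without disabling irqs can deadlock. A sketch of the documented
setup, with illustrative driver names:

        static void mydrv_request_fn(request_queue_t *q);      /* illustrative */
        static DEFINE_SPINLOCK(mydrv_lock);     /* taken from irq context too */

        static int __init mydrv_init(void)
        {
                request_queue_t *q;

                q = blk_init_queue(mydrv_request_fn, &mydrv_lock);
                if (!q)
                        return -ENOMEM;
                /* gendisk setup elided */
                return 0;
        }
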
@@ -2514,7 +2516,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
 int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
                   struct request *rq, int at_head)
 {
-       DECLARE_COMPLETION(wait);
+       DECLARE_COMPLETION_ONSTACK(wait);
        char sense[SCSI_SENSE_BUFFERSIZE];
        int err = 0;
 
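DECLARE_COMPLETION() is meant for completions at file scope; with lockdep
enabled, an object living on the stack must instead be initialized at runtime,
which is exactly what DECLARE_COMPLETION_ONSTACK() arranges. The general
pattern, as a sketch with hypothetical names:

        int mydrv_do_sync_op(struct mydrv_device *dev)
        {
                DECLARE_COMPLETION_ONSTACK(done);       /* lives on the stack */

                dev->op_done = &done;           /* hypothetical field */
                mydrv_start_op(dev);            /* hypothetical async kick-off */
                wait_for_completion(&done);     /* irq handler calls complete() */
                return dev->op_result;
        }
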
@@ -2742,7 +2744,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
                return 0;
 
        /*
-        * not contigious
+        * not contiguous
         */
        if (req->sector + req->nr_sectors != next->sector)
                return 0;
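For example, a request covering sectors 100-107 (sector 100, nr_sectors 8) can
only be merged with a request that starts exactly at sector 108; any gap or
overlap fails this check and the two requests stay separate.
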
@@ -2824,6 +2826,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
        if (unlikely(bio_barrier(bio)))
                req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
 
+       if (bio_sync(bio))
+               req->flags |= REQ_RW_SYNC;
+
        req->errors = 0;
        req->hard_sector = req->sector = bio->bi_sector;
        req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
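bio_sync() tests the BIO_RW_SYNC bit in bio->bi_rw; before this hunk that bit
was lost when the bio was turned into a request, so the I/O scheduler could not
tell a synchronous write from a background one. A sketch of the submitter side,
assuming the 2.6.18-era WRITE_SYNC definition in linux/fs.h:

        /* WRITE_SYNC is WRITE with (1 << BIO_RW_SYNC) set, so bio_sync()
         * returns true and init_request_from_bio() now tags the request
         * with REQ_RW_SYNC for the scheduler */
        submit_bio(WRITE_SYNC, bio);
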
@@ -3016,6 +3021,7 @@ void generic_make_request(struct bio *bio)
 {
        request_queue_t *q;
        sector_t maxsector;
+       sector_t old_sector;
        int ret, nr_sectors = bio_sectors(bio);
        dev_t old_dev;
 
@@ -3044,7 +3050,7 @@ void generic_make_request(struct bio *bio)
         * NOTE: we don't repeat the blk_size check for each new device.
         * Stacking drivers are expected to know what they are doing.
         */
-       maxsector = -1;
+       old_sector = -1;
        old_dev = 0;
        do {
                char b[BDEVNAME_SIZE];
@@ -3078,15 +3084,30 @@ end_io:
                 */
                blk_partition_remap(bio);
 
-               if (maxsector != -1)
+               if (old_sector != -1)
                        blk_add_trace_remap(q, bio, old_dev, bio->bi_sector, 
-                                           maxsector);
+                                           old_sector);
 
                blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
 
-               maxsector = bio->bi_sector;
+               old_sector = bio->bi_sector;
                old_dev = bio->bi_bdev->bd_dev;
 
+               maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+               if (maxsector) {
+                       sector_t sector = bio->bi_sector;
+
+                       if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+                               /*
+                                * This may well happen - partitions are not checked
+                                * to make sure they are within the size of the
+                                * whole device.
+                                */
+                               handle_bad_sector(bio);
+                               goto end_io;
+                       }
+               }
+
                ret = q->make_request_fn(q, bio);
        } while (ret);
 }
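The added block re-checks the bio against the device size after every remap,
since a stacking driver may have redirected it to a smaller device. The
comparison is written to avoid arithmetic overflow: on a 1000-sector device
(maxsector = 1000), an 8-sector bio at sector 996 is rejected because
1000 - 8 = 992 is smaller than 996, whereas the naive "sector + nr_sectors >
maxsector" form could wrap around for a corrupt sector value near the top of
sector_t. The same test, pulled out as a stand-alone sketch:

        /* non-zero if [sector, sector + nr_sectors) does not fit inside a
         * device of maxsector sectors, without ever computing the
         * possibly-overflowing sum */
        static int bio_out_of_range(sector_t maxsector, sector_t sector,
                                    unsigned int nr_sectors)
        {
                return maxsector < nr_sectors || maxsector - nr_sectors < sector;
        }
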
@@ -3111,9 +3132,9 @@ void submit_bio(int rw, struct bio *bio)
        BIO_BUG_ON(!bio->bi_io_vec);
        bio->bi_rw |= rw;
        if (rw & WRITE)
-               mod_page_state(pgpgout, count);
+               count_vm_events(PGPGOUT, count);
        else
-               mod_page_state(pgpgin, count);
+               count_vm_events(PGPGIN, count);
 
        if (unlikely(block_dump)) {
                char b[BDEVNAME_SIZE];
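mod_page_state() went away with the 2.6.18 VM event-counter rework;
count_vm_events() is its replacement and feeds the per-cpu counters exported
through /proc/vmstat. A sketch of the call as used above:

        count = bio_sectors(bio);               /* sectors, as in submit_bio() */
        count_vm_events(PGPGOUT, count);        /* accumulates into "pgpgout" */
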
@@ -3359,12 +3380,11 @@ EXPORT_SYMBOL(end_that_request_chunk);
  */
 static void blk_done_softirq(struct softirq_action *h)
 {
-       struct list_head *cpu_list;
-       LIST_HEAD(local_list);
+       struct list_head *cpu_list, local_list;
 
        local_irq_disable();
        cpu_list = &__get_cpu_var(blk_cpu_done);
-       list_splice_init(cpu_list, &local_list);
+       list_replace_init(cpu_list, &local_list);
        local_irq_enable();
 
        while (!list_empty(&local_list)) {
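list_splice_init() is a no-op when the source list is empty, which is why
local_list previously had to be pre-initialized with LIST_HEAD().
list_replace_init() takes over the source's nodes unconditionally and
re-initializes the source, so a bare, uninitialized struct list_head on the
stack is enough. Roughly what the helper does (a sketch of the list.h
implementation of the time):

        static inline void list_replace_init(struct list_head *old,
                                             struct list_head *new)
        {
                new->next = old->next;
                new->next->prev = new;
                new->prev = old->prev;
                new->prev->next = new;  /* also leaves new empty when old is */
                INIT_LIST_HEAD(old);    /* old is usable (and empty) again */
        }
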
@@ -3398,7 +3418,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
 }
 
 
-static struct notifier_block blk_cpu_notifier = {
+static struct notifier_block __devinitdata blk_cpu_notifier = {
        .notifier_call  = blk_cpu_notify,
 };
 
@@ -3410,7 +3430,7 @@ static struct notifier_block blk_cpu_notifier = {
  *
  * Description:
  *     Ends all I/O on a request. It does not handle partial completions,
- *     unless the driver actually implements this in its completionc callback
+ *     unless the driver actually implements this in its completion callback
 *     through requeueing. The actual completion happens out-of-order,
  *     through a softirq handler. The user must have registered a completion
  *     callback through blk_queue_softirq_done().
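The comment describes the split completion model: the driver acknowledges the
hardware in its irq handler and defers the heavier completion work to
BLOCK_SOFTIRQ context. A sketch of the registration it refers to, with
illustrative driver names:

        static void mydrv_softirq_done(struct request *rq)
        {
                /* softirq context: finish the request here */
                end_that_request_last(rq, !rq->errors);
        }

        /* at queue setup time */
        blk_queue_softirq_done(q, mydrv_softirq_done);

        /* in the hardirq handler, instead of completing inline */
        blk_complete_request(rq);
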
@@ -3487,8 +3507,8 @@ EXPORT_SYMBOL(end_request);
 
 void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
 {
-       /* first three bits are identical in rq->flags and bio->bi_rw */
-       rq->flags |= (bio->bi_rw & 7);
+       /* first two bits are identical in rq->flags and bio->bi_rw */
+       rq->flags |= (bio->bi_rw & 3);
 
        rq->nr_phys_segments = bio_phys_segments(q, bio);
        rq->nr_hw_segments = bio_hw_segments(q, bio);
@@ -3536,9 +3556,7 @@ int __init blk_dev_init(void)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
-#ifdef CONFIG_HOTPLUG_CPU
-       register_cpu_notifier(&blk_cpu_notifier);
-#endif
+       register_hotcpu_notifier(&blk_cpu_notifier);
 
        blk_max_low_pfn = max_low_pfn;
        blk_max_pfn = max_pfn;
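register_hotcpu_notifier() folds the CONFIG_HOTPLUG_CPU conditional into the
header, so callers no longer carry the #ifdef; the __devinitdata annotation on
blk_cpu_notifier above marks the notifier block as data that can be discarded
when hotplug support is compiled out. Roughly how the helper reads (an abridged
sketch of the 2.6.18-era linux/cpu.h):

        #ifdef CONFIG_HOTPLUG_CPU
        #define register_hotcpu_notifier(nb)    register_cpu_notifier(nb)
        #define unregister_hotcpu_notifier(nb)  unregister_cpu_notifier(nb)
        #else
        /* notifier never fires; evaluate nb to avoid unused warnings */
        #define register_hotcpu_notifier(nb)    do { (void)(nb); } while (0)
        #define unregister_hotcpu_notifier(nb)  do { (void)(nb); } while (0)
        #endif
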
@@ -3626,6 +3644,8 @@ struct io_context *current_io_context(gfp_t gfp_flags)
                ret->nr_batch_requests = 0; /* because this is 0 */
                ret->aic = NULL;
                ret->cic_root.rb_node = NULL;
+               /* make sure set_task_ioprio() sees the settings above */
+               smp_wmb();
                tsk->io_context = ret;
        }
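Without the barrier, another CPU running set_task_ioprio() could observe the
tsk->io_context store before the stores that initialize *ret, and dereference
half-initialized fields. The reader pairs the smp_wmb() with a data-dependency
barrier; a sketch of that side (the real code lives in fs/ioprio.c, and the
helper name here is hypothetical):

        struct io_context *ioc = task->io_context;

        /* pairs with the smp_wmb() in current_io_context(): everything
         * written before the pointer was published is visible once the
         * dependent load is ordered */
        smp_read_barrier_depends();
        if (ioc)
                notify_ioprio_change(ioc);      /* hypothetical helper */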