Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
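The hunks below convert mm/highmem.c to BUG_ON(), DEFINE_SPINLOCK(), the set_pte_at()/pte_clear() calls that take an mm and address, and the common mempool page allocator, and they propagate the completion error through bounce_end_io() while adding blktrace and nr_bounce accounting. As a point of reference, the following is a minimal sketch (not part of the patch) of the two bounce-pool setups the new code relies on, assuming the 2.6.17-era mempool interface visible in the hunks (mempool_create_page_pool(), mempool_alloc_pages(), mempool_free_pages()); the example_* names and pool sizes are illustrative, not the values highmem.c actually uses.

#include <linux/mempool.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Illustrative sizes only; highmem.c defines its own POOL_SIZE/ISA_POOL_SIZE. */
#define EXAMPLE_POOL_SIZE	64
#define EXAMPLE_ISA_POOL_SIZE	16

static mempool_t *example_page_pool;
static mempool_t *example_isa_page_pool;

/* Allocation callback that forces GFP_DMA, mirroring mempool_alloc_pages_isa(). */
static void *example_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

static int example_create_pools(void)
{
	/* Highmem bounce pool: order-0 pages from the common page allocator. */
	example_page_pool = mempool_create_page_pool(EXAMPLE_POOL_SIZE, 0);
	if (!example_page_pool)
		return -ENOMEM;

	/* ISA bounce pool: common free helper plus the DMA-zone alloc callback;
	 * the (void *) 0 pool_data is the page order passed to the helpers. */
	example_isa_page_pool = mempool_create(EXAMPLE_ISA_POOL_SIZE,
					       example_alloc_pages_isa,
					       mempool_free_pages, (void *) 0);
	if (!example_isa_page_pool) {
		mempool_destroy(example_page_pool);
		return -ENOMEM;
	}
	return 0;
}
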
diff --git a/mm/highmem.c b/mm/highmem.c
index 432da5b..9b274fd 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
+#include <linux/blktrace_api.h>
 #include <asm/tlbflush.h>
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc(int gfp_mask, void *data)
+static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
 {
-       int gfp = gfp_mask | (int) (long) data;
-
-       return alloc_page(gfp);
-}
-
-static void page_pool_free(void *page, void *data)
-{
-       __free_page(page);
+       return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
 }
 
 /*
@@ -51,9 +45,10 @@ static void page_pool_free(void *page, void *data)
  *  n means that there are (n-1) current users of it.
  */
 #ifdef CONFIG_HIGHMEM
+
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
-static spinlock_t kmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
 
 pte_t * pkmap_page_table;
 
@@ -79,8 +74,7 @@ static void flush_all_zero_pkmaps(void)
                pkmap_count[i] = 0;
 
                /* sanity check */
-               if (pte_none(pkmap_page_table[i]))
-                       BUG();
+               BUG_ON(pte_none(pkmap_page_table[i]));
 
                /*
                 * Don't need an atomic fetch-and-clear op here;
@@ -90,7 +84,8 @@ static void flush_all_zero_pkmaps(void)
                 * So no dangers, even with speculative execution.
                 */
                page = pte_page(pkmap_page_table[i]);
-               pte_clear(&pkmap_page_table[i]);
+               pte_clear(&init_mm, (unsigned long)page_address(page),
+                         &pkmap_page_table[i]);
 
                set_page_address(page, NULL);
        }
@@ -138,7 +133,8 @@ start:
                }
        }
        vaddr = PKMAP_ADDR(last_pkmap_nr);
-       set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+       set_pte_at(&init_mm, vaddr,
+                  &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
 
        pkmap_count[last_pkmap_nr] = 1;
        set_page_address(page, (void *)vaddr);
@@ -161,8 +157,7 @@ void fastcall *kmap_high(struct page *page)
        if (!vaddr)
                vaddr = map_new_virtual(page);
        pkmap_count[PKMAP_NR(vaddr)]++;
-       if (pkmap_count[PKMAP_NR(vaddr)] < 2)
-               BUG();
+       BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
        spin_unlock(&kmap_lock);
        return (void*) vaddr;
 }
@@ -177,8 +172,7 @@ void fastcall kunmap_high(struct page *page)
 
        spin_lock(&kmap_lock);
        vaddr = (unsigned long)page_address(page);
-       if (!vaddr)
-               BUG();
+       BUG_ON(!vaddr);
        nr = PKMAP_NR(vaddr);
 
        /*
@@ -222,9 +216,8 @@ static __init int init_emergency_pool(void)
        if (!i.totalhigh)
                return 0;
 
-       page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
-       if (!page_pool)
-               BUG();
+       page_pool = mempool_create_page_pool(POOL_SIZE, 0);
+       BUG_ON(!page_pool);
        printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
 
        return 0;
@@ -265,9 +258,9 @@ int init_emergency_isa_pool(void)
        if (isa_page_pool)
                return 0;
 
-       isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA);
-       if (!isa_page_pool)
-               BUG();
+       isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
+                                      mempool_free_pages, (void *) 0);
+       BUG_ON(!isa_page_pool);
 
        printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
        return 0;
@@ -284,7 +277,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
        struct bio_vec *tovec, *fromvec;
        int i;
 
-       bio_for_each_segment(tovec, to, i) {
+       __bio_for_each_segment(tovec, to, i, 0) {
                fromvec = from->bi_io_vec + i;
 
                /*
@@ -300,40 +293,42 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
                 */
                vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
 
+               flush_dcache_page(tovec->bv_page);
                bounce_copy_vec(tovec, vfrom);
        }
 }
 
-static void bounce_end_io(struct bio *bio, mempool_t *pool)
+static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 {
        struct bio *bio_orig = bio->bi_private;
        struct bio_vec *bvec, *org_vec;
-       int i, err = 0;
+       int i;
 
-       if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-               err = -EIO;
+       if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
+               set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);
 
        /*
         * free up bounce indirect pages used
         */
-       bio_for_each_segment(bvec, bio, i) {
+       __bio_for_each_segment(bvec, bio, i, 0) {
                org_vec = bio_orig->bi_io_vec + i;
                if (bvec->bv_page == org_vec->bv_page)
                        continue;
 
                mempool_free(bvec->bv_page, pool);      
+               dec_page_state(nr_bounce);
        }
 
        bio_endio(bio_orig, bio_orig->bi_size, err);
        bio_put(bio);
 }
 
-static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err)
+static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
 {
        if (bio->bi_size)
                return 1;
 
-       bounce_end_io(bio, page_pool);
+       bounce_end_io(bio, page_pool, err);
        return 0;
 }
 
@@ -342,18 +337,18 @@ static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int
        if (bio->bi_size)
                return 1;
 
-       bounce_end_io(bio, isa_page_pool);
+       bounce_end_io(bio, isa_page_pool, err);
        return 0;
 }
 
-static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
+static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
 {
        struct bio *bio_orig = bio->bi_private;
 
        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                copy_to_high_bio_irq(bio_orig, bio);
 
-       bounce_end_io(bio, pool);
+       bounce_end_io(bio, pool, err);
 }
 
 static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
@@ -361,7 +356,7 @@ static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
        if (bio->bi_size)
                return 1;
 
-       __bounce_end_io_read(bio, page_pool);
+       __bounce_end_io_read(bio, page_pool, err);
        return 0;
 }
 
@@ -370,12 +365,12 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
        if (bio->bi_size)
                return 1;
 
-       __bounce_end_io_read(bio, isa_page_pool);
+       __bounce_end_io_read(bio, isa_page_pool, err);
        return 0;
 }
 
 static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
-                       mempool_t *pool)
+                              mempool_t *pool)
 {
        struct page *page;
        struct bio *bio = NULL;
@@ -402,10 +397,12 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
                to->bv_page = mempool_alloc(pool, q->bounce_gfp);
                to->bv_len = from->bv_len;
                to->bv_offset = from->bv_offset;
+               inc_page_state(nr_bounce);
 
                if (rw == WRITE) {
                        char *vto, *vfrom;
 
+                       flush_dcache_page(from->bv_page);
                        vto = page_address(to->bv_page) + to->bv_offset;
                        vfrom = kmap(from->bv_page) + from->bv_offset;
                        memcpy(vto, vfrom, to->bv_len);
@@ -423,7 +420,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
         * at least one page was bounced, fill in possible non-highmem
         * pages
         */
-       bio_for_each_segment(from, *bio_orig, i) {
+       __bio_for_each_segment(from, *bio_orig, i, 0) {
                to = bio_iovec_idx(bio, i);
                if (!to->bv_page) {
                        to->bv_page = from->bv_page;
@@ -473,6 +470,8 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
                pool = isa_page_pool;
        }
 
+       blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+
        /*
         * slow path
         */