This commit was manufactured by cvs2svn to create branch 'vserver'.
diff --git a/mm/highmem.c b/mm/highmem.c
index 432da5b..4009115 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -30,9 +30,9 @@
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc(int gfp_mask, void *data)
+static void *page_pool_alloc(unsigned int __nocast gfp_mask, void *data)
 {
-       int gfp = gfp_mask | (int) (long) data;
+       unsigned int gfp = gfp_mask | (unsigned int) (long) data;
 
        return alloc_page(gfp);
 }
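
This hunk is part of the gfp-flags type-safety work: the mempool allocation callback's mask changes from a plain int to the `unsigned int __nocast` convention used for gfp masks at the time, so sparse can flag bogus conversions. The `data` cookie carries extra gfp bits for a given pool. A minimal sketch of how the two pools are created elsewhere in this file (paraphrased for illustration, not part of this hunk):

        page_pool = mempool_create(POOL_SIZE, page_pool_alloc,
                                   page_pool_free, NULL);
        /* the ISA pool passes __GFP_DMA via the data cookie, which
         * page_pool_alloc() ORs into the caller's mask above */
        isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc,
                                       page_pool_free, (void *) __GFP_DMA);
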
@@ -53,7 +53,7 @@ static void page_pool_free(void *page, void *data)
 #ifdef CONFIG_HIGHMEM
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
-static spinlock_t kmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
 
 pte_t * pkmap_page_table;
 
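Part of the tree-wide move away from open-coded `SPIN_LOCK_UNLOCKED` initializers: `DEFINE_SPINLOCK()` declares and statically initializes the lock in one macro, which later lets the lock type grow debugging state without touching every declaration site. The two forms, side by side (equivalent here):

        static spinlock_t kmap_lock = SPIN_LOCK_UNLOCKED;      /* old style */
        static DEFINE_SPINLOCK(kmap_lock);                     /* new style */
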
@@ -90,7 +90,8 @@ static void flush_all_zero_pkmaps(void)
                 * So no dangers, even with speculative execution.
                 */
                page = pte_page(pkmap_page_table[i]);
-               pte_clear(&pkmap_page_table[i]);
+               pte_clear(&init_mm, (unsigned long)page_address(page),
+                         &pkmap_page_table[i]);
 
                set_page_address(page, NULL);
        }
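
`pte_clear()` gained mm and virtual-address arguments so architectures that need per-address TLB or cache maintenance have that context available. The kmap window lives in `init_mm`, and the address is recovered from the page with `page_address()`. The new calling convention, sketched generically (`vaddr` standing in for the mapping's virtual address):

        /* new prototype: pte_clear(struct mm_struct *mm,
         *                          unsigned long addr, pte_t *ptep) */
        pte_clear(&init_mm, vaddr, &pkmap_page_table[i]);
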
@@ -138,7 +139,8 @@ start:
                }
        }
        vaddr = PKMAP_ADDR(last_pkmap_nr);
-       set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+       set_pte_at(&init_mm, vaddr,
+                  &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
 
        pkmap_count[last_pkmap_nr] = 1;
        set_page_address(page, (void *)vaddr);
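
The companion conversion for establishing a mapping: `set_pte()` becomes `set_pte_at()`, carrying the same mm/address context; here `vaddr` has just been computed from `PKMAP_ADDR()`, so it is passed straight through. Architectures that don't need the extra context can simply discard it, as in this typical arch-header fallback of the era:

        #define set_pte_at(mm, addr, ptep, pteval)      set_pte(ptep, pteval)
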
@@ -284,7 +286,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
        struct bio_vec *tovec, *fromvec;
        int i;
 
-       bio_for_each_segment(tovec, to, i) {
+       __bio_for_each_segment(tovec, to, i, 0) {
                fromvec = from->bi_io_vec + i;
 
                /*
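`bio_for_each_segment()` starts walking at `bio->bi_idx`, which the block layer advances as portions of a request complete. The bounce code must visit every segment of the bio regardless of how far completion has progressed, so these loops switch to `__bio_for_each_segment()` with an explicit start index of 0. The pattern, sketched:

        struct bio_vec *bvec;
        int i;

        /* walk all segments from 0, ignoring the current bi_idx */
        __bio_for_each_segment(bvec, bio, i, 0) {
                /* bvec->bv_page / bv_offset / bv_len describe segment i */
        }
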
@@ -300,28 +302,30 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
                 */
                vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
 
+               flush_dcache_page(tovec->bv_page);
                bounce_copy_vec(tovec, vfrom);
        }
 }
 
-static void bounce_end_io(struct bio *bio, mempool_t *pool)
+static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 {
        struct bio *bio_orig = bio->bi_private;
        struct bio_vec *bvec, *org_vec;
-       int i, err = 0;
+       int i;
 
-       if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-               err = -EIO;
+       if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
+               set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);
 
        /*
         * free up bounce indirect pages used
         */
-       bio_for_each_segment(bvec, bio, i) {
+       __bio_for_each_segment(bvec, bio, i, 0) {
                org_vec = bio_orig->bi_io_vec + i;
                if (bvec->bv_page == org_vec->bv_page)
                        continue;
 
                mempool_free(bvec->bv_page, pool);      
+               dec_page_state(nr_bounce);
        }
 
        bio_endio(bio_orig, bio_orig->bi_size, err);
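
`bounce_end_io()` previously synthesized its own status from `BIO_UPTODATE`; now the `err` delivered to the bounce bio's completion is forwarded verbatim, and a barrier-not-supported result (`BIO_EOPNOTSUPP`) is mirrored into the original bio's flags so the submitter can react. The hunk also decrements the new `nr_bounce` counter as each bounce page returns to the mempool, balancing the `inc_page_state()` added in `__blk_queue_bounce()` below:

        inc_page_state(nr_bounce);      /* bounce page allocated */
        dec_page_state(nr_bounce);      /* bounce page freed */
        /* the aggregate count is exported as "nr_bounce" in /proc/vmstat */
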
@@ -333,7 +337,7 @@ static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err)
        if (bio->bi_size)
                return 1;
 
-       bounce_end_io(bio, page_pool);
+       bounce_end_io(bio, page_pool, err);
        return 0;
 }
 
@@ -342,18 +346,18 @@ static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int
        if (bio->bi_size)
                return 1;
 
-       bounce_end_io(bio, isa_page_pool);
+       bounce_end_io(bio, isa_page_pool, err);
        return 0;
 }
 
-static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
+static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
 {
        struct bio *bio_orig = bio->bi_private;
 
        if (test_bit(BIO_UPTODATE, &bio->bi_flags))
                copy_to_high_bio_irq(bio_orig, bio);
 
-       bounce_end_io(bio, pool);
+       bounce_end_io(bio, pool, err);
 }
 
 static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
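
The read-side completions keep the same shape: bounced data is copied back to the original (possibly highmem) pages only on success, and `err` is now passed through rather than recomputed. All of these handlers follow the old three-argument `bi_end_io` contract, in which a bio may complete in pieces. A minimal sketch of that contract (hypothetical handler name):

        static int my_end_io(struct bio *bio, unsigned int bytes_done, int err)
        {
                if (bio->bi_size)       /* I/O still outstanding */
                        return 1;
                /* fully complete: release resources, forward err */
                return 0;
        }
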
@@ -361,7 +365,7 @@ static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
        if (bio->bi_size)
                return 1;
 
-       __bounce_end_io_read(bio, page_pool);
+       __bounce_end_io_read(bio, page_pool, err);
        return 0;
 }
 
@@ -370,7 +374,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
        if (bio->bi_size)
                return 1;
 
-       __bounce_end_io_read(bio, isa_page_pool);
+       __bounce_end_io_read(bio, isa_page_pool, err);
        return 0;
 }
 
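For context on where these handlers attach: `__blk_queue_bounce()` clones the bio, stores the original in `bi_private`, and picks the completion handler by direction and pool. Roughly, paraphrasing the surrounding file (not part of this patch):

        bio->bi_private = *bio_orig;
        if (rw == WRITE)
                bio->bi_end_io = bounce_end_io_write;   /* *_isa variant for the ISA pool */
        else
                bio->bi_end_io = bounce_end_io_read;
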
@@ -402,10 +406,12 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
                to->bv_page = mempool_alloc(pool, q->bounce_gfp);
                to->bv_len = from->bv_len;
                to->bv_offset = from->bv_offset;
+               inc_page_state(nr_bounce);
 
                if (rw == WRITE) {
                        char *vto, *vfrom;
 
+                       flush_dcache_page(from->bv_page);
                        vto = page_address(to->bv_page) + to->bv_offset;
                        vfrom = kmap(from->bv_page) + from->bv_offset;
                        memcpy(vto, vfrom, to->bv_len);
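
Two additions on the submission path: every page taken from the bounce pool bumps `nr_bounce`, and on writes the source page is flushed before being read through the kernel mapping. On CPUs with virtually indexed data caches, user-space stores may still sit in cache lines under a different virtual alias; `flush_dcache_page()` pushes them out so the `memcpy()` sees current data. Annotated form of the copy above (same code, plus the `kunmap()` that follows in the file):

        flush_dcache_page(from->bv_page);       /* make user-alias writes visible */
        vto = page_address(to->bv_page) + to->bv_offset;
        vfrom = kmap(from->bv_page) + from->bv_offset;  /* temporary highmem map */
        memcpy(vto, vfrom, to->bv_len);
        kunmap(from->bv_page);
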
@@ -423,7 +429,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
         * at least one page was bounced, fill in possible non-highmem
         * pages
         */
-       bio_for_each_segment(from, *bio_orig, i) {
+       __bio_for_each_segment(from, *bio_orig, i, 0) {
                to = bio_iovec_idx(bio, i);
                if (!to->bv_page) {
                        to->bv_page = from->bv_page;
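
The final hunk's loop fills in the segments that needed no bouncing by pointing the clone at the original pages. Drivers don't call `__blk_queue_bounce()` directly; the block core invokes `blk_queue_bounce()` on each submitted bio, honoring the limit the driver declared. A minimal usage sketch (illustrative limit):

        /* at queue setup: bounce anything in highmem for this device */
        blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);

        /* in the submission path (the block core does this in __make_request):
         * may replace bio with a clone backed by bounce pages */
        blk_queue_bounce(q, &bio);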