X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Fhighmem.c;h=a02c6dec910cfe12f5c45592e316d5fa9c549205;hb=8303cb408c45685be9b956c9822a1a263edb8409;hp=432da5b68baf5ae4c730610c7f69019e387a7ca0;hpb=48ed9e9ed158dedf557fbe4b9e8b09f109e2a79a;p=linux-2.6.git

diff --git a/mm/highmem.c b/mm/highmem.c
index 432da5b68..a02c6dec9 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -26,20 +26,14 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
+#include <linux/blktrace_api.h>
 #include <asm/tlbflush.h>
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc(int gfp_mask, void *data)
+static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
 {
-	int gfp = gfp_mask | (int) (long) data;
-
-	return alloc_page(gfp);
-}
-
-static void page_pool_free(void *page, void *data)
-{
-	__free_page(page);
+	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
 }
 
 /*
@@ -51,9 +45,10 @@ static void page_pool_free(void *page, void *data)
  * n means that there are (n-1) current users of it.
  */
 #ifdef CONFIG_HIGHMEM
+
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
-static spinlock_t kmap_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
+static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
 
 pte_t * pkmap_page_table;
 
@@ -79,8 +74,7 @@ static void flush_all_zero_pkmaps(void)
 		pkmap_count[i] = 0;
 
 		/* sanity check */
-		if (pte_none(pkmap_page_table[i]))
-			BUG();
+		BUG_ON(pte_none(pkmap_page_table[i]));
 
 		/*
 		 * Don't need an atomic fetch-and-clear op here;
@@ -90,7 +84,8 @@ static void flush_all_zero_pkmaps(void)
 		 * So no dangers, even with speculative execution.
 		 */
 		page = pte_page(pkmap_page_table[i]);
-		pte_clear(&pkmap_page_table[i]);
+		pte_clear(&init_mm, (unsigned long)page_address(page),
+			  &pkmap_page_table[i]);
 		set_page_address(page, NULL);
 	}
 
@@ -138,7 +133,8 @@ start:
 		}
 	}
 	vaddr = PKMAP_ADDR(last_pkmap_nr);
-	set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+	set_pte_at(&init_mm, vaddr,
+		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
 
 	pkmap_count[last_pkmap_nr] = 1;
 	set_page_address(page, (void *)vaddr);
@@ -146,6 +142,17 @@ start:
 	return vaddr;
 }
 
+#ifdef CONFIG_XEN
+void kmap_flush_unused(void)
+{
+	spin_lock(&kmap_lock);
+	flush_all_zero_pkmaps();
+	spin_unlock(&kmap_lock);
+}
+
+EXPORT_SYMBOL(kmap_flush_unused);
+#endif
+
 void fastcall *kmap_high(struct page *page)
 {
 	unsigned long vaddr;
@@ -161,8 +168,7 @@ void fastcall *kmap_high(struct page *page)
 	if (!vaddr)
 		vaddr = map_new_virtual(page);
 	pkmap_count[PKMAP_NR(vaddr)]++;
-	if (pkmap_count[PKMAP_NR(vaddr)] < 2)
-		BUG();
+	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
 	spin_unlock(&kmap_lock);
 	return (void*) vaddr;
 }
@@ -177,8 +183,7 @@ void fastcall kunmap_high(struct page *page)
 
 	spin_lock(&kmap_lock);
 	vaddr = (unsigned long)page_address(page);
-	if (!vaddr)
-		BUG();
+	BUG_ON(!vaddr);
 	nr = PKMAP_NR(vaddr);
 
 	/*
@@ -222,9 +227,8 @@ static __init int init_emergency_pool(void)
 	if (!i.totalhigh)
 		return 0;
 
-	page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
-	if (!page_pool)
-		BUG();
+	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
+	BUG_ON(!page_pool);
 	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
 
 	return 0;
@@ -265,9 +269,9 @@ int init_emergency_isa_pool(void)
 	if (isa_page_pool)
 		return 0;
 
-	isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA);
-	if (!isa_page_pool)
-		BUG();
+	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
+				       mempool_free_pages, (void *) 0);
+	BUG_ON(!isa_page_pool);
 	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
 
 	return 0;
@@ -284,7 +288,7 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 	struct bio_vec *tovec, *fromvec;
 	int i;
 
-	bio_for_each_segment(tovec, to, i) {
+	__bio_for_each_segment(tovec, to, i, 0) {
 		fromvec = from->bi_io_vec + i;
 
 		/*
@@ -300,40 +304,42 @@ static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 		 */
 		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
 
+		flush_dcache_page(tovec->bv_page);
 		bounce_copy_vec(tovec, vfrom);
 	}
 }
 
-static void bounce_end_io(struct bio *bio, mempool_t *pool)
+static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 {
 	struct bio *bio_orig = bio->bi_private;
 	struct bio_vec *bvec, *org_vec;
-	int i, err = 0;
+	int i;
 
-	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		err = -EIO;
+	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
+		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);
 
 	/*
 	 * free up bounce indirect pages used
 	 */
-	bio_for_each_segment(bvec, bio, i) {
+	__bio_for_each_segment(bvec, bio, i, 0) {
 		org_vec = bio_orig->bi_io_vec + i;
 		if (bvec->bv_page == org_vec->bv_page)
			continue;
 
 		mempool_free(bvec->bv_page, pool);
+		dec_page_state(nr_bounce);
 	}
 
 	bio_endio(bio_orig, bio_orig->bi_size, err);
 	bio_put(bio);
 }
 
-static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err)
+static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
 {
 	if (bio->bi_size)
 		return 1;
 
-	bounce_end_io(bio, page_pool);
+	bounce_end_io(bio, page_pool, err);
 	return 0;
 }
 
@@ -342,18 +348,18 @@ static int bounce_end_io_write_isa(struct bio *bio, unsigned int bytes_done, int
 	if (bio->bi_size)
 		return 1;
 
-	bounce_end_io(bio, isa_page_pool);
+	bounce_end_io(bio, isa_page_pool, err);
 	return 0;
 }
 
-static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
+static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
 {
 	struct bio *bio_orig = bio->bi_private;
 
 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
 		copy_to_high_bio_irq(bio_orig, bio);
 
-	bounce_end_io(bio, pool);
+	bounce_end_io(bio, pool, err);
 }
 
 static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
@@ -361,7 +367,7 @@ static int bounce_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
 	if (bio->bi_size)
 		return 1;
 
-	__bounce_end_io_read(bio, page_pool);
+	__bounce_end_io_read(bio, page_pool, err);
 	return 0;
 }
 
@@ -370,12 +376,12 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
 	if (bio->bi_size)
 		return 1;
 
-	__bounce_end_io_read(bio, isa_page_pool);
+	__bounce_end_io_read(bio, isa_page_pool, err);
 	return 0;
 }
 
 static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
-			mempool_t *pool)
+			       mempool_t *pool)
 {
 	struct page *page;
 	struct bio *bio = NULL;
@@ -402,10 +408,12 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
 		to->bv_len = from->bv_len;
 		to->bv_offset = from->bv_offset;
+		inc_page_state(nr_bounce);
 
 		if (rw == WRITE) {
 			char *vto, *vfrom;
 
+			flush_dcache_page(from->bv_page);
 			vto = page_address(to->bv_page) + to->bv_offset;
 			vfrom = kmap(from->bv_page) + from->bv_offset;
 			memcpy(vto, vfrom, to->bv_len);
@@ -423,7 +431,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
 	 * at least one page was bounced, fill in possible non-highmem
 	 * pages
 	 */
-	bio_for_each_segment(from, *bio_orig, i) {
+	__bio_for_each_segment(from, *bio_orig, i, 0) {
 		to = bio_iovec_idx(bio, i);
 		if (!to->bv_page) {
 			to->bv_page = from->bv_page;
@@ -473,6 +481,8 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 		pool = isa_page_pool;
 	}
 
+	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+
 	/*
	 * slow path
	 */
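
A note on the mempool conversion above: the patch drops this file's private
page_pool_alloc()/page_pool_free() callbacks in favour of the stock helpers
that mempool grew around 2.6.16 -- mempool_alloc_pages()/mempool_free_pages(),
which interpret pool_data as the page allocation order, plus the
mempool_create_page_pool(min_nr, order) wrapper that wires them up. A minimal,
self-contained sketch of the same pattern (the example_* names and pool size
are illustrative, not part of the patch):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/gfp.h>

#define EXAMPLE_POOL_SIZE 32	/* hypothetical reserve, in pages */

static mempool_t *example_pool;		/* plain order-0 page pool */
static mempool_t *example_dma_pool;	/* DMA-zone page pool */

/*
 * Mirror of mempool_alloc_pages_isa() above: constrain the stock page
 * allocator to ZONE_DMA, passing pool_data (the order) through untouched.
 */
static void *example_alloc_pages_dma(gfp_t gfp_mask, void *pool_data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, pool_data);
}

static int __init example_init(void)
{
	/*
	 * Shorthand for mempool_create(EXAMPLE_POOL_SIZE,
	 * mempool_alloc_pages, mempool_free_pages, (void *)0).
	 */
	example_pool = mempool_create_page_pool(EXAMPLE_POOL_SIZE, 0);
	if (!example_pool)
		return -ENOMEM;

	/*
	 * ISA-style pool: custom alloc wrapper, stock free routine;
	 * (void *)0 here is the page order, not a GFP mask.
	 */
	example_dma_pool = mempool_create(EXAMPLE_POOL_SIZE,
					  example_alloc_pages_dma,
					  mempool_free_pages, (void *)0);
	if (!example_dma_pool) {
		mempool_destroy(example_pool);
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_exit(void)
{
	mempool_destroy(example_dma_pool);
	mempool_destroy(example_pool);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

This also explains the (void *) 0 in init_emergency_isa_pool(): the old code
smuggled __GFP_DMA through pool_data and OR-ed it into the mask at allocation
time, while the new code hardwires GFP_DMA in the wrapper, leaving pool_data
free to mean the allocation order that the stock free routine expects.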
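The PTE hunks are the same kind of API catch-up: pte_clear() and set_pte()
gained mm and virtual-address arguments during the 2.6.1x series. Roughly, the
prototypes the patched code assumes are (per-architecture, so treat this as a
sketch, not a definitive declaration):

	void set_pte_at(struct mm_struct *mm, unsigned long addr,
			pte_t *ptep, pte_t pte);
	void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);

Passing &init_mm and the pkmap virtual address tells the architecture -- and
paravirtualized trees such as the Xen configuration this kernel targets, cf.
the kmap_flush_unused() addition above -- exactly which kernel mapping is
being changed.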