X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Fhighmem.c;h=a02c6dec910cfe12f5c45592e316d5fa9c549205;hb=4e76c8a9fa413ccc09d3f7f664183dcce3555d57;hp=e4cb0aadadaa151c697c3d073349398794dd8216;hpb=6a77f38946aaee1cd85eeec6cf4229b204c15071;p=linux-2.6.git

diff --git a/mm/highmem.c b/mm/highmem.c
index e4cb0aada..a02c6dec9 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -26,20 +26,14 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
+#include <linux/blktrace_api.h>
 #include <asm/tlbflush.h>
 
 static mempool_t *page_pool, *isa_page_pool;
 
-static void *page_pool_alloc(int gfp_mask, void *data)
+static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
 {
-	int gfp = gfp_mask | (int) (long) data;
-
-	return alloc_page(gfp);
-}
-
-static void page_pool_free(void *page, void *data)
-{
-	__free_page(page);
+	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
 }
 
 /*
@@ -51,6 +45,7 @@ static void page_pool_free(void *page, void *data)
  * n means that there are (n-1) current users of it.
  */
 #ifdef CONFIG_HIGHMEM
+
 static int pkmap_count[LAST_PKMAP];
 static unsigned int last_pkmap_nr;
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
@@ -79,8 +74,7 @@ static void flush_all_zero_pkmaps(void)
 		pkmap_count[i] = 0;
 
 		/* sanity check */
-		if (pte_none(pkmap_page_table[i]))
-			BUG();
+		BUG_ON(pte_none(pkmap_page_table[i]));
 
 		/*
 		 * Don't need an atomic fetch-and-clear op here;
@@ -90,7 +84,8 @@ static void flush_all_zero_pkmaps(void)
 		 * So no dangers, even with speculative execution.
 		 */
 		page = pte_page(pkmap_page_table[i]);
-		pte_clear(&pkmap_page_table[i]);
+		pte_clear(&init_mm, (unsigned long)page_address(page),
+			  &pkmap_page_table[i]);
 
 		set_page_address(page, NULL);
 	}
@@ -138,7 +133,8 @@ start:
 		}
 	}
 	vaddr = PKMAP_ADDR(last_pkmap_nr);
-	set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
+	set_pte_at(&init_mm, vaddr,
+		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
 
 	pkmap_count[last_pkmap_nr] = 1;
 	set_page_address(page, (void *)vaddr);
@@ -146,6 +142,17 @@ start:
 	return vaddr;
 }
 
+#ifdef CONFIG_XEN
+void kmap_flush_unused(void)
+{
+	spin_lock(&kmap_lock);
+	flush_all_zero_pkmaps();
+	spin_unlock(&kmap_lock);
+}
+
+EXPORT_SYMBOL(kmap_flush_unused);
+#endif
+
 void fastcall *kmap_high(struct page *page)
 {
 	unsigned long vaddr;
@@ -161,8 +168,7 @@ void fastcall *kmap_high(struct page *page)
 	if (!vaddr)
 		vaddr = map_new_virtual(page);
 	pkmap_count[PKMAP_NR(vaddr)]++;
-	if (pkmap_count[PKMAP_NR(vaddr)] < 2)
-		BUG();
+	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
 	spin_unlock(&kmap_lock);
 	return (void*) vaddr;
 }
@@ -177,8 +183,7 @@ void fastcall kunmap_high(struct page *page)
 
 	spin_lock(&kmap_lock);
 	vaddr = (unsigned long)page_address(page);
-	if (!vaddr)
-		BUG();
+	BUG_ON(!vaddr);
 	nr = PKMAP_NR(vaddr);
 
 	/*
@@ -222,9 +227,8 @@ static __init int init_emergency_pool(void)
 	if (!i.totalhigh)
 		return 0;
 
-	page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL);
-	if (!page_pool)
-		BUG();
+	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
+	BUG_ON(!page_pool);
 	printk("highmem bounce pool size: %d pages\n", POOL_SIZE);
 
 	return 0;
@@ -265,9 +269,9 @@ int init_emergency_isa_pool(void)
 	if (isa_page_pool)
 		return 0;
 
-	isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc, page_pool_free, (void *) __GFP_DMA);
-	if (!isa_page_pool)
-		BUG();
+	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
+				       mempool_free_pages, (void *) 0);
+	BUG_ON(!isa_page_pool);
 
 	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
 	return 0;
@@ -323,13 +327,14 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
 			continue;
 
 		mempool_free(bvec->bv_page, pool);
+		dec_page_state(nr_bounce);
 	}
 
 	bio_endio(bio_orig, bio_orig->bi_size, err);
 	bio_put(bio);
 }
 
-static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err)
+static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err)
 {
 	if (bio->bi_size)
 		return 1;
@@ -376,7 +381,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int
 }
 
 static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
-		mempool_t *pool)
+			       mempool_t *pool)
 {
 	struct page *page;
 	struct bio *bio = NULL;
@@ -403,6 +408,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig,
 		to->bv_page = mempool_alloc(pool, q->bounce_gfp);
 		to->bv_len = from->bv_len;
 		to->bv_offset = from->bv_offset;
+		inc_page_state(nr_bounce);
 
 		if (rw == WRITE) {
 			char *vto, *vfrom;
@@ -475,6 +481,8 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 		pool = isa_page_pool;
 	}
 
+	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+
 	/*
 	 * slow path
 	 */