X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-ppc%2Fhighmem.h;h=f7b21ee302b4f1b7fcd1e923fa14c3da0e708f6e;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=928f8447ae7fc09fb79d3c3feba615c8e0a99b40;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git

diff --git a/include/asm-ppc/highmem.h b/include/asm-ppc/highmem.h
index 928f8447a..f7b21ee30 100644
--- a/include/asm-ppc/highmem.h
+++ b/include/asm-ppc/highmem.h
@@ -35,8 +35,6 @@ extern pte_t *kmap_pte;
 extern pgprot_t kmap_prot;
 extern pte_t *pkmap_page_table;
 
-extern void kmap_init(void) __init;
-
 /*
  * Right now we initialize only a single pte table. It can be extended
  * easily, subsequent pte tables have to be allocated in one physical
@@ -56,7 +54,7 @@ extern void kunmap_high(struct page *page);
 static inline void *kmap(struct page *page)
 {
 	might_sleep();
-	if (page < highmem_start_page)
+	if (!PageHighMem(page))
 		return page_address(page);
 	return kmap_high(page);
 }
@@ -64,7 +62,7 @@ static inline void *kmap(struct page *page)
 static inline void kunmap(struct page *page)
 {
 	BUG_ON(in_interrupt());
-	if (page < highmem_start_page)
+	if (!PageHighMem(page))
 		return;
 	kunmap_high(page);
 }
@@ -81,8 +79,8 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 	unsigned long vaddr;
 
 	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-	inc_preempt_count();
-	if (page < highmem_start_page)
+	pagefault_disable();
+	if (!PageHighMem(page))
 		return page_address(page);
 
 	idx = type + KM_TYPE_NR*smp_processor_id();
@@ -90,7 +88,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 #ifdef HIGHMEM_DEBUG
 	BUG_ON(!pte_none(*(kmap_pte+idx)));
 #endif
-	set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
+	set_pte_at(&init_mm, vaddr, kmap_pte+idx, mk_pte(page, kmap_prot));
 	flush_tlb_page(NULL, vaddr);
 
 	return (void*) vaddr;
@@ -103,8 +101,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
 
 	if (vaddr < KMAP_FIX_BEGIN) { // FIXME
-		dec_preempt_count();
-		preempt_check_resched();
+		pagefault_enable();
 		return;
 	}
 
@@ -114,11 +111,10 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	 * force other mappings to Oops if they'll try to access
 	 * this pte without first remap it
 	 */
-	pte_clear(kmap_pte+idx);
+	pte_clear(&init_mm, vaddr, kmap_pte+idx);
 	flush_tlb_page(NULL, vaddr);
 #endif
-	dec_preempt_count();
-	preempt_check_resched();
+	pagefault_enable();
 }
 
 static inline struct page *kmap_atomic_to_page(void *ptr)
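
The hunks above swap the open-coded preempt-count handling for pagefault_disable()/pagefault_enable(), switch the highmem test to PageHighMem(), and move to the set_pte_at()/pte_clear() variants that take the mm and virtual address. As a minimal usage sketch (not part of this patch), the following shows how a caller might copy data into a page that could live in highmem through the post-patch kmap_atomic()/kunmap_atomic() pair; the function name copy_into_page and the choice of the KM_USER0 slot are illustrative assumptions, not code from this tree.

/*
 * Illustrative sketch only -- not from this patch. Assumes the
 * enum km_type interface shown above; copy_into_page() and the
 * KM_USER0 slot are hypothetical choices made for the example.
 */
#include <linux/highmem.h>
#include <linux/string.h>

static void copy_into_page(struct page *page, const void *src, size_t len)
{
	/* After this change kmap_atomic() calls pagefault_disable(),
	 * so the code between map and unmap must not sleep. */
	void *dst = kmap_atomic(page, KM_USER0);

	memcpy(dst, src, len);

	/* kunmap_atomic() re-enables pagefaults via pagefault_enable(). */
	kunmap_atomic(dst, KM_USER0);
}

For contexts that are allowed to sleep, the kmap()/kunmap() pair in the same header remains the alternative: kmap() calls might_sleep() and, for non-highmem pages, falls back to page_address() exactly as the post-patch inline functions show.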