/*
 * kmap - map a page into kernel virtual address space
 * @page: page to map
 *
 * Low-memory pages are permanently mapped, so page_address() returns
 * their address directly.  Highmem pages are routed through
 * kmap_high(), which may sleep waiting for a free mapping slot —
 * hence the might_sleep() annotation.
 *
 * NOTE(review): the original span still carried unresolved diff
 * markers ("- if (page < highmem_start_page)" / "+ if (!PageHighMem(page))");
 * resolved here to the post-patch PageHighMem() test, which does not
 * depend on a contiguous mem_map layout.
 */
static inline void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
/*
 * kunmap - undo a mapping established by kmap()
 * @page: page previously mapped with kmap()
 *
 * Must not be called from interrupt context (BUG_ON enforces this;
 * kunmap_high() may need to take sleeping locks).  Low-memory pages
 * were never dynamically mapped, so nothing needs to be undone for
 * them.
 *
 * NOTE(review): the original span still carried unresolved diff
 * markers ("- if (page < highmem_start_page)" / "+ if (!PageHighMem(page))");
 * resolved here to the post-patch PageHighMem() test, matching kmap().
 */
static inline void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
inc_preempt_count();
- if (page < highmem_start_page)
+ if (!PageHighMem(page))
return page_address(page);
idx = type + KM_TYPE_NR*smp_processor_id();
BUG_ON(!pte_none(*(kmap_pte+idx)));
#endif
set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
- flush_tlb_page(0, vaddr);
+ flush_tlb_page(NULL, vaddr);
return (void*) vaddr;
}
* this pte without first remap it
*/
pte_clear(kmap_pte+idx);
- flush_tlb_page(0, vaddr);
+ flush_tlb_page(NULL, vaddr);
#endif
dec_preempt_count();
preempt_check_resched();