#include <linux/highmem.h>
#include <linux/module.h>

void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}

void kunmap(struct page *page)
{
        if (in_interrupt())
                BUG();
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
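
/*
 * Illustrative usage sketch (not part of this file): kmap() may sleep
 * waiting for a free slot in the kmap pool, so it is only safe from
 * process context. The helper name below is hypothetical.
 */
#if 0
static void zero_highpage_slow(struct page *page)
{
        char *kaddr = kmap(page);       /* may sleep */
        memset(kaddr, 0, PAGE_SIZE);
        kunmap(page);
}
#endif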

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        inc_preempt_count();
        if (!PageHighMem(page))
                return page_address(page);

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        if (!pte_none(*(kmap_pte-idx)))
                BUG();
#endif
        set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));

        return (void*) vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
        return __kmap_atomic(page, type, kmap_prot);
}

/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        return __kmap_atomic(page, type, PAGE_KERNEL_RO);
}
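
/*
 * Illustrative usage sketch (not part of this file): atomic kmaps must be
 * released with the same km_type slot they were taken with, and no sleeping
 * is allowed in between. This mirrors copy_highpage() in <linux/highmem.h>;
 * the helper name is hypothetical.
 */
#if 0
static void copy_page_atomic(struct page *dst, struct page *src)
{
        char *vfrom, *vto;

        vfrom = kmap_atomic(src, KM_USER0);
        vto = kmap_atomic(dst, KM_USER1);
        copy_page(vto, vfrom);
        kunmap_atomic(vto, KM_USER1);
        kunmap_atomic(vfrom, KM_USER0);
}
#endif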

void kunmap_atomic(void *kvaddr, enum km_type type)
{
#if defined(CONFIG_DEBUG_HIGHMEM) || defined(CONFIG_XEN)
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

        if (vaddr < FIXADDR_START) { // FIXME
                dec_preempt_count();
                preempt_check_resched();
                return;
        }
#endif

#if defined(CONFIG_DEBUG_HIGHMEM)
        if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
                BUG();

        /*
         * Force other mappings to Oops if they try to access this pte
         * without first remapping it.
         */
        pte_clear(&init_mm, vaddr, kmap_pte-idx);
        __flush_tlb_one(vaddr);
#elif defined(CONFIG_XEN)
        /*
         * We must ensure there are no dangling pagetable references when
         * returning memory to Xen (decrease_reservation).
         * XXX TODO: We could make this faster by only zapping when
         * kmap_flush_unused is called, but that is trickier and more invasive.
         */
        pte_clear(&init_mm, vaddr, kmap_pte-idx);
#endif

        dec_preempt_count();
        preempt_check_resched();
}

/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        inc_preempt_count();

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
        __flush_tlb_one(vaddr);

        return (void*) vaddr;
}
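
/*
 * Illustrative usage sketch (not part of this file): kmap_atomic_pfn() is
 * useful for frames with no struct page, e.g. peeking at an arbitrary
 * physical address. The helper name and the KM_USER0 slot choice are
 * assumptions for the example.
 */
#if 0
static u32 peek_phys_u32(unsigned long phys)
{
        char *vaddr = kmap_atomic_pfn(phys >> PAGE_SHIFT, KM_USER0);
        u32 val = *(u32 *)(vaddr + (phys & ~PAGE_MASK));

        kunmap_atomic(vaddr, KM_USER0);
        return val;
}
#endif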

struct page *kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}
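
/*
 * Illustrative sketch (not part of this file): kmap_atomic_to_page()
 * recovers the struct page behind an atomic mapping, e.g. to get at
 * its pfn. The helper name is hypothetical.
 */
#if 0
static unsigned long kmap_vaddr_to_pfn(void *kvaddr)
{
        return page_to_pfn(kmap_atomic_to_page(kvaddr));
}
#endif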

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);