arch/i386/mm/highmem-xen.c
#include <linux/highmem.h>
#include <linux/module.h>

void *kmap(struct page *page)
{
        might_sleep();
        if (!PageHighMem(page))
                return page_address(page);
        return kmap_high(page);
}

void kunmap(struct page *page)
{
        BUG_ON(in_interrupt());
        if (!PageHighMem(page))
                return;
        kunmap_high(page);
}
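
/*
 * Illustrative usage sketch, not part of the original file: copying a
 * possibly-highmem page into a kernel buffer with kmap()/kunmap().
 * kmap() may sleep, so this is only valid in process context.  The
 * helper name example_copy_page_to_buf is hypothetical.
 */
#if 0
static void example_copy_page_to_buf(struct page *page, void *buf)
{
        void *vaddr = kmap(page);       /* may sleep; takes the global kmap lock */

        memcpy(buf, vaddr, PAGE_SIZE);
        kunmap(page);                   /* never from interrupt context */
}
#endif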

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed and because the kmap code must perform
 * a global TLB invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
        set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));

        return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
        return __kmap_atomic(page, type, kmap_prot);
}

/*
 * Same as kmap_atomic() but with PAGE_KERNEL_RO page protection.  Xen
 * requires that page-table pages never be mapped writable, so pte pages
 * are mapped read-only here.
 */
void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        return __kmap_atomic(page, type, PAGE_KERNEL_RO);
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

        /*
         * Force other mappings to Oops if they'll try to access this pte
         * without first remapping it.  Keeping stale mappings around is a
         * bad idea also, in case the page changes cacheability attributes
         * or becomes a protected page in a hypervisor.
         */
        if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
                /* kpte_clear_flush() already flushes the TLB entry */
                kpte_clear_flush(kmap_pte-idx, vaddr);
        } else {
#ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr < PAGE_OFFSET);
                BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
        }

        pagefault_enable();
}
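
/*
 * Illustrative usage sketch, not part of the original file: copying one
 * highmem page to another from atomic context.  Nothing may sleep between
 * kmap_atomic() and kunmap_atomic(), and the two mappings use distinct
 * km_type slots so their fixmap entries don't collide.  The helper name
 * example_copy_page_atomic is hypothetical.
 */
#if 0
static void example_copy_page_atomic(struct page *dst, struct page *src)
{
        void *vdst = kmap_atomic(dst, KM_USER0);
        void *vsrc = kmap_atomic(src, KM_USER1);

        memcpy(vdst, vsrc, PAGE_SIZE);

        /* unmap in reverse order; each call also re-enables pagefaults */
        kunmap_atomic(vsrc, KM_USER1);
        kunmap_atomic(vdst, KM_USER0);
}
#endif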

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        pagefault_disable();

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));

        return (void *)vaddr;
}
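
/*
 * Illustrative usage sketch, not part of the original file: reading a word
 * from a frame that has no struct page (e.g. a foreign or I/O frame under
 * Xen).  There is no kunmap_atomic_pfn(); the ordinary kunmap_atomic() on
 * the returned address tears the mapping down.  The helper name
 * example_read_pfn_word is hypothetical.
 */
#if 0
static unsigned long example_read_pfn_word(unsigned long pfn)
{
        unsigned long *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
        unsigned long val = *vaddr;

        kunmap_atomic(vaddr, KM_USER0);
        return val;
}
#endif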

struct page *kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);