enable kexec
[linux-2.6.git] arch/i386/mm/highmem.c
#include <linux/highmem.h>

/*
 * Map a (possibly highmem) page into the kernel's address space.  Lowmem
 * pages already have a permanent mapping and are returned directly;
 * highmem pages go through the shared kmap pool and may sleep.
 */
void *kmap(struct page *page)
{
        might_sleep();
        if (page < highmem_start_page)
                return page_address(page);
        return kmap_high(page);
}

/*
 * Undo kmap().  Must not be called from interrupt context.
 */
void kunmap(struct page *page)
{
        if (in_interrupt())
                BUG();
        if (page < highmem_start_page)
                return;
        kunmap_high(page);
}
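
/*
 * Illustrative caller (not part of the original file): kmap() may sleep,
 * so this pattern is only valid in process context.  The helper name and
 * the byte-by-byte clear are examples only - a sketch, not tree code.
 */
static void example_zero_highmem_page(struct page *page)
{
        char *vaddr = kmap(page);       /* may block waiting for a free kmap slot */
        int i;

        for (i = 0; i < PAGE_SIZE; i++)
                vaddr[i] = 0;
        kunmap(page);
}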

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        inc_preempt_count();
        if (page < highmem_start_page)
                return page_address(page);

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        if (!pte_none(*(kmap_pte-idx)))
                BUG();
#endif
        /*
         * If the page is not a normal RAM page, then map it
         * uncached to be on the safe side - it could be device
         * memory that must not be prefetched:
         */
        if (PageReserved(page))
                set_pte(kmap_pte-idx, mk_pte(page, kmap_prot_nocache));
        else
                set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
        __flush_tlb_one(vaddr);

        return (void*) vaddr;
}
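
/*
 * Illustrative caller (not part of the original file): the atomic variant
 * may not sleep while the mapping is held, and each CPU owns one fixmap
 * slot per km_type.  KM_USER0 and the helper name are examples only -
 * a sketch, not tree code.
 */
static unsigned char example_peek_byte_atomic(struct page *page, unsigned int offset)
{
        unsigned char *vaddr, val;

        vaddr = kmap_atomic(page, KM_USER0);    /* preemption disabled from here */
        val = vaddr[offset];
        kunmap_atomic(vaddr, KM_USER0);         /* re-enables preemption */

        return val;
}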

/*
 * Page frame number based kmaps - useful for PCI mappings.
 * NOTE: we map the page with the don't-cache flag.
 */
void *kmap_atomic_nocache_pfn(unsigned long pfn, enum km_type type)
{
        enum fixed_addresses idx;
        unsigned long vaddr;

        /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
        inc_preempt_count();
        if (pfn < highstart_pfn)
                return pfn_to_kaddr(pfn);

        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
        if (!pte_none(*(kmap_pte-idx)))
                BUG();
#endif
        set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot_nocache));
        __flush_tlb_one(vaddr);

        return (void*) vaddr;
}
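
/*
 * Illustrative caller (not part of the original file): mapping an arbitrary
 * page frame - e.g. memory we have no struct page for - through the
 * uncached pfn-based variant above and reading one word from it.  The
 * helper name and the KM_USER0 slot are examples only - a sketch, not
 * tree code.
 */
static unsigned int example_read_word_from_pfn(unsigned long pfn, unsigned int offset)
{
        char *vaddr;
        unsigned int val;

        vaddr = kmap_atomic_nocache_pfn(pfn, KM_USER0);
        val = *(volatile unsigned int *)(vaddr + offset);       /* uncached read */
        kunmap_atomic(vaddr, KM_USER0);

        return val;
}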

void kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
        unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
        enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

        if (vaddr < FIXADDR_START) { // FIXME
                dec_preempt_count();
                preempt_check_resched();
                return;
        }

        if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
                BUG();

        /*
         * Force other mappings to Oops if they try to access this pte
         * without first remapping it.
         */
        pte_clear(kmap_pte-idx);
        __flush_tlb_one(vaddr);
#endif

        dec_preempt_count();
        preempt_check_resched();
}

/*
 * Return the struct page backing an atomic kmap address (or the lowmem
 * direct mapping if the address is below the fixmap area).
 */
struct page *kmap_atomic_to_page(void *ptr)
{
        unsigned long idx, vaddr = (unsigned long)ptr;
        pte_t *pte;

        if (vaddr < FIXADDR_START)
                return virt_to_page(ptr);

        idx = virt_to_fix(vaddr);
        pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
        return pte_page(*pte);
}