[linux-2.6.git] arch/i386/mm/pageattr.c (fedora core 6 1.2949 + vserver 2.2.0)
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>

static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);


pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        return pte_offset_kernel(pmd, address);
}

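/*
 * Illustrative sketch (not part of the original file): lookup_address()
 * walks init_mm's page tables, so a caller can inspect the current
 * protections of a kernel linear-map address, e.g.
 *
 *         pte_t *pte = lookup_address((unsigned long)page_address(page));
 *         if (pte && (pte_val(*pte) & _PAGE_RW))
 *                 ...the mapping is currently writable...
 *
 * For a 2/4MB large page the returned pointer aliases the pmd entry
 * covering the address rather than an individual pte.
 */

/*
 * Split one large kernel mapping into PTRS_PER_PTE individual 4K
 * mappings: allocate a pte page and populate it so that the target
 * address gets the new protections (prot) while every other page in the
 * 2/4MB region keeps the reference protections (ref_prot).
 */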
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                        pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base;
        pte_t *pbase;

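        /*
         * change_page_attr() holds cpa_lock with interrupts disabled, but
         * alloc_pages(GFP_KERNEL) may sleep, so drop the lock around the
         * allocation and retake it afterwards.
         */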
        spin_unlock_irq(&cpa_lock);
        base = alloc_pages(GFP_KERNEL, 0);
        spin_lock_irq(&cpa_lock);
        if (!base)
                return NULL;

        /*
         * page_private is used to track the number of entries in
         * the page table page that have non standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
                                           addr == address ? prot : ref_prot));
        }
        return base;
}

static void flush_kernel_map(void *arg)
{
        unsigned long adr = (unsigned long)arg;

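        /*
         * When a specific page was passed in and the CPU supports clflush,
         * flush just that page's cache lines; otherwise fall back to a
         * full cache flush (wbinvd) on CPUs that need it.
         */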
        if (adr && cpu_has_clflush) {
                int i;
                for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                        asm volatile("clflush (%0)" :: "r" (adr + i));
        } else if (boot_cpu_data.x86_model >= 4)
                wbinvd();

        /* Flush the whole TLB to work around an erratum in early Athlons
         * regarding large page flushing.
         */
        __flush_tlb_all();
}

static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long flags;

        set_pte_atomic(kpte, pte);      /* change init_mm */
        if (HAVE_SHARED_KERNEL_PMD)
                return;

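        /*
         * The kernel pmds are not shared here, so each pgd on pgd_list has
         * its own copy of the kernel mappings; update the corresponding
         * pmd entry in every one of them.
         */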
        spin_lock_irqsave(&pgd_lock, flags);
        for (page = pgd_list; page; page = (struct page *)page->index) {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pgd = (pgd_t *)page_address(page) + pgd_index(address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                set_pte_atomic((pte_t *)pmd, pte);
        }
        spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
        pgprot_t ref_prot;
        pte_t *linear;

        ref_prot =
        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;

        linear = (pte_t *)
                pmd_offset(pud_offset(pgd_offset_k(address), address), address);
        set_pmd_pte(linear, address,
                    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
                            ref_prot));
}

static int
__change_page_attr(struct page *page, pgprot_t prot)
{
        pte_t *kpte;
        unsigned long address;
        struct page *kpte_page;

        BUG_ON(PageHighMem(page));
        address = (unsigned long)page_address(page);

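        /*
         * Never make the kernel's read-only data section writable again:
         * strip _PAGE_RW from the request and taint the kernel so the
         * attempt is visible.
         */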
        if (address >= (unsigned long)__start_rodata && address <= (unsigned long)__end_rodata &&
                (pgprot_val(prot) & _PAGE_RW)) {
                pgprot_val(prot) &= ~(_PAGE_RW);
                add_taint(TAINT_MACHINE_CHECK);
        }

        kpte = lookup_address(address);
        if (!kpte)
                return -EINVAL;
        kpte_page = virt_to_page(kpte);
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
                if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                        set_pte_atomic(kpte, mk_pte(page, prot));
                } else {
                        pgprot_t ref_prot;
                        struct page *split;

                        ref_prot =
                        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                                ? PAGE_KERNEL_EXEC : PAGE_KERNEL;
                        split = split_large_page(address, prot, ref_prot);
                        if (!split)
                                return -ENOMEM;
                        set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
        } else
                BUG();

        /*
         * If the pte was reserved, it means it was created at boot
         * time (not via split_large_page) and in turn we must not
         * replace it with a large page.
         */
        if (!PageReserved(kpte_page)) {
                if (cpu_has_pse && (page_private(kpte_page) == 0)) {
                        ClearPagePrivate(kpte_page);
                        list_add(&kpte_page->lru, &df_list);
                        revert_page(kpte_page, address);
                }
        }
        return 0;
}

static inline void flush_map(void *adr)
{
        on_each_cpu(flush_kernel_map, adr, 1, 1);
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&cpa_lock, flags);
        for (i = 0; i < numpages; i++, page++) {
                err = __change_page_attr(page, prot);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&cpa_lock, flags);
        return err;
}
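
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * that needs an uncached view of some kernel pages would typically do
 *
 *         change_page_attr(page, nr_pages, PAGE_KERNEL_NOCACHE);
 *         global_flush_tlb();
 *
 * and restore the default mapping before freeing the pages with
 *
 *         change_page_attr(page, nr_pages, PAGE_KERNEL);
 *         global_flush_tlb();
 *
 * The i386 AGP helpers, e.g. map_page_into_agp()/unmap_page_from_agp(),
 * follow this pattern.
 */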

void global_flush_tlb(void)
{
        struct list_head l;
        struct page *pg, *next;

        BUG_ON(irqs_disabled());

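        /*
         * Take over the list of pte pages queued for freeing by
         * __change_page_attr(); they may only be released once the stale
         * mappings have been flushed on all CPUs.
         */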
        spin_lock_irq(&cpa_lock);
        list_replace_init(&df_list, &l);
        spin_unlock_irq(&cpa_lock);
        if (!cpu_has_clflush)
                flush_map(0);
        list_for_each_entry_safe(pg, next, &l, lru) {
                if (cpu_has_clflush)
                        flush_map(page_address(pg));
                __free_page(pg);
        }
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable)
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);

        /* The return value is ignored - the calls cannot fail, since
         * large pages are disabled at boot time.
         */
        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
        /* We should perform an IPI and flush all TLBs, but that can
         * deadlock, so flush only the current CPU.
         */
        __flush_tlb_all();
}
#endif
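
/*
 * Illustrative note (describing the generic CONFIG_DEBUG_PAGEALLOC hook,
 * not something defined in this file): the page allocator is expected to
 * call kernel_map_pages(page, 1 << order, 0) when pages are freed and
 * kernel_map_pages(page, 1 << order, 1) when they are allocated again, so
 * that stray accesses to freed pages through the linear mapping fault
 * instead of silently corrupting memory.
 */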

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);