[linux-2.6.git] arch/x86_64/mm/pageattr.c
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#ifdef CONFIG_XEN
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

LIST_HEAD(mm_unpinned);
DEFINE_SPINLOCK(mm_unpinned_lock);

static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
{
        struct page *page = virt_to_page(pt);
        unsigned long pfn = page_to_pfn(page);

        BUG_ON(HYPERVISOR_update_va_mapping(
                       (unsigned long)__va(pfn << PAGE_SHIFT),
                       pfn_pte(pfn, flags), 0));
}

static void mm_walk(struct mm_struct *mm, pgprot_t flags)
{
        pgd_t       *pgd;
        pud_t       *pud;
        pmd_t       *pmd;
        pte_t       *pte;
        int          g, u, m;

        pgd = mm->pgd;
        /*
         * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
         * be the 'current' task's pagetables (e.g., current may be 32-bit,
         * but the pagetables may be for a 64-bit task).
         * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
         * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
         */
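        /*
         * Worked example (assuming the usual x86-64 values
         * TASK_SIZE64 = 1UL << 47 and PGDIR_SHIFT = 39):
         * (TASK_SIZE64 - 1) / PGDIR_SIZE = (2^47 - 1) >> 39 = 255, so
         * the loop below visits pgd entries 0..255 - exactly the 256
         * user-space slots of the 512-entry pgd.
         */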
        for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                if (PTRS_PER_PUD > 1) /* not folded */
                        mm_walk_set_prot(pud, flags);
                for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                        if (pud_none(*pud))
                                continue;
                        pmd = pmd_offset(pud, 0);
                        if (PTRS_PER_PMD > 1) /* not folded */
                                mm_walk_set_prot(pmd, flags);
                        for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
                                if (pmd_none(*pmd))
                                        continue;
                                pte = pte_offset_kernel(pmd, 0);
                                mm_walk_set_prot(pte, flags);
                        }
                }
        }
}
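
/*
 * Pinning a pgd tells the hypervisor to treat it as a pagetable from
 * then on, validating all writes to it.  Note the ordering: mm_pin()
 * first remaps every pagetable page read-only in the direct mapping
 * via mm_walk() and only then issues the pin hypercall, while
 * mm_unpin() reverses the two steps.
 */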
void mm_pin(struct mm_struct *mm)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        spin_lock(&mm->page_table_lock);

        mm_walk(mm, PAGE_KERNEL_RO);
        BUG_ON(HYPERVISOR_update_va_mapping(
                       (unsigned long)mm->pgd,
                       pfn_pte(virt_to_phys(mm->pgd) >> PAGE_SHIFT,
                               PAGE_KERNEL_RO),
                       UVMF_TLB_FLUSH));
        BUG_ON(HYPERVISOR_update_va_mapping(
                       (unsigned long)__user_pgd(mm->pgd),
                       pfn_pte(virt_to_phys(__user_pgd(mm->pgd)) >> PAGE_SHIFT,
                               PAGE_KERNEL_RO),
                       UVMF_TLB_FLUSH));
        xen_pgd_pin(__pa(mm->pgd));             /* kernel */
        xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
        mm->context.pinned = 1;
        spin_lock(&mm_unpinned_lock);
        list_del(&mm->context.unpinned);
        spin_unlock(&mm_unpinned_lock);

        spin_unlock(&mm->page_table_lock);
}

void mm_unpin(struct mm_struct *mm)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        spin_lock(&mm->page_table_lock);

        xen_pgd_unpin(__pa(mm->pgd));
        xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
        BUG_ON(HYPERVISOR_update_va_mapping(
                       (unsigned long)mm->pgd,
                       pfn_pte(virt_to_phys(mm->pgd) >> PAGE_SHIFT,
                               PAGE_KERNEL), 0));
        BUG_ON(HYPERVISOR_update_va_mapping(
                       (unsigned long)__user_pgd(mm->pgd),
                       pfn_pte(virt_to_phys(__user_pgd(mm->pgd)) >> PAGE_SHIFT,
                               PAGE_KERNEL), 0));
        mm_walk(mm, PAGE_KERNEL);
        xen_tlb_flush();
        mm->context.pinned = 0;
        spin_lock(&mm_unpinned_lock);
        list_add(&mm->context.unpinned, &mm_unpinned);
        spin_unlock(&mm_unpinned_lock);

        spin_unlock(&mm->page_table_lock);
}

void mm_pin_all(void)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        while (!list_empty(&mm_unpinned))
                mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
                                  context.unpinned));
}

void _arch_dup_mmap(struct mm_struct *mm)
{
        if (!mm->context.pinned)
                mm_pin(mm);
}

void _arch_exit_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk = current;

        task_lock(tsk);

        /*
         * We aggressively remove defunct pgds from cr3. We execute
         * unmap_vmas() *much* faster this way, as no TLB flushes are
         * needed and the writable-pagetable batches can be bigger.
         */
        if (tsk->active_mm == mm) {
                tsk->active_mm = &init_mm;
                atomic_inc(&init_mm.mm_count);

                switch_mm(mm, &init_mm, tsk);

                atomic_dec(&mm->mm_count);
                BUG_ON(atomic_read(&mm->mm_count) == 0);
        }

        task_unlock(tsk);

        if (mm->context.pinned && (atomic_read(&mm->mm_count) == 1) &&
            !mm->context.has_foreign_mappings)
                mm_unpin(mm);
}

void pte_free(struct page *pte)
{
        unsigned long va = (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT);

        if (!pte_write(*virt_to_ptep(va)))
                BUG_ON(HYPERVISOR_update_va_mapping(
                        va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
        __free_page(pte);
}
#endif  /* CONFIG_XEN */

static inline pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;
        return pte;
}
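
/*
 * Example use (illustrative, mirroring __change_page_attr() below):
 * for an address covered by a 2MB mapping the returned pointer is
 * really the pmd entry, so callers must test it with pte_huge()
 * before treating it as a 4k pte:
 *
 *         pte_t *kpte = lookup_address(address);
 *         if (kpte && pte_huge(*kpte))
 *                 ... address lies in a 2MB page ...
 */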

static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;

        if (!base)
                return NULL;
        /*
         * page_private is used to track how many entries in this page
         * table page have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}
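
/*
 * Lifecycle of page_private() on a split page, as driven by
 * __change_page_attr() below: it starts at 0 here, is incremented for
 * every pte that gets a non-standard protection, decremented when a
 * pte reverts to ref_prot, and once it reaches 0 again the page is
 * queued via save_page() and the 2MB mapping restored by
 * revert_page(); global_flush_tlb() frees the queued page later.
 */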

static void cache_flush_page(void *adr)
{
        int i;

        for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                asm volatile("clflush (%0)" :: "r" (adr + i));
}
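
/*
 * Worked example (assuming a typical x86_clflush_size of 64 bytes):
 * flushing one 4096-byte page takes PAGE_SIZE / 64 = 64 clflush
 * instructions, one per cache line - still far cheaper than dumping
 * the whole cache with WBINVD.
 */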

static void flush_kernel_map(void *arg)
{
        struct list_head *l = (struct list_head *)arg;
        struct page *pg;

        /*
         * When clflush is available, always use it because it is much
         * cheaper than WBINVD.
         */
        if (!cpu_has_clflush)
                asm volatile("wbinvd" ::: "memory");
        list_for_each_entry(pg, l, lru) {
                void *adr = page_address(pg);
                if (cpu_has_clflush)
                        cache_flush_page(adr);
        }
        __flush_tlb_all();
}

static inline void flush_map(struct list_head *l)
{
        on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

static inline void save_page(struct page *fpage)
{
        list_add(&fpage->lru, &deferred_pages);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
        large_pte = pte_mkhuge(large_pte);
        set_pte((pte_t *)pmd, large_pte);
}

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        pgprot_t ref_prot2;

        kpte = lookup_address(address);
        if (!kpte)
                return 0;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if (!pte_huge(*kpte)) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;
                        ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else if (!pte_huge(*kpte)) {
                set_pte(kpte, pfn_pte(pfn, ref_prot));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
        } else
                BUG();

        /* On x86-64 the direct mapping set up at boot does not use 4k pages. */
        /*
         * ..., but the Xen guest kernels (currently) do:
         * If the pte page is reserved, it was created at boot time (not
         * via split_large_page), so we must not replace it with a large
         * page.
         */
#ifndef CONFIG_XEN
        BUG_ON(PageReserved(kpte_page));
#else
        if (!PageReserved(kpte_page))
#endif
        if (page_private(kpte_page) == 0) {
                save_page(kpte_page);
                revert_page(address, ref_prot);
        }
        return 0;
}

/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * The caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
                if (err)
                        break;
                /*
                 * Handle the kernel mapping too, which aliases part of
                 * the lowmem.
                 */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2;
                        addr2 = __START_KERNEL_map + __pa(address);
                        /* Make sure the kernel mappings stay executable */
                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                        err = __change_page_attr(addr2, pfn, prot2,
                                                 PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_addr(addr, numpages, prot);
}
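
/*
 * Typical usage (a minimal sketch, not taken from this file): make a
 * page read-only in the kernel linear mapping, then flush once:
 *
 *         change_page_attr(page, 1, PAGE_KERNEL_RO);
 *         global_flush_tlb();
 *
 * Batching several change_page_attr() calls before a single
 * global_flush_tlb() amortizes the cross-CPU cache/TLB flush done by
 * flush_map().
 */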

void global_flush_tlb(void)
{
        struct page *pg, *next;
        struct list_head l;

        down_read(&init_mm.mmap_sem);
        list_replace_init(&deferred_pages, &l);
        up_read(&init_mm.mmap_sem);

        flush_map(&l);

        list_for_each_entry_safe(pg, next, &l, lru) {
                ClearPagePrivate(pg);
                __free_page(pg);
        }
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);