/* arch/x86_64/mm/pageattr-xen.c */
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#ifdef CONFIG_XEN
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>

LIST_HEAD(mm_unpinned);
DEFINE_SPINLOCK(mm_unpinned_lock);

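/*
 * Remap a single pagetable page in the 1:1 (direct) area with the given
 * protection, via a Xen update_va_mapping hypercall.
 */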
static inline void mm_walk_set_prot(void *pt, pgprot_t flags)
{
        struct page *page = virt_to_page(pt);
        unsigned long pfn = page_to_pfn(page);

        BUG_ON(HYPERVISOR_update_va_mapping(
                       (unsigned long)__va(pfn << PAGE_SHIFT),
                       pfn_pte(pfn, flags), 0));
}

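/*
 * Walk every pagetable page of @mm (PUD, PMD and PTE levels) and remap each
 * one in the 1:1 area with @flags.  Used to make the pagetables read-only
 * before pinning and writable again after unpinning.
 */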
static void mm_walk(struct mm_struct *mm, pgprot_t flags)
{
        pgd_t       *pgd;
        pud_t       *pud;
        pmd_t       *pmd;
        pte_t       *pte;
        int          g, u, m;

        pgd = mm->pgd;
        /*
         * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
         * be the 'current' task's pagetables (e.g., current may be 32-bit,
         * but the pagetables may be for a 64-bit task).
         * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
         * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
         */
        for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                if (PTRS_PER_PUD > 1) /* not folded */
                        mm_walk_set_prot(pud, flags);
                for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                        if (pud_none(*pud))
                                continue;
                        pmd = pmd_offset(pud, 0);
                        if (PTRS_PER_PMD > 1) /* not folded */
                                mm_walk_set_prot(pmd, flags);
                        for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
                                if (pmd_none(*pmd))
                                        continue;
                                pte = pte_offset_kernel(pmd, 0);
                                mm_walk_set_prot(pte, flags);
                        }
                }
        }
}

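/*
 * Pin @mm's pagetables: make every pagetable page (including the kernel and
 * user halves of the PGD) read-only in the 1:1 area, ask Xen to validate and
 * pin both PGDs, and take the mm off the unpinned list.
 */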
void mm_pin(struct mm_struct *mm)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        spin_lock(&mm->page_table_lock);

        mm_walk(mm, PAGE_KERNEL_RO);
        BUG_ON(HYPERVISOR_update_va_mapping(
                       (unsigned long)mm->pgd,
                       pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL_RO),
                       UVMF_TLB_FLUSH));
        BUG_ON(HYPERVISOR_update_va_mapping(
                       (unsigned long)__user_pgd(mm->pgd),
                       pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, PAGE_KERNEL_RO),
                       UVMF_TLB_FLUSH));
        xen_pgd_pin(__pa(mm->pgd)); /* kernel */
        xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
        mm->context.pinned = 1;
        spin_lock(&mm_unpinned_lock);
        list_del(&mm->context.unpinned);
        spin_unlock(&mm_unpinned_lock);

        spin_unlock(&mm->page_table_lock);
}

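/*
 * Undo mm_pin(): unpin both PGDs, make all pagetable pages writable again in
 * the 1:1 area, and put the mm back on the unpinned list.
 */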
void mm_unpin(struct mm_struct *mm)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        spin_lock(&mm->page_table_lock);

        xen_pgd_unpin(__pa(mm->pgd));
        xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
        BUG_ON(HYPERVISOR_update_va_mapping(
                       (unsigned long)mm->pgd,
                       pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, PAGE_KERNEL), 0));
        BUG_ON(HYPERVISOR_update_va_mapping(
                       (unsigned long)__user_pgd(mm->pgd),
                       pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, PAGE_KERNEL), 0));
        mm_walk(mm, PAGE_KERNEL);
        xen_tlb_flush();
        mm->context.pinned = 0;
        spin_lock(&mm_unpinned_lock);
        list_add(&mm->context.unpinned, &mm_unpinned);
        spin_unlock(&mm_unpinned_lock);

        spin_unlock(&mm->page_table_lock);
}

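/* Pin every mm that is still on the unpinned list. */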
void mm_pin_all(void)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        while (!list_empty(&mm_unpinned))
                mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
                                  context.unpinned));
}

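/* Make sure a newly duplicated address space starts out pinned. */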
void _arch_dup_mmap(struct mm_struct *mm)
{
    if (!mm->context.pinned)
        mm_pin(mm);
}

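/*
 * Called while an address space is being torn down: switch away from the
 * dying mm and, once nothing else can reference it, unpin its pagetables so
 * the exit path can tear them down with ordinary writes.
 */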
void _arch_exit_mmap(struct mm_struct *mm)
{
    struct task_struct *tsk = current;

    task_lock(tsk);

    /*
     * We aggressively remove the defunct pgd from cr3: unmap_vmas() then
     * executes *much* faster, because avoiding TLB flushes allows much
     * bigger writable-pagetable (wrpt) batches.
     */
    if ( tsk->active_mm == mm )
    {
        tsk->active_mm = &init_mm;
        atomic_inc(&init_mm.mm_count);

        switch_mm(mm, &init_mm, tsk);

        atomic_dec(&mm->mm_count);
        BUG_ON(atomic_read(&mm->mm_count) == 0);
    }

    task_unlock(tsk);

    if ( mm->context.pinned && (atomic_read(&mm->mm_count) == 1) &&
         !mm->context.has_foreign_mappings )
        mm_unpin(mm);
}

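/*
 * Free a pte page, first making its 1:1 mapping writable again if it is
 * still read-only (i.e. it belonged to pinned pagetables).
 */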
void pte_free(struct page *pte)
{
        unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);

        if (!pte_write(*virt_to_ptep(va)))
                BUG_ON(HYPERVISOR_update_va_mapping(
                        va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));
        __free_page(pte);
}
#endif  /* CONFIG_XEN */

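/*
 * Return the kernel pte mapping @address, or the pmd entry itself if the
 * address is covered by a 2MB large page, or NULL if there is no mapping.
 */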
static inline pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;
        return pte;
}

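/*
 * Allocate a pte page covering the 2MB region around @address, mapping
 * @address itself with @prot and every other 4k page with @ref_prot.
 * The caller installs it in place of the large-page pmd entry.
 */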
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;
        if (!base)
                return NULL;
        /*
         * page_private is used to track the number of entries in the
         * page-table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}

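/*
 * Per-CPU flush after attribute changes: write back and invalidate the
 * caches (or clflush the affected lines, currently disabled), then flush the
 * TLB entry for @address, or the whole TLB when no address is given.
 */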
static void flush_kernel_map(void *address)
{
        if (0 && address && cpu_has_clflush) {
                /* is this worth it? */
                int i;
                for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                        asm volatile("clflush (%0)" :: "r" (address + i));
        } else
                asm volatile("wbinvd":::"memory");
        if (address)
                __flush_tlb_one(address);
        else
                __flush_tlb_all();
}


static inline void flush_map(unsigned long address)
{
        on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}

static struct page *deferred_pages; /* protected by init_mm.mmap_sem */

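/*
 * Queue a no-longer-needed pte page on the deferred_pages list; it is
 * actually freed in global_flush_tlb() once the TLBs have been flushed.
 */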
static inline void save_page(struct page *fpage)
{
        fpage->lru.next = (struct list_head *)deferred_pages;
        deferred_pages = fpage;
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pgprot_val(ref_prot) |= _PAGE_PSE;
        large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
        set_pte((pte_t *)pmd, large_pte);
}

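/*
 * Change the protection of a single kernel mapping to @prot (splitting a
 * large page if necessary), or restore it to @ref_prot when @prot equals
 * @ref_prot.  Once the containing pte page has no special entries left, it
 * is queued for freeing and the large page is re-established.
 */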
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        unsigned kpte_flags;
        pgprot_t ref_prot2;
        kpte = lookup_address(address);
        if (!kpte) return 0;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        kpte_flags = pte_val(*kpte);
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if ((kpte_flags & _PAGE_PSE) == 0) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */

                        struct page *split;
                        ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));

                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else if ((kpte_flags & _PAGE_PSE) == 0) {
                set_pte(kpte, pfn_pte(pfn, ref_prot));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
        } else
                BUG();

        /*
         * On x86-64 the direct mapping set up at boot does not use 4k pages,
         * but the XEN guest kernels (currently) do: if the pte was reserved,
         * it means it was created at boot time (not via split_large_page)
         * and in turn we must not replace it with a large page.
         */
#ifndef CONFIG_XEN
        BUG_ON(PageReserved(kpte_page));
#endif
        if (page_private(kpte_page) == 0) {
                save_page(kpte_page);
                revert_page(address, ref_prot);
        }
        return 0;
}

/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
                if (err)
                        break;
                /* Handle the kernel text mapping too, which aliases part of
                 * the lowmem */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2 = prot;
                        addr2 = __START_KERNEL_map + __pa(address);
                        pgprot_val(prot2) &= ~_PAGE_NX;
                        err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);
        return change_page_attr_addr(addr, numpages, prot);
}

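/*
 * Flush the caches and TLBs on all CPUs after change_page_attr*() calls,
 * then free any pte pages whose large mappings were restored meanwhile.
 */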
void global_flush_tlb(void)
{
        struct page *dpage;

        down_read(&init_mm.mmap_sem);
        dpage = xchg(&deferred_pages, NULL);
        up_read(&init_mm.mmap_sem);

        flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
        while (dpage) {
                struct page *tmp = dpage;
                dpage = (struct page *)dpage->lru.next;
                ClearPagePrivate(tmp);
                __free_page(tmp);
        }
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);
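
/*
 * Typical usage of the interface exported above - an illustrative sketch
 * only, not code from this file; the page and protection values are whatever
 * the caller needs:
 *
 *	change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 *	... use the uncached mapping ...
 *	change_page_attr(page, 1, PAGE_KERNEL);
 *	global_flush_tlb();
 */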