/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/mmu_context.h>

#include <xen/features.h>
#include <xen/foreign_page.h>
#include <asm/hypervisor.h>

static void pgd_test_and_unpin(pgd_t *pgd);

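/*
 * Dump a memory summary to the kernel log: free areas, free swap, totals of
 * RAM, highmem, reserved, shared and swap-cached pages across all online
 * nodes, and the global dirty/writeback/mapped/slab/pagetable counters
 * from get_page_state().
 */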
void show_mem(void)
{
        int total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;
        struct page_state ps;
        unsigned long flags;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_online_pgdat(pgdat) {
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);

        get_page_state(&ps);
        printk(KERN_INFO "%lu pages dirty\n", ps.nr_dirty);
        printk(KERN_INFO "%lu pages writeback\n", ps.nr_writeback);
        printk(KERN_INFO "%lu pages mapped\n", ps.nr_mapped);
        printk(KERN_INFO "%lu pages slab\n", ps.nr_slab);
        printk(KERN_INFO "%lu pages pagetables\n", ps.nr_page_table_pages);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        /* <pfn,flags> stored as-is, to permit clearing entries */
        set_pte(pte, pfn_pte(pfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/*
 * Associate a virtual page frame with a given machine page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
                           pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        /* <pfn,flags> stored as-is, to permit clearing entries */
        set_pte(pte, pfn_pte_ma(pfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
                return; /* BUG(); */
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
                return; /* BUG(); */
        }
        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
                return; /* BUG(); */
        }
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));
        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

static int nr_fixmaps = 0;
unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE);
EXPORT_SYMBOL(__FIXADDR_TOP);

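/*
 * Install a fixmap entry. FIX_WP_TEST, FIX_VSYSCALL and (on F00F-bug CPUs)
 * FIX_F00F_IDT are given pseudo-physical addresses and installed with
 * set_pte_pfn(); every other slot is passed a machine address and installed
 * with set_pte_pfn_ma().
 */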
void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        switch (idx) {
        case FIX_WP_TEST:
        case FIX_VSYSCALL:
#ifdef CONFIG_X86_F00F_BUG
        case FIX_F00F_IDT:
#endif
                set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
                break;
        default:
                set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
                break;
        }
        nr_fixmaps++;
}

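/*
 * Move the top of the fixmap area. Must be called before any fixmap entry
 * is created; the BUG_ON() on nr_fixmaps enforces this.
 */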
void set_fixaddr_top(unsigned long top)
{
        BUG_ON(nr_fixmaps > 0);
        __FIXADDR_TOP = top - PAGE_SIZE;
}

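/*
 * Allocate a zeroed page for a kernel pte table and make its lowmem mapping
 * read-only up front (a no-op when XENFEAT_writable_page_tables is
 * available), since page-table pages must not be writable by the guest.
 */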
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
        if (pte)
                make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
        return pte;
}

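/*
 * Allocate a zeroed page for a user pte table. With CONFIG_HIGHPTE the page
 * may come from highmem; otherwise it is marked PageForeign with pte_free()
 * registered as its destructor.
 */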
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
        if (pte) {
                SetPageForeign(pte, pte_free);
                init_page_count(pte);
        }
#endif
        return pte;
}

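/*
 * Free a pte page allocated above. If it is still mapped read-only (it was
 * in use as a page table), restore a writable mapping first, then clear the
 * foreign-page state and release it.
 */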
void pte_free(struct page *pte)
{
        unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);

        if (!pte_write(*virt_to_ptep(va)))
                BUG_ON(HYPERVISOR_update_va_mapping(
                        va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));

        ClearPageForeign(pte);
        init_page_count(pte);

        __free_page(pte);
}

void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
{
        memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

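/*
 * pgd_list is threaded through struct page: page->index holds the next
 * element, and page_private() holds the address of the pointer that links
 * to this page (either &pgd_list or the previous page's ->index), so
 * pgd_list_del() can unlink without walking the list.
 */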
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        page->index = (unsigned long)pgd_list;
        if (pgd_list)
                set_page_private(pgd_list, (unsigned long)&page->index);
        pgd_list = page;
        set_page_private(page, (unsigned long)&pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *next, **pprev, *page = virt_to_page(pgd);
        next = (struct page *)page->index;
        pprev = (struct page **)page_private(page);
        *pprev = next;
        if (next)
                set_page_private(next, (unsigned long)pprev);
}

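/*
 * Constructor for pgd_cache. For PAE (PTRS_PER_PMD > 1) only the kernel
 * part of the pgd is cloned, and only when the kernel pmd is shared. For
 * non-PAE the kernel entries are cloned, the user entries cleared, and the
 * pgd is added to pgd_list under pgd_lock.
 */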
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
        unsigned long flags;

        if (PTRS_PER_PMD > 1) {
                if (HAVE_SHARED_KERNEL_PMD)
                        clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                                        swapper_pg_dir + USER_PTRS_PER_PGD,
                                        KERNEL_PGD_PTRS);
        } else {
                spin_lock_irqsave(&pgd_lock, flags);
                clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                                swapper_pg_dir + USER_PTRS_PER_PGD,
                                KERNEL_PGD_PTRS);
                memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
                pgd_list_add(pgd);
                spin_unlock_irqrestore(&pgd_lock, flags);
        }
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
        unsigned long flags; /* can be called from interrupt context */

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);

        pgd_test_and_unpin(pgd);
}

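/*
 * Allocate a pgd and, for PAE, its pmds. Without a shared kernel pmd the
 * kernel pmds are copied from the kernel page tables, write-protected and
 * linked in by machine address under pgd_lock, and the pgd itself is kept
 * below 4GB unless XENFEAT_pae_pgdir_above_4gb is advertised.
 */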
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        int i;
        pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
        pmd_t **pmd;
        unsigned long flags;

        pgd_test_and_unpin(pgd);

        if (PTRS_PER_PMD == 1 || !pgd)
                return pgd;

        if (HAVE_SHARED_KERNEL_PMD) {
                for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
                        pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
                        if (!pmd)
                                goto out_oom;
                        set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
                }
                return pgd;
        }

        /*
         * We can race save/restore (if we sleep during a GFP_KERNEL memory
         * allocation). We therefore store virtual addresses of pmds as they
         * do not change across save/restore, and poke the machine addresses
         * into the pgdir under the pgd_lock.
         */
        pmd = kmalloc(PTRS_PER_PGD * sizeof(pmd_t *), GFP_KERNEL);
        if (!pmd) {
                kmem_cache_free(pgd_cache, pgd);
                return NULL;
        }

        /* Allocate pmds, remember virtual addresses. */
        for (i = 0; i < PTRS_PER_PGD; ++i) {
                pmd[i] = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
                if (!pmd[i])
                        goto out_oom;
        }

        spin_lock_irqsave(&pgd_lock, flags);

        /* Protect against save/restore: move below 4GB under pgd_lock. */
        if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
                int rc = xen_create_contiguous_region(
                        (unsigned long)pgd, 0, 32);
                if (rc) {
                        spin_unlock_irqrestore(&pgd_lock, flags);
                        goto out_oom;
                }
        }

        /* Copy kernel pmd contents and write-protect the new pmds. */
        for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
                unsigned long v = (unsigned long)i << PGDIR_SHIFT;
                pgd_t *kpgd = pgd_offset_k(v);
                pud_t *kpud = pud_offset(kpgd, v);
                pmd_t *kpmd = pmd_offset(kpud, v);
                memcpy(pmd[i], kpmd, PAGE_SIZE);
                make_lowmem_page_readonly(
                        pmd[i], XENFEAT_writable_page_tables);
        }

        /* It is safe to poke machine addresses of pmds under the pgd_lock. */
        for (i = 0; i < PTRS_PER_PGD; i++)
                set_pgd(&pgd[i], __pgd(1 + __pa(pmd[i])));

        /* Ensure this pgd gets picked up and pinned on save/restore. */
        pgd_list_add(pgd);

        spin_unlock_irqrestore(&pgd_lock, flags);

        kfree(pmd);

        return pgd;

out_oom:
        if (HAVE_SHARED_KERNEL_PMD) {
                for (i--; i >= 0; i--)
                        kmem_cache_free(pmd_cache,
                                        (void *)__va(pgd_val(pgd[i])-1));
        } else {
                for (i--; i >= 0; i--)
                        kmem_cache_free(pmd_cache, pmd[i]);
                kfree(pmd);
        }
        kmem_cache_free(pgd_cache, pgd);
        return NULL;
}

void pgd_free(pgd_t *pgd)
{
        int i;

        /*
         * After this the pgd should not be pinned for the duration of this
         * function's execution. We should never sleep and thus never race:
         *  1. User pmds will not become write-protected under our feet due
         *     to a concurrent mm_pin_all().
         *  2. The machine addresses in PGD entries will not become invalid
         *     due to a concurrent save/restore.
         */
        pgd_test_and_unpin(pgd);

        /* in the PAE case user pgd entries are overwritten before usage */
        if (PTRS_PER_PMD > 1) {
                for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
                        pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
                        kmem_cache_free(pmd_cache, pmd);
                }

                if (!HAVE_SHARED_KERNEL_PMD) {
                        unsigned long flags;
                        spin_lock_irqsave(&pgd_lock, flags);
                        pgd_list_del(pgd);
                        spin_unlock_irqrestore(&pgd_lock, flags);

                        for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
                                pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
                                make_lowmem_page_writable(
                                        pmd, XENFEAT_writable_page_tables);
                                memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
                                kmem_cache_free(pmd_cache, pmd);
                        }

                        if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
                                xen_destroy_contiguous_region(
                                        (unsigned long)pgd, 0);
                }
        }

        /* in the non-PAE case, free_pgtables() clears user pgd entries */
        kmem_cache_free(pgd_cache, pgd);
}

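/*
 * Make the lowmem (linear) mapping of a page read-only or writable via a
 * hypervisor call. Both helpers are no-ops when the named Xen feature
 * (e.g. XENFEAT_writable_page_tables) is available.
 */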
void make_lowmem_page_readonly(void *va, unsigned int feature)
{
        pte_t *pte;
        int rc;

        if (xen_feature(feature))
                return;

        pte = virt_to_ptep(va);
        rc = HYPERVISOR_update_va_mapping(
                (unsigned long)va, pte_wrprotect(*pte), 0);
        BUG_ON(rc);
}

void make_lowmem_page_writable(void *va, unsigned int feature)
{
        pte_t *pte;
        int rc;

        if (xen_feature(feature))
                return;

        pte = virt_to_ptep(va);
        rc = HYPERVISOR_update_va_mapping(
                (unsigned long)va, pte_mkwrite(*pte), 0);
        BUG_ON(rc);
}

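/*
 * As above, but for any kernel virtual address. If the direct
 * update_va_mapping call fails, the PTE is updated via xen_l1_entry_update().
 * For addresses above high_memory the lowmem alias of the underlying frame
 * is adjusted as well; for highmem frames make_page_readonly() instead
 * flushes any stale writable kmaps.
 */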
void make_page_readonly(void *va, unsigned int feature)
{
        pte_t *pte;
        int rc;

        if (xen_feature(feature))
                return;

        pte = virt_to_ptep(va);
        rc = HYPERVISOR_update_va_mapping(
                (unsigned long)va, pte_wrprotect(*pte), 0);
        if (rc) /* fallback? */
                xen_l1_entry_update(pte, pte_wrprotect(*pte));
        if ((unsigned long)va >= (unsigned long)high_memory) {
                unsigned long pfn = pte_pfn(*pte);
#ifdef CONFIG_HIGHMEM
                if (pfn >= highstart_pfn)
                        kmap_flush_unused(); /* flush stale writable kmaps */
                else
#endif
                        make_lowmem_page_readonly(
                                phys_to_virt(pfn << PAGE_SHIFT), feature);
        }
}

void make_page_writable(void *va, unsigned int feature)
{
        pte_t *pte;
        int rc;

        if (xen_feature(feature))
                return;

        pte = virt_to_ptep(va);
        rc = HYPERVISOR_update_va_mapping(
                (unsigned long)va, pte_mkwrite(*pte), 0);
        if (rc) /* fallback? */
                xen_l1_entry_update(pte, pte_mkwrite(*pte));
        if ((unsigned long)va >= (unsigned long)high_memory) {
                unsigned long pfn = pte_pfn(*pte);
#ifdef CONFIG_HIGHMEM
                if (pfn < highstart_pfn)
#endif
                        make_lowmem_page_writable(
                                phys_to_virt(pfn << PAGE_SHIFT), feature);
        }
}

void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
{
        if (xen_feature(feature))
                return;

        while (nr-- != 0) {
                make_page_readonly(va, feature);
                va = (void *)((unsigned long)va + PAGE_SIZE);
        }
}

void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
{
        if (xen_feature(feature))
                return;

        while (nr-- != 0) {
                make_page_writable(va, feature);
                va = (void *)((unsigned long)va + PAGE_SIZE);
        }
}

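/*
 * pgd_walk() applies the given protection to every page-table page
 * reachable from the user portion of a pgd, and finally to the pgd page
 * itself, skipping highmem pte pages. It is used to make a page-table tree
 * read-only before pinning and writable again after unpinning.
 */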
static inline void pgd_walk_set_prot(void *pt, pgprot_t flags)
{
        struct page *page = virt_to_page(pt);
        unsigned long pfn = page_to_pfn(page);

        if (PageHighMem(page))
                return;
        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)__va(pfn << PAGE_SHIFT),
                pfn_pte(pfn, flags), 0));
}

static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
{
        pgd_t *pgd = pgd_base;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int    g, u, m;

        if (xen_feature(XENFEAT_auto_translated_physmap))
                return;

        for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
                if (pgd_none(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                if (PTRS_PER_PUD > 1) /* not folded */
                        pgd_walk_set_prot(pud, flags);
                for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
                        if (pud_none(*pud))
                                continue;
                        pmd = pmd_offset(pud, 0);
                        if (PTRS_PER_PMD > 1) /* not folded */
                                pgd_walk_set_prot(pmd, flags);
                        for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
                                if (pmd_none(*pmd))
                                        continue;
                                pte = pte_offset_kernel(pmd, 0);
                                pgd_walk_set_prot(pte, flags);
                        }
                }
        }

        BUG_ON(HYPERVISOR_update_va_mapping(
                (unsigned long)pgd_base,
                pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
                UVMF_TLB_FLUSH));
}

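/*
 * Pinning hands a page-table tree to the hypervisor for validation: every
 * page in it is made read-only, the pgd is pinned, and the pgd page is
 * marked PG_pinned. Unpinning reverses the sequence.
 */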
static void __pgd_pin(pgd_t *pgd)
{
        pgd_walk(pgd, PAGE_KERNEL_RO);
        xen_pgd_pin(__pa(pgd));
        set_bit(PG_pinned, &virt_to_page(pgd)->flags);
}

static void __pgd_unpin(pgd_t *pgd)
{
        xen_pgd_unpin(__pa(pgd));
        pgd_walk(pgd, PAGE_KERNEL);
        clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
}

static void pgd_test_and_unpin(pgd_t *pgd)
{
        if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
                __pgd_unpin(pgd);
}

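/*
 * Pin or unpin an mm's page tables under its page_table_lock. Both are
 * no-ops when the hypervisor provides writable page tables.
 */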
void mm_pin(struct mm_struct *mm)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;
        spin_lock(&mm->page_table_lock);
        __pgd_pin(mm->pgd);
        spin_unlock(&mm->page_table_lock);
}

void mm_unpin(struct mm_struct *mm)
{
        if (xen_feature(XENFEAT_writable_page_tables))
                return;
        spin_lock(&mm->page_table_lock);
        __pgd_unpin(mm->pgd);
        spin_unlock(&mm->page_table_lock);
}

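/*
 * Pin every pgd currently on pgd_list (used so that all page tables are
 * pinned, e.g. around save/restore); the slab cache is shrunk first so no
 * unpinned pgds are left hidden in partially free slabs.
 */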
void mm_pin_all(void)
{
        struct page *page;

        /* Only pgds on the pgd_list please: none hidden in the slab cache. */
        kmem_cache_shrink(pgd_cache);

        if (xen_feature(XENFEAT_writable_page_tables))
                return;

        for (page = pgd_list; page; page = (struct page *)page->index) {
                if (!test_bit(PG_pinned, &page->flags))
                        __pgd_pin((pgd_t *)page_address(page));
        }
}

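/* Make sure the pgd is pinned when an address space is duplicated. */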
void _arch_dup_mmap(struct mm_struct *mm)
{
        if (!test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags))
                mm_pin(mm);
}

void _arch_exit_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk = current;

        task_lock(tsk);

        /*
         * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
         * *much* faster this way, as no tlb flushes means bigger wrpt batches.
         */
        if (tsk->active_mm == mm) {
                tsk->active_mm = &init_mm;
                atomic_inc(&init_mm.mm_count);

                switch_mm(mm, &init_mm, tsk);

                atomic_dec(&mm->mm_count);
                BUG_ON(atomic_read(&mm->mm_count) == 0);
        }

        task_unlock(tsk);

        if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
            (atomic_read(&mm->mm_count) == 1) &&
            !mm->context.has_foreign_mappings)
                mm_unpin(mm);
}