/*
 *  linux/arch/i386/mm/pgtable.c
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#include <xen/features.h>
#include <xen/foreign_page.h>
#include <asm/hypervisor.h>

static void pgd_test_and_unpin(pgd_t *pgd);
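
/*
 * Dump a summary of memory usage to the kernel log: free swap plus
 * per-node counts of total, highmem, reserved, shared and swap-cached
 * pages, followed by the global VM statistics.
 */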
void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
	       global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
	       global_page_state(NR_SLAB_RECLAIMABLE) +
	       global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
	       global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		/* <pfn,flags> stored as-is, to permit clearing entries */
		set_pte(pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Associate a virtual page frame with a given machine page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
			   pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(pte, pfn_pte_ma(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return;
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return;
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return;
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

#ifndef CONFIG_COMPAT_VDSO
unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE);
EXPORT_SYMBOL(__FIXADDR_TOP);
#endif

void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	switch (idx) {
	case FIX_WP_TEST:
	case FIX_VSYSCALL:
#ifdef CONFIG_X86_F00F_BUG
	case FIX_F00F_IDT:
#endif
		/* Backed by a real page frame: map by pseudo-physical pfn. */
		set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
		break;
	default:
		/* Everything else names a machine frame. */
		set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
		break;
	}
}

/*
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_COMPAT_VDSO
	BUG_ON(reserve != 0);
#else
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
#endif
}

void set_fixaddr_top(unsigned long top)
{
	__FIXADDR_TOP = top - PAGE_SIZE;
}
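
/*
 * Allocate a zeroed kernel page-table page. Unless the domain runs with
 * writable page tables, the new page is immediately write-protected so
 * the hypervisor will accept it as a page-table page.
 */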
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
	if (pte)
		make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
	return pte;
}
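
/*
 * Allocate a zeroed page for a user page table. In the non-HIGHPTE case
 * the page is flagged as foreign, with pte_free registered as its
 * destructor so the page is returned through pte_free() when released.
 */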
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
	if (pte) {
		SetPageForeign(pte, pte_free);
		init_page_count(pte);
	}
#endif
	return pte;
}
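
/*
 * Free a page-table page: make its lowmem mapping writable again if the
 * hypervisor had write-protected it, drop the foreign flag, and release
 * the page.
 */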
void pte_free(struct page *pte)
{
	unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);

	if (!pte_write(*virt_to_ptep(va)))
		BUG_ON(HYPERVISOR_update_va_mapping(
			va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0));

	ClearPageForeign(pte);
	init_page_count(pte);

	__free_page(pte);
}

void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
{
	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
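
/*
 * pgd_list is an intrusive doubly-linked list threaded through struct
 * page fields: page->index holds the next element, while page->private
 * points at the previous element's next-link (or at pgd_list itself for
 * the head), so pgd_list_del() can unlink without walking from the head.
 */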
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long)pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long)&page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long)&pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *)page->index;
	pprev = (struct page **)page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long)pprev);
}
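
/*
 * Slab constructor for pgds: copy the kernel portion from swapper_pg_dir.
 * In the non-PAE case the new pgd is also cleared of user entries and
 * added to pgd_list under pgd_lock so pageattr changes can find it.
 */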
void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD > 1) {
		if (HAVE_SHARED_KERNEL_PMD)
			clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
					swapper_pg_dir + USER_PTRS_PER_PGD,
					KERNEL_PGD_PTRS);
	} else {
		spin_lock_irqsave(&pgd_lock, flags);
		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);

	pgd_test_and_unpin(pgd);
}
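
/*
 * Allocate a pgd for a new mm. For PAE this also allocates the user
 * (and, without a shared kernel pmd, the kernel) pmd pages; on Xen the
 * pgd is additionally kept below 4GB and the pmds are write-protected
 * before their machine addresses are installed in the pgdir.
 */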
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
	pmd_t **pmd;
	unsigned long flags;

	pgd_test_and_unpin(pgd);

	if (PTRS_PER_PMD == 1 || !pgd)
		return pgd;

	if (HAVE_SHARED_KERNEL_PMD) {
		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
			pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
			if (!pmd)
				goto out_oom;
			set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
		}
		return pgd;
	}

	/*
	 * We can race save/restore (if we sleep during a GFP_KERNEL memory
	 * allocation). We therefore store virtual addresses of pmds as they
	 * do not change across save/restore, and poke the machine addresses
	 * into the pgdir under the pgd_lock.
	 */
	pmd = kmalloc(PTRS_PER_PGD * sizeof(pmd_t *), GFP_KERNEL);
	if (!pmd) {
		kmem_cache_free(pgd_cache, pgd);
		return NULL;
	}

	/* Allocate pmds, remember virtual addresses. */
	for (i = 0; i < PTRS_PER_PGD; ++i) {
		pmd[i] = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
		if (!pmd[i])
			goto out_oom;
	}

	spin_lock_irqsave(&pgd_lock, flags);

	/* Protect against save/restore: move below 4GB under pgd_lock. */
	if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
		int rc = xen_create_contiguous_region(
			(unsigned long)pgd, 0, 32);
		if (rc) {
			spin_unlock_irqrestore(&pgd_lock, flags);
			goto out_oom;
		}
	}

	/* Copy kernel pmd contents and write-protect the new pmds. */
	for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
		unsigned long v = (unsigned long)i << PGDIR_SHIFT;
		pgd_t *kpgd = pgd_offset_k(v);
		pud_t *kpud = pud_offset(kpgd, v);
		pmd_t *kpmd = pmd_offset(kpud, v);
		memcpy(pmd[i], kpmd, PAGE_SIZE);
		make_lowmem_page_readonly(
			pmd[i], XENFEAT_writable_page_tables);
	}

	/* It is safe to poke machine addresses of pmds under the pgd_lock. */
	for (i = 0; i < PTRS_PER_PGD; i++)
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd[i])));

	/* Ensure this pgd gets picked up and pinned on save/restore. */
	pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
	kfree(pmd);
	return pgd;

out_oom:
	if (HAVE_SHARED_KERNEL_PMD) {
		for (i--; i >= 0; i--)
			kmem_cache_free(pmd_cache,
					(void *)__va(pgd_val(pgd[i])-1));
	} else {
		for (i--; i >= 0; i--)
			kmem_cache_free(pmd_cache, pmd[i]);
		kfree(pmd);
	}
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
}

void pgd_free(pgd_t *pgd)
{
	int i;

	/*
	 * After this the pgd should not be pinned for the duration of this
	 * function's execution. We should never sleep and thus never race:
	 *  1. User pmds will not become write-protected under our feet due
	 *     to a concurrent mm_pin_all().
	 *  2. The machine addresses in PGD entries will not become invalid
	 *     due to a concurrent save/restore.
	 */
	pgd_test_and_unpin(pgd);

	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1) {
		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
			pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
			kmem_cache_free(pmd_cache, pmd);
		}

		if (!HAVE_SHARED_KERNEL_PMD) {
			unsigned long flags;
			spin_lock_irqsave(&pgd_lock, flags);
			pgd_list_del(pgd);
			spin_unlock_irqrestore(&pgd_lock, flags);

			for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
				pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
				make_lowmem_page_writable(
					pmd, XENFEAT_writable_page_tables);
				memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
				kmem_cache_free(pmd_cache, pmd);
			}

			if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
				xen_destroy_contiguous_region(
					(unsigned long)pgd, 0);
		}
	}

	/* in the non-PAE case, free_pgtables() clears user pgd entries */
	kmem_cache_free(pgd_cache, pgd);
}
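
/*
 * Toggle write access to a page's kernel mapping via the hypervisor.
 * Each helper is a no-op when the domain has the named Xen feature
 * (e.g. XENFEAT_writable_page_tables), since the mapping never needs
 * to change protection in that case.
 */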
void make_lowmem_page_readonly(void *va, unsigned int feature)
{
	pte_t *pte;
	int rc;

	if (xen_feature(feature))
		return;

	pte = virt_to_ptep(va);
	rc = HYPERVISOR_update_va_mapping(
		(unsigned long)va, pte_wrprotect(*pte), 0);
	BUG_ON(rc);
}

void make_lowmem_page_writable(void *va, unsigned int feature)
{
	pte_t *pte;
	int rc;

	if (xen_feature(feature))
		return;

	pte = virt_to_ptep(va);
	rc = HYPERVISOR_update_va_mapping(
		(unsigned long)va, pte_mkwrite(*pte), 0);
	BUG_ON(rc);
}

void make_page_readonly(void *va, unsigned int feature)
{
	pte_t *pte;
	int rc;

	if (xen_feature(feature))
		return;

	pte = virt_to_ptep(va);
	rc = HYPERVISOR_update_va_mapping(
		(unsigned long)va, pte_wrprotect(*pte), 0);
	if (rc) /* fallback? */
		xen_l1_entry_update(pte, pte_wrprotect(*pte));
	if ((unsigned long)va >= (unsigned long)high_memory) {
		unsigned long pfn = pte_pfn(*pte);
#ifdef CONFIG_HIGHMEM
		if (pfn >= highstart_pfn)
			kmap_flush_unused(); /* flush stale writable kmaps */
		else
#endif
			make_lowmem_page_readonly(
				phys_to_virt(pfn << PAGE_SHIFT), feature);
	}
}

void make_page_writable(void *va, unsigned int feature)
{
	pte_t *pte;
	int rc;

	if (xen_feature(feature))
		return;

	pte = virt_to_ptep(va);
	rc = HYPERVISOR_update_va_mapping(
		(unsigned long)va, pte_mkwrite(*pte), 0);
	if (rc) /* fallback? */
		xen_l1_entry_update(pte, pte_mkwrite(*pte));
	if ((unsigned long)va >= (unsigned long)high_memory) {
		unsigned long pfn = pte_pfn(*pte);
#ifdef CONFIG_HIGHMEM
		if (pfn < highstart_pfn)
#endif
			make_lowmem_page_writable(
				phys_to_virt(pfn << PAGE_SHIFT), feature);
	}
}

void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
{
	if (xen_feature(feature))
		return;

	while (nr-- != 0) {
		make_page_readonly(va, feature);
		va = (void *)((unsigned long)va + PAGE_SIZE);
	}
}

void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
{
	if (xen_feature(feature))
		return;

	while (nr-- != 0) {
		make_page_writable(va, feature);
		va = (void *)((unsigned long)va + PAGE_SIZE);
	}
}
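
/*
 * Walk an entire page-table tree and remap every page-table page it
 * contains (and finally the pgd itself) with the given protection.
 * Used to make a tree read-only before pinning it, and writable again
 * after unpinning it.
 */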
static inline void pgd_walk_set_prot(void *pt, pgprot_t flags)
{
	struct page *page = virt_to_page(pt);
	unsigned long pfn = page_to_pfn(page);

	if (PageHighMem(page))
		return;
	BUG_ON(HYPERVISOR_update_va_mapping(
		(unsigned long)__va(pfn << PAGE_SHIFT),
		pfn_pte(pfn, flags), 0));
}

static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
{
	pgd_t *pgd = pgd_base;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int    g, u, m;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
		if (pgd_none(*pgd))
			continue;
		pud = pud_offset(pgd, 0);
		if (PTRS_PER_PUD > 1) /* not folded */
			pgd_walk_set_prot(pud,flags);
		for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
			if (pud_none(*pud))
				continue;
			pmd = pmd_offset(pud, 0);
			if (PTRS_PER_PMD > 1) /* not folded */
				pgd_walk_set_prot(pmd,flags);
			for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
				if (pmd_none(*pmd))
					continue;
				pte = pte_offset_kernel(pmd,0);
				pgd_walk_set_prot(pte,flags);
			}
		}
	}

	BUG_ON(HYPERVISOR_update_va_mapping(
		(unsigned long)pgd_base,
		pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
		UVMF_TLB_FLUSH));
}
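
/*
 * Pinning registers a pgd with the hypervisor as a page-table page (the
 * whole tree must be read-only first); unpinning releases it and makes
 * the tree writable again. PG_pinned on the pgd's page tracks the state.
 */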
static void __pgd_pin(pgd_t *pgd)
{
	pgd_walk(pgd, PAGE_KERNEL_RO);
	xen_pgd_pin(__pa(pgd));
	set_bit(PG_pinned, &virt_to_page(pgd)->flags);
}

static void __pgd_unpin(pgd_t *pgd)
{
	xen_pgd_unpin(__pa(pgd));
	pgd_walk(pgd, PAGE_KERNEL);
	clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
}

static void pgd_test_and_unpin(pgd_t *pgd)
{
	if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
		__pgd_unpin(pgd);
}

void mm_pin(struct mm_struct *mm)
{
	if (xen_feature(XENFEAT_writable_page_tables))
		return;
	spin_lock(&mm->page_table_lock);
	__pgd_pin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}

void mm_unpin(struct mm_struct *mm)
{
	if (xen_feature(XENFEAT_writable_page_tables))
		return;
	spin_lock(&mm->page_table_lock);
	__pgd_unpin(mm->pgd);
	spin_unlock(&mm->page_table_lock);
}
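
/*
 * Pin every pgd on pgd_list that is not already pinned, so that
 * save/restore sees a consistent set of pinned page tables.
 */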
void mm_pin_all(void)
{
	struct page *page;

	/* Only pgds on the pgd_list please: none hidden in the slab cache. */
	kmem_cache_shrink(pgd_cache);

	if (xen_feature(XENFEAT_writable_page_tables))
		return;

	for (page = pgd_list; page; page = (struct page *)page->index) {
		if (!test_bit(PG_pinned, &page->flags))
			__pgd_pin((pgd_t *)page_address(page));
	}
}
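
/*
 * Called when an mm is duplicated (fork): make sure the source mm's
 * page tables are pinned before they are copied for the child.
 */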
void _arch_dup_mmap(struct mm_struct *mm)
{
	if (!test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags))
		mm_pin(mm);
}
700 struct task_struct *tsk = current;
705 * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
706 * *much* faster this way, as no tlb flushes means bigger wrpt batches.
708 if (tsk->active_mm == mm) {
709 tsk->active_mm = &init_mm;
710 atomic_inc(&init_mm.mm_count);
712 switch_mm(mm, &init_mm, tsk);
714 atomic_dec(&mm->mm_count);
715 BUG_ON(atomic_read(&mm->mm_count) == 0);
720 if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
721 (atomic_read(&mm->mm_count) == 1) &&
722 !mm->context.has_foreign_mappings)