fedora core 6 1.2949 + vserver 2.2.0
[linux-2.6.git] arch/i386/mm/pgtable.c

diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 137d18d..f349eaf 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -2,7 +2,6 @@
  *  linux/arch/i386/mm/pgtable.c
  */
 
-#include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -13,6 +12,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/spinlock.h>
+#include <linux/module.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -30,13 +30,15 @@ void show_mem(void)
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;
+       unsigned long flags;
 
-       printk("Mem-info:\n");
+       printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
-       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-       for_each_pgdat(pgdat) {
+       printk(KERN_INFO "Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+       for_each_online_pgdat(pgdat) {
+               pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
-                       page = pgdat->node_mem_map + i;
+                       page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
@@ -47,12 +49,23 @@ void show_mem(void)
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
+               pgdat_resize_unlock(pgdat, &flags);
        }
-       printk("%d pages of RAM\n", total);
-       printk("%d pages of HIGHMEM\n",highmem);
-       printk("%d reserved pages\n",reserved);
-       printk("%d pages shared\n",shared);
-       printk("%d pages swap cached\n",cached);
+       printk(KERN_INFO "%d pages of RAM\n", total);
+       printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
+       printk(KERN_INFO "%d reserved pages\n", reserved);
+       printk(KERN_INFO "%d pages shared\n", shared);
+       printk(KERN_INFO "%d pages swap cached\n", cached);
+
+       printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
+       printk(KERN_INFO "%lu pages writeback\n",
+                                       global_page_state(NR_WRITEBACK));
+       printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
+       printk(KERN_INFO "%lu pages slab\n",
+               global_page_state(NR_SLAB_RECLAIMABLE) +
+               global_page_state(NR_SLAB_UNRECLAIMABLE));
+       printk(KERN_INFO "%lu pages pagetables\n",
+                                       global_page_state(NR_PAGETABLE));
 }
 
 /*
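
The show_mem() rework above does three things: for_each_pgdat() becomes for_each_online_pgdat(), the per-node page walk is bracketed by the node's resize lock so a concurrent memory hot-add cannot change node_spanned_pages mid-scan, and pgdat_page_nr() replaces the open-coded node_mem_map arithmetic, abstracting the memory model. The extra printk lines report the newer global_page_state() counters. A minimal sketch of the same hotplug-safe walk, assuming only the kernel APIs visible in the hunk (the count_reserved() wrapper itself is hypothetical):

	static unsigned int count_reserved(void)
	{
		pg_data_t *pgdat;
		unsigned long i, flags;
		unsigned int reserved = 0;

		for_each_online_pgdat(pgdat) {		/* skips offline nodes */
			pgdat_resize_lock(pgdat, &flags);	/* hotplug can't resize us now */
			for (i = 0; i < pgdat->node_spanned_pages; ++i)
				if (PageReserved(pgdat_page_nr(pgdat, i)))
					reserved++;
			pgdat_resize_unlock(pgdat, &flags);
		}
		return reserved;
	}
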
@@ -62,6 +75,7 @@ void show_mem(void)
 static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 {
        pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
 
@@ -70,14 +84,22 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
                BUG();
                return;
        }
-       pmd = pmd_offset(pgd, vaddr);
+       pud = pud_offset(pgd, vaddr);
+       if (pud_none(*pud)) {
+               BUG();
+               return;
+       }
+       pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
-       /* <pfn,flags> stored as-is, to permit clearing entries */
-       set_pte(pte, pfn_pte(pfn, flags));
+       if (pgprot_val(flags))
+               /* <pfn,flags> stored as-is, to permit clearing entries */
+               set_pte(pte, pfn_pte(pfn, flags));
+       else
+               pte_clear(&init_mm, vaddr, pte);
 
        /*
         * It's enough to flush this one mapping.
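
set_pte_pfn() picks up a pud step because the generic page-table code is now four-level (pgd, pud, pmd, pte); on i386 the pud is folded into the pgd, so pud_offset() is a pass-through, but the walk still has to spell it out. It also stops writing empty protection bits through set_pte() and uses pte_clear() for the clearing case instead. A sketch of the same descent, assuming only APIs visible in this hunk (the helper name is hypothetical):

	/* Hypothetical helper: walk swapper_pg_dir down to the pte for a
	 * kernel virtual address, returning NULL if a level is missing. */
	static pte_t *lookup_kernel_pte(unsigned long vaddr)
	{
		pgd_t *pgd = swapper_pg_dir + pgd_index(vaddr);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			return NULL;
		pud = pud_offset(pgd, vaddr);	/* folded: effectively the pgd */
		if (pud_none(*pud))
			return NULL;
		pmd = pmd_offset(pud, vaddr);
		if (pmd_none(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, vaddr);
	}
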
@@ -95,22 +117,24 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 {
        pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
 
        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
-               printk ("set_pmd_pfn: vaddr misaligned\n");
+               printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
                return; /* BUG(); */
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
-               printk ("set_pmd_pfn: pfn misaligned\n");
+               printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
                return; /* BUG(); */
        }
        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
-               printk ("set_pmd_pfn: pgd_none\n");
+               printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
                return; /* BUG(); */
        }
-       pmd = pmd_offset(pgd, vaddr);
+       pud = pud_offset(pgd, vaddr);
+       pmd = pmd_offset(pud, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));
        /*
         * It's enough to flush this one mapping.
@@ -119,6 +143,12 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
        __flush_tlb_one(vaddr);
 }
 
+static int fixmaps;
+#ifndef CONFIG_COMPAT_VDSO
+unsigned long __FIXADDR_TOP = 0xfffff000;
+EXPORT_SYMBOL(__FIXADDR_TOP);
+#endif
+
 void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
 {
        unsigned long address = __fix_to_virt(idx);
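
The new __FIXADDR_TOP variable (non-CONFIG_COMPAT_VDSO only) turns the top of the fixmap area from a compile-time constant into something movable at boot, and the fixmaps counter records whether any fixmap entry has been installed yet. The point is that every fixed address is computed relative to the top, roughly as in i386's asm/fixmap.h (reproduced from memory, so treat as a sketch):

	#define FIXADDR_TOP		((unsigned long)__FIXADDR_TOP)
	#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))

Lowering __FIXADDR_TOP therefore slides the whole fixmap region down in one move, which is what reserve_top_address() below relies on.
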
@@ -128,14 +158,30 @@ void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
                return;
        }
        set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
+       fixmaps++;
+}
+
+/**
+ * reserve_top_address - reserves a hole in the top of kernel address space
+ * @reserve - size of hole to reserve
+ *
+ * Can be used to relocate the fixmap area and poke a hole in the top
+ * of kernel address space to make room for a hypervisor.
+ */
+void reserve_top_address(unsigned long reserve)
+{
+       BUG_ON(fixmaps > 0);
+#ifdef CONFIG_COMPAT_VDSO
+       BUG_ON(reserve != 0);
+#else
+       __FIXADDR_TOP = -reserve - PAGE_SIZE;
+       __VMALLOC_RESERVE += reserve;
+#endif
 }
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-       if (pte)
-               clear_page(pte);
-       return pte;
+       return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 }
 
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
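
reserve_top_address() lets a hypervisor port claim the top of the kernel address space before any fixmap entry exists (BUG_ON(fixmaps > 0) enforces that ordering), pushing __FIXADDR_TOP down and growing __VMALLOC_RESERVE by the same amount. A hypothetical early-boot caller, reserving a 16 MB hypervisor hole:

	/* Hypothetical: must run before the first __set_fixmap() call. */
	static void __init example_hv_reserve(void)
	{
		reserve_top_address(16 * 1024 * 1024);
	}

The pte_alloc_one_kernel() change in the same hunk is the allocate-zeroed pattern discussed after the next hunk.
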
@@ -143,16 +189,14 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
        struct page *pte;
 
 #ifdef CONFIG_HIGHPTE
-       pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+       pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
 #else
-       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 #endif
-       if (pte)
-               clear_highpage(pte);
        return pte;
 }
 
-void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
+void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
 {
        memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
 }
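
Both pte allocators drop their explicit clear_page()/clear_highpage() calls in favour of __GFP_ZERO, which makes the page allocator hand back already-zeroed pages (it handles highmem internally), centralizing the zeroing. The before/after shape of the pattern, as a sketch:

	/* Sketch: let the allocator zero the page up front. */
	static struct page *alloc_zeroed_pt(void)
	{
		/* old style:
		 *	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
		 *	if (pte)
		 *		clear_highpage(pte);
		 */
		return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
	}
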
@@ -165,13 +209,11 @@ void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
  * against pageattr.c; it is the unique case in which a valid change
  * of kernel pagetables can't be lazily synchronized by vmalloc faults.
  * vmalloc faults work because attached pagetables are never freed.
- * If the locking proves to be non-performant, a ticketing scheme with
- * checks at dup_mmap(), exec(), and other mmlist addition points
- * could be used. The locking scheme was chosen on the basis of
- * manfred's recommendations and having no core impact whatsoever.
+ * The locking scheme was chosen on the basis of manfred's
+ * recommendations and having no core impact whatsoever.
  * -- wli
  */
-spinlock_t pgd_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(pgd_lock);
 struct page *pgd_list;
 
 static inline void pgd_list_add(pgd_t *pgd)
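
SPIN_LOCK_UNLOCKED is a single shared initializer, which does not work with spinlock debugging variants that want per-lock state (lockdep class keys, for instance), so static locks move to DEFINE_SPINLOCK(). The idiom, with hypothetical names:

	static DEFINE_SPINLOCK(my_lock);	/* declares and initializes */

	static void my_critical(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&my_lock, flags);
		/* ... touch the shared structure ... */
		spin_unlock_irqrestore(&my_lock, flags);
	}
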
@@ -179,42 +221,42 @@ static inline void pgd_list_add(pgd_t *pgd)
        struct page *page = virt_to_page(pgd);
        page->index = (unsigned long)pgd_list;
        if (pgd_list)
-               pgd_list->private = (unsigned long)&page->index;
+               set_page_private(pgd_list, (unsigned long)&page->index);
        pgd_list = page;
-       page->private = (unsigned long)&pgd_list;
+       set_page_private(page, (unsigned long)&pgd_list);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
        struct page *next, **pprev, *page = virt_to_page(pgd);
        next = (struct page *)page->index;
-       pprev = (struct page **)page->private;
+       pprev = (struct page **)page_private(page);
        *pprev = next;
        if (next)
-               next->private = (unsigned long)pprev;
+               set_page_private(next, (unsigned long)pprev);
 }
 
-void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
        unsigned long flags;
 
-       if (PTRS_PER_PMD == 1)
+       if (PTRS_PER_PMD == 1) {
+               memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
                spin_lock_irqsave(&pgd_lock, flags);
+       }
 
-       memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
+       clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                        swapper_pg_dir + USER_PTRS_PER_PGD,
-                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-
+                       KERNEL_PGD_PTRS);
        if (PTRS_PER_PMD > 1)
                return;
 
        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
-       memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
 }
 
 /* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
+void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 {
        unsigned long flags; /* can be called from interrupt context */
 
@@ -235,7 +277,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
                pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
                if (!pmd)
                        goto out_oom;
-               set_pgd(&pgd[i], __pgd(1 + __pa((u64)((u32)pmd))));
+               set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
        }
        return pgd;
 
@@ -254,6 +296,6 @@ void pgd_free(pgd_t *pgd)
        if (PTRS_PER_PMD > 1)
                for (i = 0; i < USER_PTRS_PER_PGD; ++i)
                        kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
-       /* in the non-PAE case, clear_page_tables() clears user pgd entries */
+       /* in the non-PAE case, free_pgtables() clears user pgd entries */
        kmem_cache_free(pgd_cache, pgd);
 }
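
In the PAE case each of the low pgd slots points at a pmd page from pmd_cache; __pgd(1 + __pa(pmd)) packs the pmd's physical address with the low bit set (the present bit), and pgd_free() strips it again with pgd_val(...) - 1 before returning the pmd to the cache. The dropped (u64)((u32)pmd) double cast was a no-op on a 32-bit kernel. A sketch of the round trip (the demonstration function is hypothetical):

	static void pae_pack_demo(void)
	{
		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
		pgd_t entry;
		pmd_t *back;

		if (!pmd)
			return;
		entry = __pgd(1 + __pa(pmd));		/* phys address | present bit */
		back = __va(pgd_val(entry) - 1);	/* recovers the pmd pointer */
		WARN_ON(back != pmd);
		kmem_cache_free(pmd_cache, back);
	}
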