X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fi386%2Fmm%2Fpgtable.c;h=0742d54f8bb06bc70fc5f3783ec3c08c0812a2fa;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=137d18db72ff3cf163bc408140eb9497c594bd03;hpb=87fc8d1bb10cd459024a742c6a10961fefcef18f;p=linux-2.6.git

diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index 137d18db7..0742d54f8 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -62,6 +62,7 @@ void show_mem(void)
 static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 
@@ -70,7 +71,12 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 		BUG();
 		return;
 	}
-	pmd = pmd_offset(pgd, vaddr);
+	pud = pud_offset(pgd, vaddr);
+	if (pud_none(*pud)) {
+		BUG();
+		return;
+	}
+	pmd = pmd_offset(pud, vaddr);
 	if (pmd_none(*pmd)) {
 		BUG();
 		return;
@@ -95,6 +101,7 @@ static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 {
 	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 
 	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
@@ -110,7 +117,8 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 		printk ("set_pmd_pfn: pgd_none\n");
 		return; /* BUG(); */
 	}
-	pmd = pmd_offset(pgd, vaddr);
+	pud = pud_offset(pgd, vaddr);
+	pmd = pmd_offset(pud, vaddr);
 	set_pmd(pmd, pfn_pmd(pfn, flags));
 	/*
 	 * It's enough to flush this one mapping.
@@ -132,10 +140,7 @@ void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-	if (pte)
-		clear_page(pte);
-	return pte;
+	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
 }
 
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
@@ -143,12 +148,10 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 	struct page *pte;
 
 #ifdef CONFIG_HIGHPTE
-	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
 #else
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
 #endif
-	if (pte)
-		clear_highpage(pte);
 	return pte;
 }
 
@@ -165,13 +168,11 @@ void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
  * against pageattr.c; it is the unique case in which a valid change
  * of kernel pagetables can't be lazily synchronized by vmalloc faults.
  * vmalloc faults work because attached pagetables are never freed.
- * If the locking proves to be non-performant, a ticketing scheme with
- * checks at dup_mmap(), exec(), and other mmlist addition points
- * could be used. The locking scheme was chosen on the basis of
- * manfred's recommendations and having no core impact whatsoever.
+ * The locking scheme was chosen on the basis of manfred's
+ * recommendations and having no core impact whatsoever.
  * -- wli
  */
-spinlock_t pgd_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(pgd_lock);
 struct page *pgd_list;
 
 static inline void pgd_list_add(pgd_t *pgd)
@@ -235,7 +236,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
 		if (!pmd)
 			goto out_oom;
-		set_pgd(&pgd[i], __pgd(1 + __pa((u64)((u32)pmd))));
+		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
 	}
 	return pgd;
 
@@ -254,6 +255,6 @@ void pgd_free(pgd_t *pgd)
 	if (PTRS_PER_PMD > 1)
 		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
 			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
-	/* in the non-PAE case, clear_page_tables() clears user pgd entries */
+	/* in the non-PAE case, clear_page_range() clears user pgd entries */
 	kmem_cache_free(pgd_cache, pgd);
 }
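
A note on the pud changes above: they follow the generic 4-level
page-table walk, where every lookup steps pgd -> pud -> pmd -> pte.
On i386 the pud level is folded, so pud_offset() simply hands the pgd
slot back and the extra step compiles away, but spelling it out keeps
the same code correct on true 4-level architectures. A minimal sketch
of the pattern, assuming kernel context; the helper name lookup_pte is
hypothetical and not part of this patch:

	#include <asm/pgtable.h>

	/*
	 * Walk the four levels down to the pte mapping vaddr, or return
	 * NULL if any intermediate table is absent.  On i386 the pud
	 * step is a no-op because the level is folded into the pgd.
	 */
	static pte_t *lookup_pte(pgd_t *pgd_base, unsigned long vaddr)
	{
		pgd_t *pgd = pgd_base + pgd_index(vaddr);
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			return NULL;
		pud = pud_offset(pgd, vaddr);
		if (pud_none(*pud))
			return NULL;
		pmd = pmd_offset(pud, vaddr);
		if (pmd_none(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, vaddr);
	}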
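The __GFP_ZERO hunks trade open-coded clearing for an allocator flag.
This matters most under CONFIG_HIGHPTE, where the pte page may sit in
highmem and zeroing it by hand needs clear_highpage() (a temporary
kmap); with __GFP_ZERO the page allocator returns the page already
zeroed either way. A sketch of the resulting idiom; the wrapper name
alloc_zeroed_pte_page is hypothetical, not from this file:

	#include <linux/mm.h>

	/*
	 * One flag covers lowmem and highmem alike: the allocator zeroes
	 * the page before returning it, so no clear_page() or
	 * clear_highpage() call is needed afterwards.
	 */
	static struct page *alloc_zeroed_pte_page(void)
	{
	#ifdef CONFIG_HIGHPTE
		return alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
	#else
		return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
	#endif
	}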