X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-parisc%2Fpgalloc.h;h=53a6fec7df959588753102198c829dc36da8fcbe;hb=9bf4aaab3e101692164d49b7ca357651eb691cb6;hp=eb97d25ff3534ae513a08b4ed2bb7ab0dd06b01f;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/include/asm-parisc/pgalloc.h b/include/asm-parisc/pgalloc.h
index eb97d25ff..53a6fec7d 100644
--- a/include/asm-parisc/pgalloc.h
+++ b/include/asm-parisc/pgalloc.h
@@ -10,39 +10,76 @@
 #include <asm/pgtable.h>
 #include <asm/cache.h>
 
+/* Allocate the top level pgd (page directory)
+ *
+ * Here (for 64 bit kernels) we implement a Hybrid L2/L3 scheme: we
+ * allocate the first pmd adjacent to the pgd.  This means that we can
+ * subtract a constant offset to get to it.  The pmd and pgd sizes are
+ * arranged so that a single pmd covers 4GB (giving a full LP64
+ * process access to 8TB) so our lookups are effectively L2 for the
+ * first 4GB of the kernel (i.e. for all ILP32 processes and all the
+ * kernel for machines with under 4GB of memory) */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
-	if (likely(pgd != NULL))
-		clear_page(pgd);
-	return pgd;
+	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,
+					       PGD_ALLOC_ORDER);
+	pgd_t *actual_pgd = pgd;
+
+	if (likely(pgd != NULL)) {
+		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
+#ifdef __LP64__
+		actual_pgd += PTRS_PER_PGD;
+		/* Populate first pmd with allocated memory.  We mark it
+		 * with PxD_FLAG_ATTACHED as a signal to the system that this
+		 * pmd entry may not be cleared. */
+		__pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
+					PxD_FLAG_VALID |
+					PxD_FLAG_ATTACHED)
+			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
+		/* The first pmd entry also is marked with _PAGE_GATEWAY as
+		 * a signal that this pmd may not be freed */
+		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
+#endif
+	}
+	return actual_pgd;
 }
 
 static inline void pgd_free(pgd_t *pgd)
 {
-	free_page((unsigned long)pgd);
+#ifdef __LP64__
+	pgd -= PTRS_PER_PGD;
+#endif
+	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);
 }
 
-#ifdef __LP64__
+#if PT_NLEVELS == 3
 
 /* Three Level Page Table Support for pmd's */
 
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 {
-	pgd_val(*pgd) = _PAGE_TABLE + __pa((unsigned long)pmd);
+	__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
+			(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+	pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
+					       PMD_ORDER);
 	if (pmd)
-		clear_page(pmd);
+		memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
 	return pmd;
 }
 
 static inline void pmd_free(pmd_t *pmd)
 {
-	free_page((unsigned long)pmd);
+#ifdef __LP64__
+	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+		/* This is the permanent pmd attached to the pgd;
+		 * cannot free it */
+		return;
+#endif
+	free_pages((unsigned long)pmd, PMD_ORDER);
 }
 
 #else
@@ -63,7 +100,18 @@
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
-	pmd_val(*pmd) = _PAGE_TABLE + __pa((unsigned long)pte);
+#ifdef __LP64__
+	/* preserve the gateway marker if this is the beginning of
+	 * the permanent pmd */
+	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
+		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
+				PxD_FLAG_VALID |
+				PxD_FLAG_ATTACHED)
+			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
+	else
+#endif
+		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
+			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
 }
 
 #define pmd_populate(mm, pmd, pte_page) \
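
Note on the hybrid L2/L3 comment in the hunk above: because the pgd and its
first pmd come out of one contiguous __get_free_pages() allocation, the
kernel converts between the two with constant pointer arithmetic
(actual_pgd += PTRS_PER_PGD on allocation, pgd -= PTRS_PER_PGD on free)
instead of a memory load.  A minimal userspace sketch of the same adjacency
trick, with toy_* names and a made-up size standing in for the kernel's
types and PGD_ALLOC_ORDER layout:

#include <stdio.h>
#include <stdlib.h>

#define TOY_PTRS_PER_PGD 512	/* illustrative size, not parisc's */

typedef unsigned long toy_entry_t;

/* One contiguous block: the pmd occupies the front, the pgd sits a
 * constant TOY_PTRS_PER_PGD entries in (mirrors pgd_alloc above). */
static toy_entry_t *toy_pgd_alloc(void)
{
	toy_entry_t *block = calloc(2 * TOY_PTRS_PER_PGD, sizeof(*block));
	return block ? block + TOY_PTRS_PER_PGD : NULL;
}

/* The attached pmd is recovered by subtracting the same constant,
 * with no table walk: the "subtract a constant offset" of the
 * comment block. */
static toy_entry_t *toy_attached_pmd(toy_entry_t *pgd)
{
	return pgd - TOY_PTRS_PER_PGD;
}

/* Mirrors pgd_free above: step back to the true allocation start. */
static void toy_pgd_free(toy_entry_t *pgd)
{
	free(pgd - TOY_PTRS_PER_PGD);
}

int main(void)
{
	toy_entry_t *pgd = toy_pgd_alloc();

	if (!pgd)
		return 1;
	printf("pgd %p, attached pmd %p\n",
	       (void *)pgd, (void *)toy_attached_pmd(pgd));
	toy_pgd_free(pgd);
	return 0;
}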
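
The __pgd_val_set()/__pmd_val_set() calls store a directory entry as flag
bits plus the physical address of the next-level table shifted right by
PxD_VALUE_SHIFT; the shift frees the entry's low bits for the PxD_FLAG_*
bits.  A sketch of that encoding with made-up flag values and shift
(parisc's real constants differ and live elsewhere in the headers):

#include <assert.h>
#include <stdint.h>

/* Made-up stand-ins for the PxD_* constants used in the patch. */
#define TOY_FLAG_PRESENT	(1u << 0)
#define TOY_FLAG_VALID		(1u << 1)
#define TOY_FLAG_MASK		0xfu
#define TOY_VALUE_SHIFT		4	/* assumed: 4 low bits for flags */

/* flags + (pa >> SHIFT): the idiom from pgd_populate() and
 * pmd_populate_kernel().  With pa page aligned, the shifted value
 * has zero low bits, so the flags never collide with it. */
static uint32_t toy_pxd_encode(uint32_t flags, uintptr_t pa)
{
	return flags + (uint32_t)(pa >> TOY_VALUE_SHIFT);
}

/* Invert the encoding: drop the flags, shift the address back up. */
static uintptr_t toy_pxd_address(uint32_t entry)
{
	return ((uintptr_t)(entry & ~TOY_FLAG_MASK)) << TOY_VALUE_SHIFT;
}

int main(void)
{
	uintptr_t pa = 0x12345000;	/* page-aligned physical address */
	uint32_t e = toy_pxd_encode(TOY_FLAG_PRESENT | TOY_FLAG_VALID, pa);

	assert(e & TOY_FLAG_PRESENT);
	assert(toy_pxd_address(e) == pa);
	return 0;
}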
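
PxD_FLAG_ATTACHED threads through three spots in this patch: pgd_alloc()
marks the permanently attached first pmd, pmd_free() refuses to release an
entry carrying the flag (that pmd shares pages with the pgd, so freeing it
separately would later double-free), and pmd_populate_kernel() re-asserts
the flag when repopulating that pmd.  A toy version of the guard, with
toy_pmd_flag() and the flag value as stand-ins for the kernel's:

#include <stdlib.h>

#define TOY_FLAG_ATTACHED (1u << 2)	/* stand-in for PxD_FLAG_ATTACHED */

struct toy_pmd {
	unsigned int flags;
};

/* Stand-in for pmd_flag() as used in the patch. */
static unsigned int toy_pmd_flag(const struct toy_pmd *pmd)
{
	return pmd->flags;
}

/* Mirrors pmd_free(): an attached pmd lives inside the pgd's
 * allocation, so it may only go away via pgd_free(). */
static void toy_pmd_free(struct toy_pmd *pmd)
{
	if (toy_pmd_flag(pmd) & TOY_FLAG_ATTACHED)
		return;	/* permanent pmd: freed with the pgd, not here */
	free(pmd);
}

int main(void)
{
	struct toy_pmd attached = { .flags = TOY_FLAG_ATTACHED };
	struct toy_pmd *dynamic = calloc(1, sizeof(*dynamic));

	if (!dynamic)
		return 1;
	toy_pmd_free(&attached);	/* no-op: guard catches it */
	toy_pmd_free(dynamic);		/* ordinary pmd: freed normally */
	return 0;
}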