#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/fixmap.h>

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
	if (likely(pgd != NULL))
		clear_page(pgd);
	return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

#ifdef __LP64__

/* Three Level Page Table Support for pmd's */

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_val(*pgd) = _PAGE_TABLE + __pa((unsigned long)pmd);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (pmd)
		clear_page(pmd);
	return pmd;
}

static inline void pmd_free(pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}

#else

/* Two Level Page Table Support for pmd's */

/*
 * allocating and freeing a pmd is trivial: the 1-entry pmd is
 * inside the pgd, so has no extra memory associated with it.
 */

#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()

#endif

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
	pmd_val(*pmd) = _PAGE_TABLE + __pa((unsigned long)pte);
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (likely(page != NULL))
		clear_page(page_address(page));
	return page;
}

static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (likely(pte != NULL))
		clear_page(pte);
	return pte;
}

static inline void pte_free_kernel(pte_t *pte)
{
	free_page((unsigned long)pte);
}

#define pte_free(page)		pte_free_kernel(page_address(page))

extern int do_check_pgt_cache(int, int);
#define check_pgt_cache()	do { } while (0)

#endif
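
/*
 * Usage sketch (kept under #if 0 so it is not compiled into users of this
 * header): a minimal illustration of how generic mm code of this era could
 * wire a freshly allocated PTE page into a pmd using the hooks above.
 * The function name example_alloc_pte and the exact locking shown are
 * illustrative assumptions, not code taken from this file.
 */
#if 0
static int example_alloc_pte(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long address)
{
	/* pte_alloc_one() returns a zeroed page wrapped in a struct page */
	struct page *new = pte_alloc_one(mm, address);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (pmd_present(*pmd))
		pte_free(new);			/* lost the race: drop our page */
	else
		pmd_populate(mm, pmd, new);	/* point the pmd at the new PTE page */
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif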