1 #ifndef _PPC64_PGALLOC_H
2 #define _PPC64_PGALLOC_H
5 #include <linux/slab.h>
6 #include <linux/cpumask.h>
7 #include <linux/percpu.h>
8 #include <asm/processor.h>
12 extern kmem_cache_t *zero_cache;
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
22 pgd_alloc(struct mm_struct *mm)
24 return kmem_cache_alloc(zero_cache, GFP_KERNEL);
30 kmem_cache_free(zero_cache, pgd);
/* Install PMD table @PMD into PGD entry @PGD (the MM argument is unused). */
#define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)
36 pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
38 return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
44 kmem_cache_free(zero_cache, pmd);
/* Install PTE table @pte (a kernel virtual address) into PMD entry @pmd. */
#define pmd_populate_kernel(mm, pmd, pte)	pmd_set(pmd, pte)
/* Same, but taking the PTE table as a struct page (user mappings). */
#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
52 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
55 pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
57 struct page *ptepage = virt_to_page(pte);
58 ptepage->mapping = (void *) mm;
59 ptepage->index = address & PMD_MASK;
64 static inline struct page *
65 pte_alloc_one(struct mm_struct *mm, unsigned long address)
68 pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
70 struct page *ptepage = virt_to_page(pte);
71 ptepage->mapping = (void *) mm;
72 ptepage->index = address & PMD_MASK;
78 static inline void pte_free_kernel(pte_t *pte)
80 virt_to_page(pte)->mapping = NULL;
81 kmem_cache_free(zero_cache, pte);
84 static inline void pte_free(struct page *ptepage)
86 ptepage->mapping = NULL;
87 kmem_cache_free(zero_cache, page_address(ptepage));
90 struct pte_freelist_batch
94 struct page * pages[0];
/* How many struct page pointers fit in one page after the batch header. */
#define PTE_FREELIST_SIZE	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / \
				  sizeof(struct page *))
100 extern void pte_free_now(struct page *ptepage);
101 extern void pte_free_submit(struct pte_freelist_batch *batch);
103 DECLARE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
105 void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage);
106 #define __pmd_free_tlb(tlb, pmd) __pte_free_tlb(tlb, virt_to_page(pmd))
108 #define check_pgt_cache() do { } while (0)
110 #endif /* _PPC64_PGALLOC_H */