Merge to Fedora kernel-2.6.18-1.2224_FC5 patched with stable patch-2.6.18.1-vs2.0...
[linux-2.6.git] / include / asm-powerpc / pgalloc.h
1 #ifndef _ASM_POWERPC_PGALLOC_H
2 #define _ASM_POWERPC_PGALLOC_H
3 #ifdef __KERNEL__
4
5 #ifndef CONFIG_PPC64
6 #include <asm-ppc/pgalloc.h>
7 #else
8
9 #include <linux/mm.h>
10 #include <linux/slab.h>
11 #include <linux/cpumask.h>
12 #include <linux/percpu.h>
13
/* kmem caches for the page-table levels; the indices below pick the
 * cache used for each level. */
extern kmem_cache_t *pgtable_cache[];

#ifdef CONFIG_PPC_64K_PAGES
/* 64K pages: each level has its own cache. */
#define PTE_CACHE_NUM   0
#define PMD_CACHE_NUM   1
#define PGD_CACHE_NUM   2
#define HUGEPTE_CACHE_NUM 3
#else
/* 4K pages: PUD tables share the PMD cache (index 1) and the PGD
 * shares the PTE-table cache (index 0). */
#define PTE_CACHE_NUM   0
#define PMD_CACHE_NUM   1
#define PUD_CACHE_NUM   1
#define PGD_CACHE_NUM   0
#define HUGEPTE_CACHE_NUM 2
#endif

/* Dummy functions since we don't support execshield on ppc */
#define arch_add_exec_range(mm, limit) do { ; } while (0)
#define arch_flush_exec_range(mm)      do { ; } while (0)
#define arch_remove_exec_range(mm, limit) do { ; } while (0)
33
34 /*
35  * This program is free software; you can redistribute it and/or
36  * modify it under the terms of the GNU General Public License
37  * as published by the Free Software Foundation; either version
38  * 2 of the License, or (at your option) any later version.
39  */
40
/* Allocate a PGD from its kmem cache; returns NULL on failure. */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
}
45
/* Release a PGD previously obtained from pgd_alloc(). */
static inline void pgd_free(pgd_t *pgd)
{
	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
}
50
#ifndef CONFIG_PPC_64K_PAGES

/* 4K pages: a PGD entry points at a PUD table. */
#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)
54
/* Allocate a PUD table; __GFP_REPEAT makes the allocator retry harder
 * before giving up.  May still return NULL. */
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
				GFP_KERNEL|__GFP_REPEAT);
}
60
/* Release a PUD table previously obtained from pud_alloc_one(). */
static inline void pud_free(pud_t *pud)
{
	kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
}
65
/* Point a PUD entry at a PMD table. */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, (unsigned long)pmd);
}
70
/* Install a PTE table into a PMD entry; pmd_populate takes the table
 * as a struct page, pmd_populate_kernel as a kernel virtual address. */
#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
74
75
76 #else /* CONFIG_PPC_64K_PAGES */
77
/* 64K pages: a PUD entry points at a PMD table. */
#define pud_populate(mm, pud, pmd)	pud_set(pud, (unsigned long)pmd)

/* Install a PTE table (kernel virtual address) into a PMD entry. */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, (unsigned long)pte);
}

/* As above, but the PTE table is given as a struct page. */
#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
88
89 #endif /* CONFIG_PPC_64K_PAGES */
90
/* Allocate a PMD table with retry (__GFP_REPEAT); may return NULL. */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
				GFP_KERNEL|__GFP_REPEAT);
}
96
/* Release a PMD table previously obtained from pmd_alloc_one(). */
static inline void pmd_free(pmd_t *pmd)
{
	kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
}
101
/* Allocate a PTE table, returned as a kernel virtual address.
 * Retries harder (__GFP_REPEAT) but may still return NULL. */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long address)
{
	return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
				GFP_KERNEL|__GFP_REPEAT);
}
108
109 static inline struct page *pte_alloc_one(struct mm_struct *mm,
110                                          unsigned long address)
111 {
112         return virt_to_page(pte_alloc_one_kernel(mm, address));
113 }
114                 
/* Release a PTE table given as a kernel virtual address. */
static inline void pte_free_kernel(pte_t *pte)
{
	kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
}
119
/* Release a PTE table given as a struct page. */
static inline void pte_free(struct page *ptepage)
{
	pte_free_kernel(page_address(ptepage));
}
124
/* Low two bits of pgtable_free_t.val hold the pgtable_cache index. */
#define PGF_CACHENUM_MASK	0x3

/* A page-table pointer with its cache index packed into the low bits;
 * handed to pgtable_free()/pgtable_free_tlb() for (deferred) freeing. */
typedef struct pgtable_free {
	unsigned long val;
} pgtable_free_t;
130
131 static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
132                                                 unsigned long mask)
133 {
134         BUG_ON(cachenum > PGF_CACHENUM_MASK);
135
136         return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
137 }
138
139 static inline void pgtable_free(pgtable_free_t pgf)
140 {
141         void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
142         int cachenum = pgf.val & PGF_CACHENUM_MASK;
143
144         kmem_cache_free(pgtable_cache[cachenum], p);
145 }
146
/* Queue a packed page table so it is freed only after the pending TLB
 * flush for this mmu_gather completes. */
extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);

/* Per-level TLB-batched free helpers: pack the pointer with its cache
 * index (mask = table size - 1) and defer the free. */
#define __pte_free_tlb(tlb, ptepage)	\
	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
		PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
#define __pmd_free_tlb(tlb, pmd)	\
	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud)	\
	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
#endif /* CONFIG_PPC_64K_PAGES */

/* No page-table cache trimming needed here. */
#define check_pgt_cache()	do { } while (0)
162
163 #endif /* CONFIG_PPC64 */
164 #endif /* __KERNEL__ */
165 #endif /* _ASM_POWERPC_PGALLOC_H */