/* $Id: pgalloc.h,v 1.30 2001/12/21 04:56:17 davem Exp $ */
#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/pgtable.h>
#include <asm/cpudata.h>

/* Page table allocation/freeing. */
#ifdef CONFIG_SMP
/* Sliiiicck */
#define pgt_quicklists  cpu_data(smp_processor_id())
#else
extern struct pgtable_cache_struct {
        unsigned long *pgd_cache;
        unsigned long *pte_cache[2];
        unsigned int pgcache_size;
        unsigned int pgdcache_size;
} pgt_quicklists;
#endif
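/*
 * On SMP the quicklists live in the per-cpu cpu_data, so disabling
 * preemption is enough to serialize access; on UP a single global
 * instance is used.  pte_cache[] holds one freelist per D-cache
 * color (see DCACHE_COLOR/VPTE_COLOR below), hence the two entries.
 */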
#define pgd_quicklist           (pgt_quicklists.pgd_cache)
#define pmd_quicklist           ((unsigned long *)0)
#define pte_quicklist           (pgt_quicklists.pte_cache)
#define pgtable_cache_size      (pgt_quicklists.pgcache_size)
#define pgd_cache_size          (pgt_quicklists.pgdcache_size)

#ifndef CONFIG_SMP

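/*
 * On UP a pgd occupies half a page.  Pages with at least one free
 * half are chained through page->lru.next; the low two bits of
 * page->lru.prev record which halves are free (1 => lower half,
 * 2 => upper half).
 */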
static __inline__ void free_pgd_fast(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        preempt_disable();
        if (!page->lru.prev) {
                /* Neither half was free before: link the page into the list. */
                page->lru.next = (void *) pgd_quicklist;
                pgd_quicklist = (unsigned long *)page;
        }
        /* Mark this pgd's half of the page free. */
        page->lru.prev = (void *)
          (((unsigned long)page->lru.prev) |
           (((unsigned long)pgd & (PAGE_SIZE / 2)) ? 2 : 1));
        pgd_cache_size++;
        preempt_enable();
}

static __inline__ pgd_t *get_pgd_fast(void)
{
        struct page *ret;

        preempt_disable();
        if ((ret = (struct page *)pgd_quicklist) != NULL) {
                unsigned long mask = (unsigned long)ret->lru.prev;
                unsigned long off = 0;

                /* Prefer the lower half, else take the upper half. */
                if (mask & 1)
                        mask &= ~1;
                else {
                        off = PAGE_SIZE / 2;
                        mask &= ~2;
                }
                ret->lru.prev = (void *) mask;
                /* If both halves are now in use, unlink the page. */
                if (!mask)
                        pgd_quicklist = (unsigned long *)ret->lru.next;
                ret = (struct page *)(__page_address(ret) + off);
                pgd_cache_size--;
                preempt_enable();
        } else {
                struct page *page;

                preempt_enable();
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
                if (page) {
                        /* Hand out the lower half, queue the upper one. */
                        ret = (struct page *)page_address(page);
                        page->lru.prev = (void *) 2UL;

                        preempt_disable();
                        page->lru.next = (void *) pgd_quicklist;
                        pgd_quicklist = (unsigned long *)page;
                        pgd_cache_size++;
                        preempt_enable();
                }
        }
        return (pgd_t *)ret;
}

#else /* CONFIG_SMP */

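/*
 * On SMP a pgd is a full page; the freelist simply threads the next
 * pointer through the first word of each free page.
 */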
static __inline__ void free_pgd_fast(pgd_t *pgd)
{
        preempt_disable();
        *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size++;
        preempt_enable();
}

static __inline__ pgd_t *get_pgd_fast(void)
{
        unsigned long *ret;

        preempt_disable();
        if ((ret = pgd_quicklist) != NULL) {
                pgd_quicklist = (unsigned long *)(*ret);
                /* Clear the freelist link word before handing the page out. */
                ret[0] = 0;
                pgtable_cache_size--;
                preempt_enable();
        } else {
                preempt_enable();
                ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
                if (ret)
                        memset(ret, 0, PAGE_SIZE);
        }
        return (pgd_t *)ret;
}

static __inline__ void free_pgd_slow(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}

#endif /* CONFIG_SMP */

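/*
 * The sparc64 D-cache is virtually indexed.  When it is larger than a
 * page, two pages whose addresses differ in bit PAGE_SHIFT can alias
 * in the cache, so page tables are segregated into two "colors".
 * DCACHE_COLOR() takes the color of a (virtual) address directly;
 * VPTE_COLOR() takes the color a pte page has in the virtual pte
 * space -- an 8K pte page holds 2^10 eight-byte ptes and so maps 2^10
 * pages, hence the extra shift.
 */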
#if (L1DCACHE_SIZE > PAGE_SIZE)                 /* D-cache aliasing is possible */
#define VPTE_COLOR(address)             (((address) >> (PAGE_SHIFT + 10)) & 1UL)
#define DCACHE_COLOR(address)           (((address) >> PAGE_SHIFT) & 1UL)
#else
#define VPTE_COLOR(address)             0
#define DCACHE_COLOR(address)           0
#endif

#define pud_populate(MM, PUD, PMD)      pud_set(PUD, PMD)

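/*
 * pmd pages are carved from the same quicklists as pte pages.  A pmd
 * page has no virtual-color constraint, so take whichever freelist is
 * non-empty.
 */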
static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long *ret;
        int color = 0;

        preempt_disable();
        if (pte_quicklist[color] == NULL)
                color = 1;

        if ((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
                pte_quicklist[color] = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        }
        preempt_enable();

        return (pmd_t *)ret;
}

static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd;

        pmd = pmd_alloc_one_fast(mm, address);
        if (!pmd) {
                pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
                if (pmd)
                        memset(pmd, 0, PAGE_SIZE);
        }
        return pmd;
}

static __inline__ void free_pmd_fast(pmd_t *pmd)
{
        unsigned long color = DCACHE_COLOR((unsigned long)pmd);

        preempt_disable();
        *(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
        pte_quicklist[color] = (unsigned long *) pmd;
        pgtable_cache_size++;
        preempt_enable();
}

static __inline__ void free_pmd_slow(pmd_t *pmd)
{
        free_page((unsigned long)pmd);
}

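/*
 * pmd_populate_kernel() takes a kernel virtual pointer to the pte
 * page; pmd_populate() takes the struct page and converts it first.
 */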
#define pmd_populate_kernel(MM, PMD, PTE)       pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE)           \
        pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))

extern pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);

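/*
 * Each pte page records which mm and which PMD-aligned address it
 * backs in page->mapping and page->index, presumably so that later
 * tear-down/flush code can recover the virtual range it mapped.
 */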
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte = __pte_alloc_one_kernel(mm, address);

        if (pte) {
                struct page *page = virt_to_page(pte);
                page->mapping = (void *) mm;
                page->index = address & PMD_MASK;
        }
        return pte;
}

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        pte_t *pte = pte_alloc_one_kernel(mm, addr);

        if (pte)
                return virt_to_page(pte);
        return NULL;
}

static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long color = VPTE_COLOR(address);
        unsigned long *ret;

        preempt_disable();
        /* Only the freelist matching this address's vpte color will do. */
        if ((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
                pte_quicklist[color] = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        }
        preempt_enable();
        return (pte_t *)ret;
}

static __inline__ void free_pte_fast(pte_t *pte)
{
        unsigned long color = DCACHE_COLOR((unsigned long)pte);

        preempt_disable();
        *(unsigned long *)pte = (unsigned long) pte_quicklist[color];
        pte_quicklist[color] = (unsigned long *) pte;
        pgtable_cache_size++;
        preempt_enable();
}

static __inline__ void free_pte_slow(pte_t *pte)
{
        free_page((unsigned long)pte);
}

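/*
 * Clear page->mapping before the page goes back on a quicklist so no
 * stale mm pointer is left behind.
 */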
static inline void pte_free_kernel(pte_t *pte)
{
        virt_to_page(pte)->mapping = NULL;
        free_pte_fast(pte);
}

static inline void pte_free(struct page *ptepage)
{
        ptepage->mapping = NULL;
        free_pte_fast(page_address(ptepage));
}

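/*
 * The generic pgalloc API is backed by the fast (quicklist) variants;
 * the *_slow() helpers above are what finally return pages to the
 * page allocator.
 */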
#define pmd_free(pmd)           free_pmd_fast(pmd)
#define pgd_free(pgd)           free_pgd_fast(pgd)
#define pgd_alloc(mm)           get_pgd_fast()

#endif /* _SPARC64_PGALLOC_H */