/* include/asm-sparc64/pgalloc.h  (linux-2.6.6) */
/* $Id: pgalloc.h,v 1.30 2001/12/21 04:56:17 davem Exp $ */
#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/pgtable.h>
#include <asm/cpudata.h>

/* Page table allocation/freeing. */
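/*
 * Recently freed page table pages are kept on "quicklists" so they can be
 * reused without a round trip through the page allocator.  On SMP the
 * lists live in this cpu's cpu_data, so disabling preemption is all the
 * locking that is needed; on UP a single global pgtable_cache_struct is
 * used instead.
 */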
#ifdef CONFIG_SMP
/* Sliiiicck */
#define pgt_quicklists  cpu_data(smp_processor_id())
#else
extern struct pgtable_cache_struct {
        unsigned long *pgd_cache;
        unsigned long *pte_cache[2];
        unsigned int pgcache_size;
        unsigned int pgdcache_size;
} pgt_quicklists;
#endif
#define pgd_quicklist           (pgt_quicklists.pgd_cache)
#define pmd_quicklist           ((unsigned long *)0)
#define pte_quicklist           (pgt_quicklists.pte_cache)
#define pgtable_cache_size      (pgt_quicklists.pgcache_size)
#define pgd_cache_size          (pgt_quicklists.pgdcache_size)

#ifndef CONFIG_SMP

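/*
 * On UP, pgd tables occupy only half a page, so two of them are packed
 * into each cached page: page->lru.next links the page into
 * pgd_quicklist, and page->lru.prev holds a mask of which halves are
 * free (1 = first half, 2 = second half, 3 = both).
 */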
static __inline__ void free_pgd_fast(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        preempt_disable();
        if (!page->lru.prev) {
                page->lru.next = (void *) pgd_quicklist;
                pgd_quicklist = (unsigned long *)page;
        }
        page->lru.prev = (void *)
          (((unsigned long)page->lru.prev) |
           (((unsigned long)pgd & (PAGE_SIZE / 2)) ? 2 : 1));
        pgd_cache_size++;
        preempt_enable();
}

static __inline__ pgd_t *get_pgd_fast(void)
{
        struct page *ret;

        preempt_disable();
        if ((ret = (struct page *)pgd_quicklist) != NULL) {
                unsigned long mask = (unsigned long)ret->lru.prev;
                unsigned long off = 0;

                if (mask & 1)
                        mask &= ~1;
                else {
                        off = PAGE_SIZE / 2;
                        mask &= ~2;
                }
                ret->lru.prev = (void *) mask;
                if (!mask)
                        pgd_quicklist = (unsigned long *)ret->lru.next;
                ret = (struct page *)(__page_address(ret) + off);
                pgd_cache_size--;
                preempt_enable();
        } else {
                struct page *page;

                preempt_enable();
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
                if (page) {
                        ret = (struct page *)page_address(page);
                        clear_page(ret);
                        page->lru.prev = (void *) 2UL;

                        preempt_disable();
                        page->lru.next = (void *) pgd_quicklist;
                        pgd_quicklist = (unsigned long *)page;
                        pgd_cache_size++;
                        preempt_enable();
                }
        }
        return (pgd_t *)ret;
}

#else /* CONFIG_SMP */

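/*
 * On SMP a whole page is handed out per pgd and the quicklist is a simple
 * singly-linked list threaded through the first word of each free page.
 * The list lives in this cpu's cpu_data (see pgt_quicklists above), so
 * preempt_disable() is sufficient protection.
 */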
static __inline__ void free_pgd_fast(pgd_t *pgd)
{
        preempt_disable();
        *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size++;
        preempt_enable();
}

static __inline__ pgd_t *get_pgd_fast(void)
{
        unsigned long *ret;

        preempt_disable();
        if((ret = pgd_quicklist) != NULL) {
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
                preempt_enable();
        } else {
                preempt_enable();
                ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
                if(ret)
                        memset(ret, 0, PAGE_SIZE);
        }
        return (pgd_t *)ret;
}

static __inline__ void free_pgd_slow(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}

#endif /* CONFIG_SMP */

#if (L1DCACHE_SIZE > PAGE_SIZE)                 /* is there a D-cache aliasing problem? */
#define VPTE_COLOR(address)             (((address) >> (PAGE_SHIFT + 10)) & 1UL)
#define DCACHE_COLOR(address)           (((address) >> PAGE_SHIFT) & 1UL)
#else
#define VPTE_COLOR(address)             0
#define DCACHE_COLOR(address)           0
#endif
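/*
 * When the L1 D-cache is larger than a page it is virtually indexed and
 * two pages can alias in it, so pmd/pte pages are kept on two quicklists,
 * one per cache color.  VPTE_COLOR() picks the color wanted for a pte page
 * from the virtual address it will map; DCACHE_COLOR() gives the color of
 * the kernel address a table actually sits at and selects the list it goes
 * back to when freed.
 */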

#define pgd_populate(MM, PGD, PMD)      pgd_set(PGD, PMD)

static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long *ret;
        int color = 0;

        preempt_disable();
        if (pte_quicklist[color] == NULL)
                color = 1;

        if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
                pte_quicklist[color] = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        }
        preempt_enable();

        return (pmd_t *)ret;
}

static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd;

        pmd = pmd_alloc_one_fast(mm, address);
        if (!pmd) {
                pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
                if (pmd)
                        memset(pmd, 0, PAGE_SIZE);
        }
        return pmd;
}

static __inline__ void free_pmd_fast(pmd_t *pmd)
{
        unsigned long color = DCACHE_COLOR((unsigned long)pmd);

        preempt_disable();
        *(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
        pte_quicklist[color] = (unsigned long *) pmd;
        pgtable_cache_size++;
        preempt_enable();
}

static __inline__ void free_pmd_slow(pmd_t *pmd)
{
        free_page((unsigned long)pmd);
}

#define pmd_populate_kernel(MM, PMD, PTE)       pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE)           \
        pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))

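/*
 * The pte slow path is provided out of line by the arch mm code; only its
 * declaration appears here.  The fast path below just pops a page of the
 * right color off the quicklist.
 */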
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        pte_t *pte = pte_alloc_one_kernel(mm, addr);
        if (pte)
                return virt_to_page(pte);
        return NULL;
}

static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long color = VPTE_COLOR(address);
        unsigned long *ret;

        preempt_disable();
        if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
                pte_quicklist[color] = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        }
        preempt_enable();
        return (pte_t *)ret;
}

static __inline__ void free_pte_fast(pte_t *pte)
{
        unsigned long color = DCACHE_COLOR((unsigned long)pte);

        preempt_disable();
        *(unsigned long *)pte = (unsigned long) pte_quicklist[color];
        pte_quicklist[color] = (unsigned long *) pte;
        pgtable_cache_size++;
        preempt_enable();
}

static __inline__ void free_pte_slow(pte_t *pte)
{
        free_page((unsigned long)pte);
}

#define pte_free_kernel(pte)    free_pte_fast(pte)
#define pte_free(pte)           free_pte_fast(page_address(pte))
#define pmd_free(pmd)           free_pmd_fast(pmd)
#define pgd_free(pgd)           free_pgd_fast(pgd)
#define pgd_alloc(mm)           get_pgd_fast()
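
/*
 * Typical pairing from the generic mm code (illustrative, not part of this
 * header): pgd_alloc()/pgd_free() at mm creation and teardown, then
 * pmd_alloc_one() + pgd_populate() and pte_alloc_one() + pmd_populate()
 * while page tables are filled in, with pmd_free()/pte_free() on the
 * error and teardown paths.
 */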

#endif /* _SPARC64_PGALLOC_H */