vserver 2.0 rc7
[linux-2.6.git] / include / asm-sparc64 / pgalloc.h
1 /* $Id: pgalloc.h,v 1.30 2001/12/21 04:56:17 davem Exp $ */
2 #ifndef _SPARC64_PGALLOC_H
3 #define _SPARC64_PGALLOC_H
4
5 #include <linux/config.h>
6 #include <linux/kernel.h>
7 #include <linux/sched.h>
8 #include <linux/mm.h>
9
10 #include <asm/spitfire.h>
11 #include <asm/cpudata.h>
12 #include <asm/cacheflush.h>
13
/* Page table allocation/freeing.
 *
 * Freed page-table pages are cached on "quicklists": singly linked
 * lists threaded through word 0 of each free page, so a later
 * allocation can reuse a page without hitting the page allocator.
 */
#ifdef CONFIG_SMP
/* Sliiiicck */
/* On SMP the cache lives in the per-cpu data area; users bracket
 * access with preempt_disable()/preempt_enable() (see below), which
 * presumably makes the per-cpu access safe without a lock — confirm
 * against cpudata.h. */
#define pgt_quicklists	local_cpu_data()
#else
/* UP build: one global cache structure, defined out of line. */
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;	/* free pgd pages */
	unsigned long *pte_cache[2];	/* free pte/pmd pages, two color bins */
	unsigned int pgcache_size;	/* total pages across all lists */
} pgt_quicklists;
#endif
/* Accessors hiding the SMP/UP layout difference. */
#define pgd_quicklist		(pgt_quicklists.pgd_cache)
/* No separate pmd list: pmd pages share pte_quicklist (see free_pmd_fast). */
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(pgt_quicklists.pte_cache)
#define pgtable_cache_size	(pgt_quicklists.pgcache_size)
29
30 static __inline__ void free_pgd_fast(pgd_t *pgd)
31 {
32         preempt_disable();
33         *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
34         pgd_quicklist = (unsigned long *) pgd;
35         pgtable_cache_size++;
36         preempt_enable();
37 }
38
39 static __inline__ pgd_t *get_pgd_fast(void)
40 {
41         unsigned long *ret;
42
43         preempt_disable();
44         if((ret = pgd_quicklist) != NULL) {
45                 pgd_quicklist = (unsigned long *)(*ret);
46                 ret[0] = 0;
47                 pgtable_cache_size--;
48                 preempt_enable();
49         } else {
50                 preempt_enable();
51                 ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
52                 if(ret)
53                         memset(ret, 0, PAGE_SIZE);
54         }
55         return (pgd_t *)ret;
56 }
57
58 static __inline__ void free_pgd_slow(pgd_t *pgd)
59 {
60         free_page((unsigned long)pgd);
61 }
62
#ifdef DCACHE_ALIASING_POSSIBLE
/* Two-way "coloring" used to keep virtually-indexed D-cache aliases
 * apart when picking a quicklist bin.  VPTE_COLOR keys off a
 * higher-order address bit (presumably a virtual-PTE-area address —
 * confirm against the sparc64 mm code); DCACHE_COLOR keys off the
 * page-frame parity of a kernel virtual address. */
#define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
#define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
#else
/* No D-cache aliasing on this configuration: a single color suffices. */
#define VPTE_COLOR(address)		0
#define DCACHE_COLOR(address)		0
#endif

/* Install a pmd table into a pud entry. */
#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
72
73 static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
74 {
75         unsigned long *ret;
76         int color = 0;
77
78         preempt_disable();
79         if (pte_quicklist[color] == NULL)
80                 color = 1;
81
82         if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
83                 pte_quicklist[color] = (unsigned long *)(*ret);
84                 ret[0] = 0;
85                 pgtable_cache_size--;
86         }
87         preempt_enable();
88
89         return (pmd_t *)ret;
90 }
91
92 static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
93 {
94         pmd_t *pmd;
95
96         pmd = pmd_alloc_one_fast(mm, address);
97         if (!pmd) {
98                 pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
99                 if (pmd)
100                         memset(pmd, 0, PAGE_SIZE);
101         }
102         return pmd;
103 }
104
105 static __inline__ void free_pmd_fast(pmd_t *pmd)
106 {
107         unsigned long color = DCACHE_COLOR((unsigned long)pmd);
108
109         preempt_disable();
110         *(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
111         pte_quicklist[color] = (unsigned long *) pmd;
112         pgtable_cache_size++;
113         preempt_enable();
114 }
115
116 static __inline__ void free_pmd_slow(pmd_t *pmd)
117 {
118         free_page((unsigned long)pmd);
119 }
120
/* Install a pte table into a pmd entry.  The _kernel variant takes
 * the pte virtual address directly; the user variant takes a
 * struct page and converts it via page_address(). */
#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE)		\
	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))

/* Slow-path pte allocation; defined out of line elsewhere in the
 * sparc64 mm code (not visible in this header). */
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
126
127 static inline struct page *
128 pte_alloc_one(struct mm_struct *mm, unsigned long addr)
129 {
130         pte_t *pte = pte_alloc_one_kernel(mm, addr);
131
132         if (pte)
133                 return virt_to_page(pte);
134
135         return NULL;
136 }
137
138 static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
139 {
140         unsigned long color = VPTE_COLOR(address);
141         unsigned long *ret;
142
143         preempt_disable();
144         if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
145                 pte_quicklist[color] = (unsigned long *)(*ret);
146                 ret[0] = 0;
147                 pgtable_cache_size--;
148         }
149         preempt_enable();
150         return (pte_t *)ret;
151 }
152
153 static __inline__ void free_pte_fast(pte_t *pte)
154 {
155         unsigned long color = DCACHE_COLOR((unsigned long)pte);
156
157         preempt_disable();
158         *(unsigned long *)pte = (unsigned long) pte_quicklist[color];
159         pte_quicklist[color] = (unsigned long *) pte;
160         pgtable_cache_size++;
161         preempt_enable();
162 }
163
164 static __inline__ void free_pte_slow(pte_t *pte)
165 {
166         free_page((unsigned long)pte);
167 }
168
/* Free a kernel pte page: just push it back onto the quicklist. */
static inline void pte_free_kernel(pte_t *pte)
{
	free_pte_fast(pte);
}
173
174 static inline void pte_free(struct page *ptepage)
175 {
176         free_pte_fast(page_address(ptepage));
177 }
178
/* Public alloc/free entry points all route through the quicklist
 * fast paths; get_pgd_fast() itself falls back to the page allocator
 * when the list is empty. */
#define pmd_free(pmd)		free_pmd_fast(pmd)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()
182
183 #endif /* _SPARC64_PGALLOC_H */