/*
 * include/asm-ia64/pgalloc.h
 * From Fedora kernel-2.6.18-1.2224_FC5, patched with stable patch-2.6.18.1-vs2.0.
 */
1 #ifndef _ASM_IA64_PGALLOC_H
2 #define _ASM_IA64_PGALLOC_H
3
4 #define arch_add_exec_range(mm, limit)          do { ; } while (0)
5 #define arch_flush_exec_range(mm)               do { ; } while (0)
6 #define arch_remove_exec_range(mm, limit)       do { ; } while (0)
7
8 /*
9  * This file contains the functions and defines necessary to allocate
10  * page tables.
11  *
12  * This hopefully works with any (fixed) ia-64 page-size, as defined
13  * in <asm/page.h> (currently 8192).
14  *
15  * Copyright (C) 1998-2001 Hewlett-Packard Co
16  *      David Mosberger-Tang <davidm@hpl.hp.com>
17  * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
18  */
19
20
21 #include <linux/compiler.h>
22 #include <linux/mm.h>
23 #include <linux/page-flags.h>
24 #include <linux/threads.h>
25
26 #include <asm/mmu_context.h>
27
28 DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
29 #define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
30 DECLARE_PER_CPU(long, __pgtable_quicklist_size);
31 #define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)
32
33 static inline long pgtable_quicklist_total_size(void)
34 {
35         long ql_size = 0;
36         int cpuid;
37
38         for_each_online_cpu(cpuid) {
39                 ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
40         }
41         return ql_size;
42 }
43
44 static inline void *pgtable_quicklist_alloc(void)
45 {
46         unsigned long *ret = NULL;
47
48         preempt_disable();
49
50         ret = pgtable_quicklist;
51         if (likely(ret != NULL)) {
52                 pgtable_quicklist = (unsigned long *)(*ret);
53                 ret[0] = 0;
54                 --pgtable_quicklist_size;
55                 preempt_enable();
56         } else {
57                 preempt_enable();
58                 ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
59         }
60
61         return ret;
62 }
63
64 static inline void pgtable_quicklist_free(void *pgtable_entry)
65 {
66 #ifdef CONFIG_NUMA
67         unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));
68
69         if (unlikely(nid != numa_node_id())) {
70                 free_page((unsigned long)pgtable_entry);
71                 return;
72         }
73 #endif
74
75         preempt_disable();
76         *(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
77         pgtable_quicklist = (unsigned long *)pgtable_entry;
78         ++pgtable_quicklist_size;
79         preempt_enable();
80 }
81
/* Allocate a pgd page for @mm from the per-cpu quicklist (falls back
 * to the page allocator inside pgtable_quicklist_alloc()). */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return pgtable_quicklist_alloc();
}
86
/* Release a pgd page back to the per-cpu quicklist. */
static inline void pgd_free(pgd_t * pgd)
{
	pgtable_quicklist_free(pgd);
}
91
#ifdef CONFIG_PGTABLE_4
/* 4-level page tables only: wire a pud page into a pgd entry by
 * storing its physical address. */
static inline void
pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
{
	pgd_val(*pgd_entry) = __pa(pud);
}

/* Allocate a pud page from the per-cpu quicklist. */
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

/* Release a pud page back to the per-cpu quicklist. */
static inline void pud_free(pud_t * pud)
{
	pgtable_quicklist_free(pud);
}
#define __pud_free_tlb(tlb, pud)        pud_free(pud)
#endif /* CONFIG_PGTABLE_4 */
110
/* Wire a pmd page into a pud entry by storing its physical address. */
static inline void
pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}
116
/* Allocate a pmd page from the per-cpu quicklist. */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pgtable_quicklist_alloc();
}
121
/* Release a pmd page back to the per-cpu quicklist. */
static inline void pmd_free(pmd_t * pmd)
{
	pgtable_quicklist_free(pmd);
}
126
127 #define __pmd_free_tlb(tlb, pmd)        pmd_free(pmd)
128
/* Wire a user pte page (given as a struct page) into a pmd entry.
 * Under Xen the machine/pseudo-physical translation is used instead
 * of the plain physical address. */
static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
{
#ifndef CONFIG_XEN
	pmd_val(*pmd_entry) = page_to_phys(pte);
#else
	pmd_val(*pmd_entry) = page_to_pseudophys(pte);
#endif
}
138
/* Wire a kernel pte page (given by its virtual address) into a pmd
 * entry by storing its physical address. */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}
144
/* Allocate a user pte page and return its struct page.
 * NOTE(review): the quicklist allocation is not checked for NULL
 * before virt_to_page(); callers presumably tolerate/expect the
 * historical behavior — confirm before changing. */
static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long addr)
{
	return virt_to_page(pgtable_quicklist_alloc());
}
150
/* Allocate a kernel pte page, returned by virtual address. */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long addr)
{
	return pgtable_quicklist_alloc();
}
156
/* Release a user pte page (given as a struct page) back to the
 * per-cpu quicklist, which operates on virtual addresses. */
static inline void pte_free(struct page *pte)
{
	pgtable_quicklist_free(page_address(pte));
}
161
/* Release a kernel pte page back to the per-cpu quicklist. */
static inline void pte_free_kernel(pte_t * pte)
{
	pgtable_quicklist_free(pte);
}
166
167 #define __pte_free_tlb(tlb, pte)        pte_free(pte)
168
169 extern void check_pgt_cache(void);
170
171 #endif                          /* _ASM_IA64_PGALLOC_H */