#ifndef _X86_64_PGALLOC_H
#define _X86_64_PGALLOC_H

#include <asm/fixmap.h>
#include <linux/threads.h>
#include <linux/mm.h>		/* struct page, alloc_pages() */
#include <asm/io.h>		/* for phys_to_virt and page_to_pseudophys */
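/*
 * The exec-range hooks are no-ops on x86_64; the macros only evaluate their
 * arguments so callers do not trip unused-variable warnings.
 */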
#define arch_add_exec_range(mm, limit) \
		do { (void)(mm), (void)(limit); } while (0)
#define arch_flush_exec_range(mm) \
		do { (void)(mm); } while (0)
#define arch_remove_exec_range(mm, limit) \
		do { (void)(mm), (void)(limit); } while (0)
#include <xen/features.h>

void make_page_readonly(void *va, unsigned int feature);
void make_page_writable(void *va, unsigned int feature);
void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
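/*
 * Each mm carries two page-global directories allocated as one contiguous
 * pair of pages: the kernel pgd, and a user pgd one page above it.
 * __user_pgd() maps from the former to the latter.  The user pgd is cleared
 * at allocation time except for the vsyscall entry (see pgd_alloc() below),
 * and pgd_populate() installs each new pud into both copies.
 */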
#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
        set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
}
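/*
 * If the hypervisor has pinned this mm's page tables (mm->context.pinned),
 * a new page-table page must be mapped read-only in the guest before it is
 * hooked in, and the entry must be installed via set_pmd()/set_pud()/set_pgd()
 * so the update goes through the hypervisor.  For an unpinned mm the table is
 * still ordinary writable memory and a direct store suffices.
 */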
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
        if (unlikely((mm)->context.pinned)) {
                BUG_ON(HYPERVISOR_update_va_mapping(
                        (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
                        pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
                set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
        } else {
                *(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
        }
}
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        if (unlikely((mm)->context.pinned)) {
                BUG_ON(HYPERVISOR_update_va_mapping(
                        (unsigned long)pmd,
                        pfn_pte(virt_to_phys(pmd) >> PAGE_SHIFT,
                                PAGE_KERNEL_RO), 0));
                set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
        } else {
                *(pud) = __pud(_PAGE_TABLE | __pa(pmd));
        }
}
/*
 * We would need to use batch mode here, but pgd_populate() won't be
 * called frequently.
 */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
        if (unlikely((mm)->context.pinned)) {
                BUG_ON(HYPERVISOR_update_va_mapping(
                        (unsigned long)pud,
                        pfn_pte(virt_to_phys(pud) >> PAGE_SHIFT,
                                PAGE_KERNEL_RO), 0));
                set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
                set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
        } else {
                *(pgd) = __pgd(_PAGE_TABLE | __pa(pud));
                *(__user_pgd(pgd)) = *(pgd);
        }
}
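/*
 * When a page-table page is torn down, its virtual mapping may still be
 * read-only (it was write-protected while the mm was pinned).  In that case
 * the mapping is flipped back to read-write before the page is returned to
 * the page allocator, so it can be reused as ordinary memory.
 */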
static inline void pmd_free(pmd_t *pmd)
{
        pte_t *ptep = virt_to_ptep(pmd);

        if (!pte_write(*ptep)) {
                BUG_ON(HYPERVISOR_update_va_mapping(
                        (unsigned long)pmd,
                        pfn_pte(virt_to_phys(pmd) >> PAGE_SHIFT, PAGE_KERNEL),
                        0));
        }
        free_page((unsigned long)pmd);
}
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
static inline void pud_free(pud_t *pud)
{
        pte_t *ptep = virt_to_ptep(pud);

        if (!pte_write(*ptep)) {
                BUG_ON(HYPERVISOR_update_va_mapping(
                        (unsigned long)pud,
                        pfn_pte(virt_to_phys(pud) >> PAGE_SHIFT, PAGE_KERNEL),
                        0));
        }
        free_page((unsigned long)pud);
}
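/*
 * All pgd pages are kept on a linked list so that code which needs to visit
 * every pgd in the system can walk them.  The list is threaded through
 * struct page: page->index holds the next element and page->private points
 * back at the previous element's "next" slot, so deletion needs no search.
 * pgd_lock serializes list updates.
 */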
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        spin_lock(&pgd_lock);
        page->index = (pgoff_t)pgd_list;
        if (pgd_list)
                pgd_list->private = (unsigned long)&page->index;
        pgd_list = page;
        page->private = (unsigned long)&pgd_list;
        spin_unlock(&pgd_lock);
}
static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *next, **pprev, *page = virt_to_page(pgd);

        spin_lock(&pgd_lock);
        next = (struct page *)page->index;
        pprev = (struct page **)page->private;
        *pprev = next;
        if (next)
                next->private = (unsigned long)pprev;
        spin_unlock(&pgd_lock);
}
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        /*
         * We allocate two contiguous pages for kernel and user.
         */
        unsigned boundary;
        pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);

        if (!pgd)
                return NULL;
        pgd_list_add(pgd);
        /*
         * Copy kernel pointers in from init.
         * Could keep a freelist or slab cache of those because the kernel
         * part never changes.
         */
        boundary = pgd_index(__PAGE_OFFSET);
        memset(pgd, 0, boundary * sizeof(pgd_t));
        memcpy(pgd + boundary,
               init_level4_pgt + boundary,
               (PTRS_PER_PGD - boundary) * sizeof(pgd_t));

        memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
        /*
         * Set level3_user_pgt for vsyscall area
         */
        set_pgd(__user_pgd(pgd) + pgd_index(VSYSCALL_START),
                mk_kernel_pgd(__pa_symbol(level3_user_pgt)));
        return pgd;
}
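/*
 * pgd_free() has to cope with a pgd that may still be pinned: if either the
 * kernel or the user half is still mapped read-only, that half is unpinned
 * via xen_pgd_unpin() and its mapping restored to read-write before both
 * pages are handed back to the allocator.
 */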
static inline void pgd_free(pgd_t *pgd)
{
        pte_t *ptep = virt_to_ptep(pgd);

        if (!pte_write(*ptep)) {
                xen_pgd_unpin(__pa(pgd));
                BUG_ON(HYPERVISOR_update_va_mapping(
                        (unsigned long)pgd,
                        pfn_pte(virt_to_phys(pgd) >> PAGE_SHIFT, PAGE_KERNEL),
                        0));
        }

        ptep = virt_to_ptep(__user_pgd(pgd));
        if (!pte_write(*ptep)) {
                xen_pgd_unpin(__pa(__user_pgd(pgd)));
                BUG_ON(HYPERVISOR_update_va_mapping(
                        (unsigned long)__user_pgd(pgd),
                        pfn_pte(virt_to_phys(__user_pgd(pgd)) >> PAGE_SHIFT,
                                PAGE_KERNEL),
                        0));
        }

        pgd_list_del(pgd);
        free_pages((unsigned long)pgd, 1);
}
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
        if (pte)
                make_page_readonly(pte, XENFEAT_writable_page_tables);
        return pte;
}
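/*
 * Kernel pte pages are write-protected immediately (unless the hypervisor
 * advertises XENFEAT_writable_page_tables).  User pte pages from
 * pte_alloc_one() stay writable here; pmd_populate() write-protects them
 * later if they are hooked into a pinned mm.
 */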
static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
}
/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */
static inline void pte_free_kernel(pte_t *pte)
{
        BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
        make_page_writable(pte, XENFEAT_writable_page_tables);
        free_page((unsigned long)pte);
}
extern void pte_free(struct page *pte);
//#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
//#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
//#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
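/*
 * The generic tlb_remove_page() variants above are disabled: page-table
 * pages must go through pte_free()/pmd_free()/pud_free() so that any mapping
 * left read-only by pinning is made writable again before the page is
 * returned to the allocator.
 */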
#define __pte_free_tlb(tlb,x)   pte_free((x))
#define __pmd_free_tlb(tlb,x)   pmd_free((x))
#define __pud_free_tlb(tlb,x)   pud_free((x))
#endif /* _X86_64_PGALLOC_H */