4 #include <linux/config.h>
5 /* #include <linux/string.h> */
7 #include <linux/kernel.h>
8 #include <linux/types.h>
10 #include <xen/features.h>
12 #include <xen/interface/xen.h>
13 #include <xen/foreign_page.h>
/*
 * arch_free_page() hook: a page mapped in from a foreign domain must be
 * handed to its registered destructor instead of the normal free path.
 * NOTE(review): the macro body is truncated in this excerpt -- the "({"
 * statement expression is never closed here and the foreign-check lines
 * are missing.  Recover the full body from the upstream header.
 */
15 #define arch_free_page(_page,_order) \
16 ({ int foreign = PageForeign(_page); \
18 (PageForeignDestructor(_page))(_page); \
21 #define HAVE_ARCH_FREE_PAGE
/*
 * scrub_pages(ptr, npages): zero 'npages' pages starting at 'ptr' before
 * they are returned to the hypervisor, so stale guest data cannot leak to
 * other domains.  When CONFIG_XEN_SCRUB_PAGES is not set this compiles to
 * a no-op.
 *
 * Fix: this excerpt had lost the #else/#endif of the conditional, leaving
 * an unterminated #ifdef and two clashing definitions of scrub_pages;
 * restore the standard structure.
 */
#ifdef CONFIG_XEN_SCRUB_PAGES
#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
#else
#define scrub_pages(_p,_n) ((void)0)
#endif
29 /* PAGE_SHIFT determines the page size */
/* NOTE(review): two PAGE_SIZE definitions survive here; presumably the
 * elided lines guarded them with #ifdef __ASSEMBLY__ (assembler constants
 * take no UL suffix).  Confirm against the upstream header. */
32 #define PAGE_SIZE (0x1 << PAGE_SHIFT)
34 #define PAGE_SIZE (1UL << PAGE_SHIFT)
/* Clears the in-page offset bits of an address. */
36 #define PAGE_MASK (~(PAGE_SIZE-1))
/* As PAGE_MASK, additionally clipped to the physical address width. */
37 #define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
/* Kernel thread stack: 2^THREAD_ORDER pages (order 1 = two pages). */
39 #define THREAD_ORDER 1
40 #define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
/* Mask an RSP value down to the base of the current thread stack. */
41 #define CURRENT_MASK (~(THREAD_SIZE-1))
/* Per-exception IST stacks: one page each. */
43 #define EXCEPTION_STACK_ORDER 0
44 #define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
46 #define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER
47 #define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
/* Dedicated interrupt stack: four pages. */
49 #define IRQSTACK_ORDER 2
50 #define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
/* IST slot indices; entries between these two appear elided here. */
52 #define STACKFAULT_STACK 1
53 #define DOUBLEFAULT_STACK 2
57 #define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
/* "Large" pages are one PMD entry (2MB with the usual PMD_SHIFT = 21). */
59 #define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
60 #define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
/* hugetlbfs page geometry: identical to the PMD large-page size. */
62 #define HPAGE_SHIFT PMD_SHIFT
63 #define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
64 #define HPAGE_MASK (~(HPAGE_SIZE - 1))
65 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
/* Highest pseudo-physical frame number + 1; set during early boot. */
70 extern unsigned long end_pfn;
/* Optimised whole-page primitives (implemented in arch assembly/C). */
72 void clear_page(void *);
73 void copy_page(void *, void *);
/* No virtually-indexed cache aliasing on x86-64: the user variants are
 * plain page operations, vaddr/pg are ignored. */
75 #define clear_user_page(page, vaddr, pg) clear_page(page)
76 #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
78 #define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
79 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
45 /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
/* Sentinel meaning "no machine frame backs this pfn". */
82 #define INVALID_P2M_ENTRY (~0UL)
/* Top bit of a p2m entry marks a deliberately-mapped foreign frame. */
83 #define FOREIGN_FRAME_BIT (1UL<<63)
84 #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
/* Guest-maintained pseudo-physical -> machine frame table. */
86 extern unsigned long *phys_to_machine_mapping;
/* Hypervisor-provided machine -> pseudo-physical table and its size
 * (as an order, i.e. log2 of the number of entries). */
88 #undef machine_to_phys_mapping
89 extern unsigned long *machine_to_phys_mapping;
90 extern unsigned int machine_to_phys_order;
/*
 * pfn_to_mfn(): translate a pseudo-physical frame number to its machine
 * frame number via the p2m table (the FOREIGN_FRAME bit is presumably
 * masked off by the elided tail of the return expression -- confirm).
 * NOTE(review): body is truncated here (braces and the auto-translated
 * early return are missing).  Also note the p2m index is cast to
 * unsigned int, which would truncate very large pfns -- verify upstream.
 */
92 static inline unsigned long pfn_to_mfn(unsigned long pfn)
94 if (xen_feature(XENFEAT_auto_translated_physmap))
96 return phys_to_machine_mapping[(unsigned int)(pfn)] &
/*
 * phys_to_machine_mapping_valid(): non-zero when 'pfn' has a real machine
 * frame in the p2m table (auto-translated guests take the elided early
 * path).  NOTE(review): braces and the auto-translate return are missing
 * from this excerpt.
 */
100 static inline int phys_to_machine_mapping_valid(unsigned long pfn)
102 if (xen_feature(XENFEAT_auto_translated_physmap))
104 return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
/*
 * mfn_to_pfn(): reverse translation via the hypervisor's m2p table.
 * MFNs beyond the table (mfn >> machine_to_phys_order != 0) and faulting
 * array accesses are handled via an asm exception-table fixup that falls
 * back to an out-of-range value.  NOTE(review): the asm body, its fixup
 * code and the function's braces/returns are heavily truncated in this
 * excerpt -- do not edit without the full upstream text.
 */
107 static inline unsigned long mfn_to_pfn(unsigned long mfn)
111 if (xen_feature(XENFEAT_auto_translated_physmap))
114 if (unlikely((mfn >> machine_to_phys_order) != 0))
117 /* The array access can fail (e.g., device space beyond end of RAM). */
121 ".section .fixup,\"ax\"\n"
125 ".section __ex_table,\"a\"\n"
130 : "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) );
136 * We detect special mappings in one of two ways:
137 * 1. If the MFN is an I/O page then Xen will set the m2p entry
138 * to be outside our maximum possible pseudophys range.
139 * 2. If the MFN belongs to a different domain then we will certainly
140 * not have MFN in our p2m table. Conversely, if the page is ours,
141 * then we'll have p2m(m2p(MFN))==MFN.
142 * If we detect a special mapping then it doesn't have a 'struct page'.
143 * We force !pfn_valid() by returning an out-of-range pointer.
145 * NB. These checks require that, for any MFN that is not in our reservation,
146 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
147 * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
148 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
150 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
151 * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
152 * require. In all the cases we care about, the FOREIGN_FRAME bit is
153 * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
/*
 * mfn_to_local_pfn(): as mfn_to_pfn(), but if the frame does not belong
 * to this domain (p2m(m2p(mfn)) != mfn) return end_pfn so that
 * pfn_valid() fails -- see the "special mappings" comment above.
 * NOTE(review): the first operand of the '&&' chain (presumably a
 * pfn < end_pfn bounds check) and the final return are elided here.
 */
155 static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
157 unsigned long pfn = mfn_to_pfn(mfn);
159 && !xen_feature(XENFEAT_auto_translated_physmap)
160 && (phys_to_machine_mapping[pfn] != mfn))
161 return end_pfn; /* force !pfn_valid() */
/*
 * set_phys_to_machine(): record pfn -> mfn in the p2m table.  In
 * auto-translated mode the table is not writable this way, so only the
 * identity mapping (or an invalidation) is legal -- anything else BUGs.
 * NOTE(review): braces and the auto-translate early return are elided.
 */
166 static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
168 if (xen_feature(XENFEAT_auto_translated_physmap)) {
169 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
172 phys_to_machine_mapping[pfn] = mfn;
91 /* Definitions for machine and pseudophysical addresses. */
/* paddr_t: guest pseudo-physical address; maddr_t: real machine address.
 * Distinct typedefs purely for documentation -- both are unsigned long. */
176 typedef unsigned long paddr_t;
177 typedef unsigned long maddr_t;
/*
 * phys_to_machine(): convert a full pseudo-physical address to a machine
 * address -- translate the frame, reattach the in-page offset.
 * NOTE(review): braces and the final "return machine;" are elided here.
 */
179 static inline maddr_t phys_to_machine(paddr_t phys)
181 maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
182 machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
/*
 * machine_to_phys(): inverse of phys_to_machine() -- translate the
 * machine frame back and reattach the in-page offset.
 * NOTE(review): braces and the final "return phys;" are elided here.
 */
186 static inline paddr_t machine_to_phys(maddr_t machine)
188 paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
189 phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
100 * These are used to make use of C type-checking..
/* Wrapper structs so pte/pmd/pud/pgd values cannot be mixed up. */
196 typedef struct { unsigned long pte; } pte_t;
197 typedef struct { unsigned long pmd; } pmd_t;
198 typedef struct { unsigned long pud; } pud_t;
199 typedef struct { unsigned long pgd; } pgd_t;
200 #define PTE_MASK PHYSICAL_PAGE_MASK
202 typedef struct { unsigned long pgprot; } pgprot_t;
/* pte_val(): raw ptes hold MACHINE frames; a present pte (bit 0 set) is
 * converted back to pseudo-physical.  pte_val_ma() returns the raw
 * machine value.  NOTE(review): pte_val's continuation line is elided --
 * the trailing '\' below currently splices the next #define into it. */
204 #define pte_val(x) (((x).pte & 1) ? machine_to_phys((x).pte) : \
206 #define pte_val_ma(x) ((x).pte)
/*
 * pmd_val(): return the pseudo-physical view of a pmd entry; non-zero
 * raw entries hold machine addresses and are translated back.
 * NOTE(review): braces and "return ret;" are elided in this excerpt.
 */
208 static inline unsigned long pmd_val(pmd_t x)
210 unsigned long ret = x.pmd;
211 if (ret) ret = machine_to_phys(ret);
/*
 * pud_val(): as pmd_val(), for pud entries -- machine-to-pseudophysical
 * translation of non-zero entries.
 * NOTE(review): braces and "return ret;" are elided in this excerpt.
 */
215 static inline unsigned long pud_val(pud_t x)
217 unsigned long ret = x.pud;
218 if (ret) ret = machine_to_phys(ret);
/*
 * pgd_val(): as pmd_val(), for top-level pgd entries.
 * NOTE(review): braces and "return ret;" are elided in this excerpt.
 */
222 static inline unsigned long pgd_val(pgd_t x)
224 unsigned long ret = x.pgd;
225 if (ret) ret = machine_to_phys(ret);
/* pgprot holds plain flag bits -- no machine/phys translation needed. */
229 #define pgprot_val(x) ((x).pgprot)
/* __pte_ma(): build a pte from an already-machine value, no translation. */
231 #define __pte_ma(x) ((pte_t) { (x) } )
/*
 * __pte(): build a pte from a pseudo-physical value; present entries
 * (bit 0 set) are converted to machine addresses first.
 * NOTE(review): the function's braces are elided in this excerpt.
 */
233 static inline pte_t __pte(unsigned long x)
235 if (x & 1) x = phys_to_machine(x);
236 return ((pte_t) { (x) });
/*
 * __pmd(): as __pte(), for pmd entries -- present entries get
 * phys-to-machine translated.  NOTE(review): braces elided here.
 */
239 static inline pmd_t __pmd(unsigned long x)
241 if ((x & 1)) x = phys_to_machine(x);
242 return ((pmd_t) { (x) });
/*
 * __pud(): as __pte(), for pud entries.  NOTE(review): braces elided.
 */
245 static inline pud_t __pud(unsigned long x)
247 if ((x & 1)) x = phys_to_machine(x);
248 return ((pud_t) { (x) });
/*
 * __pgd(): as __pte(), for top-level pgd entries.  NOTE(review): braces
 * elided in this excerpt.
 */
251 static inline pgd_t __pgd(unsigned long x)
253 if ((x & 1)) x = phys_to_machine(x);
254 return ((pgd_t) { (x) });
/* Build a pgprot_t from raw flag bits. */
257 #define __pgprot(x) ((pgprot_t) { (x) } )
/* Kernel load/link addresses.  Two variants survive here -- the UL-suffixed
 * set for C and the bare set for the assembler; presumably the elided lines
 * guarded them with #ifdef __ASSEMBLY__ / #else.  __START_KERNEL_map is the
 * virtual base the kernel text is linked at; __PAGE_OFFSET is the base of
 * the direct physical mapping. */
259 #define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
260 #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
261 #define __START_KERNEL_map 0xffffffff80000000UL
262 #define __PAGE_OFFSET 0xffff880000000000UL
265 #define __PHYSICAL_START CONFIG_PHYSICAL_START
266 #define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
267 #define __START_KERNEL_map 0xffffffff80000000
268 #define __PAGE_OFFSET 0xffff880000000000
269 #endif /* !__ASSEMBLY__ */
/* Compatibility with Xen 3.0.2 and older: kernel is loaded at its link
 * address, so the linker-script LOAD_OFFSET is zero.
 * NOTE(review): the intervening comment/lines of this conditional appear
 * elided in this excerpt. */
271 #ifdef CONFIG_XEN_COMPAT_030002
273 #define LOAD_OFFSET 0
274 #endif /* CONFIG_XEN_COMPAT_030002 */
145 /* to align the pointer to the (next) page boundary */
277 #define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
147 /* See Documentation/x86_64/mm.txt for a description of the memory map. */
/* 46 usable physical address bits, 48 canonical virtual address bits. */
280 #define __PHYSICAL_MASK_SHIFT 46
281 #define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1)
282 #define __VIRTUAL_MASK_SHIFT 48
283 #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
/* Size/base of the region the kernel image may occupy. */
285 #define KERNEL_TEXT_SIZE (40UL*1024*1024)
286 #define KERNEL_TEXT_START 0xffffffff80000000UL
288 #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
155 /* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol.
156 Otherwise you risk miscompilation. */
/* __pa(): virtual -> pseudo-physical.  Addresses in the kernel-image
 * mapping subtract __START_KERNEL_map; everything else is assumed to be
 * in the direct map and subtracts PAGE_OFFSET. */
292 #define __pa(x) (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
158 /* __pa_symbol should be used for C visible symbols.
159 This seems to be the official gcc blessed way to do such arithmetic. */
/* NOTE(review): __pa_symbol's statement-expression wrapper lines are
 * elided here; the trailing '\' below currently splices the following
 * #define into the macro.  Recover the full body from upstream. */
295 #define __pa_symbol(x) \
297 asm("" : "=r" (v) : "0" (x)); \
300 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
301 #define __boot_va(x) __va(x)
302 #define __boot_pa(x) __pa(x)
/* Flat memory model: every pfn below end_pfn has a struct page.
 * NOTE(review): the #endif closing this conditional appears elided. */
303 #ifdef CONFIG_FLATMEM
304 #define pfn_valid(pfn) ((pfn) < end_pfn)
/* Direct-map kernel address -> struct page / validity / back again. */
307 #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
308 #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
309 #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
170 /* VIRT <-> MACHINE conversion */
/* Kernel virtual address -> machine address / machine frame and back,
 * composed from the __pa/__va and pfn/mfn helpers above. */
312 #define virt_to_machine(v) (phys_to_machine(__pa(v)))
313 #define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
314 #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
/* Default vma flags for new mappings: rw + may-exec, with exec granted
 * when the personality requests read-implies-exec. */
316 #define VM_DATA_DEFAULT_FLAGS \
317 (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
318 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
320 #define __HAVE_ARCH_GATE_AREA 1
323 extern int devmem_is_allowed(unsigned long pagenr);
326 #endif /* __KERNEL__ */
328 #include <asm-generic/memory_model.h>
329 #include <asm-generic/page.h>
331 #endif /* _X86_64_PAGE_H */