1 #ifndef _I386_PGTABLE_2LEVEL_H
2 #define _I386_PGTABLE_2LEVEL_H
4 #include <asm-generic/pgtable-nopmd.h>
7 printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
9 printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
12 * Certain architectures need to do special things when PTEs
13 * within a page table are directly modified. Thus, the following
14 * hook is made available.
/*
 * Direct PTE assignment: the pte is written in place with a plain store.
 * NOTE(review): assumes the 2-level (non-PAE) layout where a pte is a
 * single 32-bit word, so the store is inherently atomic -- confirm.
 */
16 #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
/* Single-word pte: the plain store already suffices for the atomic case. */
17 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
/*
 * pmd (L2 page-directory) updates: without shadow mode, L2 entries must be
 * updated through the Xen hypervisor via xen_l2_entry_update(); under
 * CONFIG_XEN_SHADOW_MODE the entry is written directly and the hypervisor
 * shadows it.  NOTE(review): the #else/#endif of this conditional fall
 * outside the visible chunk.
 */
19 #ifndef CONFIG_XEN_SHADOW_MODE
20 #define set_pmd(pmdptr, pmdval) xen_l2_entry_update((pmdptr), (pmdval))
22 #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
/*
 * Atomically fetch and clear a pte.  xchg() on the single 32-bit word
 * ensures no concurrent hardware A/D-bit update is lost; the old value is
 * returned as a *machine* pte (__pte_ma), i.e. it still contains an MFN.
 */
25 #define ptep_get_and_clear(mm, address, xp) __pte_ma(xchg(&(xp)->pte_low, 0))
/* Two ptes are equal iff their (only) 32-bit words match. */
26 #define pte_same(a, b) ((a).pte_low == (b).pte_low)
28 * We detect special mappings in one of two ways:
29 * 1. If the MFN is an I/O page then Xen will set the m2p entry
30 * to be outside our maximum possible pseudophys range.
31 * 2. If the MFN belongs to a different domain then we will certainly
32 * not have MFN in our p2m table. Conversely, if the page is ours,
33 * then we'll have p2m(m2p(MFN))==MFN.
34 * If we detect a special mapping then it doesn't have a 'struct page'.
35 * We force !pfn_valid() by returning an out-of-range pointer.
37 * NB. These checks require that, for any MFN that is not in our reservation,
38 * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
39 we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
40 * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
42 * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
43 * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
44 * require. In all the cases we care about, the high bit gets shifted out
45 * (e.g., phys_to_machine()) so behaviour there is correct.
/* Marker for p2m slots that hold no valid machine frame. */
47 #define INVALID_P2M_ENTRY (~0U)
/*
 * Tag an MFN stored in the p2m table as a deliberately-mapped foreign
 * frame by setting the top bit of the word; pte_pfn() then fails the
 * p2m/m2p consistency check, as the comment above requires.  The tag bit
 * is shifted out again on conversion paths such as phys_to_machine().
 */
48 #define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
/*
 * Translate a pte's machine frame number (MFN) back to a pseudo-physical
 * frame number (PFN).  If the frame is not in our reservation (I/O page,
 * foreign domain, or a FOREIGN_FRAME-tagged entry) the m2p/p2m round trip
 * fails the check below and max_mapnr is returned, forcing !pfn_valid()
 * on the result.  NOTE(review): the ({ ... }) statement-expression
 * delimiters of this macro fall outside the visible chunk.
 */
49 #define pte_pfn(_pte) \
51 unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
52 unsigned long pfn = mfn_to_pfn(mfn); \
53 if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn)) \
54 pfn = max_mapnr; /* special: force !pfn_valid() */ \
/* struct page for a pte; special mappings yield an out-of-range pointer
 * via pte_pfn()'s max_mapnr fallback. */
58 #define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
/* A pte is "none" when its single 32-bit word is all zero. */
60 #define pte_none(x) (!(x).pte_low)
/* Build a pte/pmd from a frame number and protection bits.  pfn_pte takes
 * a pseudo-physical frame (translated by __pte); pfn_pte_ma takes a
 * machine frame, used verbatim by __pte_ma. */
61 #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
62 #define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
63 #define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
/* struct page of the page table a pmd points at (the low flag bits are
 * discarded by the PAGE_SHIFT right shift). */
65 #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
/* Kernel virtual address of the page table a pmd points at. */
67 #define pmd_page_kernel(pmd) \
68 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
71 * All present user pages are user-executable:
73 static inline int pte_exec(pte_t pte)
79 * All present pages are kernel-executable:
81 static inline int pte_exec_kernel(pte_t pte)
87 * Bits 0, 6 and 7 are taken, split up the 29 bits of offset
/*
 * Nonlinear file-mapping ptes: with bits 0, 6 and 7 in use, the 29-bit
 * file offset is split across pte bits 1-5 (low 5 bits) and bits 8-31
 * (remaining 24 bits).
 */
90 #define PTE_FILE_MAX_BITS 29
/* Reassemble the file offset from the two bit fields. */
92 #define pte_to_pgoff(pte) \
93 ((((pte).pte_low >> 1) & 0x1f ) + (((pte).pte_low >> 8) << 5 ))
/* Inverse of pte_to_pgoff, with _PAGE_FILE set to mark the encoding. */
95 #define pgoff_to_pte(off) \
96 ((pte_t) { (((off) & 0x1f) << 1) + (((off) >> 5) << 8) + _PAGE_FILE })
98 /* Encode and de-code a swap entry */
/* Swap type occupies pte bits 1-5 (5 bits), the swap offset bits 8 and
 * up; bit 0 stays clear so the pte is never "present". */
99 #define __swp_type(x) (((x).val >> 1) & 0x1f)
100 #define __swp_offset(x) ((x).val >> 8)
101 #define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
/* A swap pte and a swp_entry_t are the same 32-bit word, reinterpreted. */
102 #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
103 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
105 #endif /* _I386_PGTABLE_2LEVEL_H */