#ifndef _I386_PGTABLE_3LEVEL_H
#define _I386_PGTABLE_3LEVEL_H

/*
 * Intel Physical Address Extension (PAE) Mode - three-level page
 * tables on PPro+ CPUs.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, &(e), (e).pte_high, (e).pte_low)
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }

/*
 * Is the pte executable?
 */
static inline int pte_x(pte_t pte)
{
	return !(pte_val(pte) & _PAGE_NX);
}

/*
 * All present user-pages with !NX bit are user-executable:
 */
static inline int pte_exec(pte_t pte)
{
	return pte_user(pte) && pte_x(pte);
}

/*
 * All present pages with !NX bit are kernel-executable:
 */
static inline int pte_exec_kernel(pte_t pte)
{
	return pte_x(pte);
}

/* Rules for using set_pte: the pte being assigned *must* be
 * either not present or in a state where the hardware will
 * not attempt to update the pte. In places where this is
 * not possible, use ptep_get_and_clear to obtain the old pte
 * value and then use set_pte to update it. -ben
 */
static inline void set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}
#define __HAVE_ARCH_SET_PTE_ATOMIC
#define set_pte_atomic(pteptr,pteval) \
		set_64bit((unsigned long long *)(pteptr),pte_val(pteval))
#define set_pmd(pmdptr,pmdval) \
		set_64bit((unsigned long long *)(pmdptr),pmd_val(pmdval))
#define set_pgd(pgdptr,pgdval) \
		set_64bit((unsigned long long *)(pgdptr),pgd_val(pgdval))

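/*
 * Note on the contrast with set_pte() above (editorial comment): the
 * atomic variants go through set_64bit(), which is expected to store
 * both 32-bit halves of the entry as a single atomic 64-bit write, so
 * they can be used on an entry the hardware may walk concurrently,
 * whereas set_pte() writes the halves separately.
 */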

/*
 * Pentium-II erratum A13: in PAE mode we explicitly have to flush
 * the TLB via cr3 if the top-level pgd is changed...
 * We do not let the generic code free and clear pgd entries due to
 * this erratum.
 */
static inline void pgd_clear (pgd_t * pgd) { }

#define pgd_page(pgd) \
	((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))

/* Find an entry in the second-level page table. */
#define pmd_offset(dir, address) ((pmd_t *) pgd_page(*(dir)) + \
			pmd_index(address))
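
/*
 * Illustrative sketch, assuming the usual pgd_offset() and
 * pte_offset_map() helpers from the enclosing pgtable.h: a full
 * three-level lookup of the pte mapping `address' reads
 *
 *	pgd_t *pgd = pgd_offset(mm, address);
 *	pmd_t *pmd = pmd_offset(pgd, address);
 *	pte_t *pte = pte_offset_map(pmd, address);
 */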

static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
	pte_t res;

	/* xchg acts as a barrier before the setting of the high bits */
	res.pte_low = xchg(&ptep->pte_low, 0);
	res.pte_high = ptep->pte_high;
	ptep->pte_high = 0;

	return res;
}
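
/*
 * Illustrative sketch, not part of the original header (the function
 * name is hypothetical): per the rule above set_pte(), an entry that
 * may still be live is first cleared, so the hardware never observes a
 * half-written 64-bit pte, and only then rewritten. The caller still
 * owns any required TLB flush.
 */
static inline pte_t __example_replace_pte(pte_t *ptep, pte_t new_pte)
{
	pte_t old = ptep_get_and_clear(ptep);	/* xchg clears the present bit atomically */
	set_pte(ptep, new_pte);			/* safe: entry is no longer present */
	return old;
}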

static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

static inline int pte_none(pte_t pte)
{
	return !pte.pte_low && !pte.pte_high;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte.pte_low >> PAGE_SHIFT) |
		(pte.pte_high << (32 - PAGE_SHIFT));
}

extern unsigned long long __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;

	pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
			(pgprot_val(pgprot) >> 32);
	pte.pte_high &= (__supported_pte_mask >> 32);
	pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
			__supported_pte_mask;
	return pte;
}
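
/*
 * Illustrative sketch, not part of the original header (the function
 * name is hypothetical): pte_pfn() undoes pfn_pte(). Pfn bits below
 * bit 20 land in pte_low above PAGE_SHIFT, the remaining bits in
 * pte_high, so the round trip holds for any pfn whose physical address
 * is covered by __supported_pte_mask.
 */
static inline int __example_pfn_roundtrip(unsigned long pfn, pgprot_t prot)
{
	return pte_pfn(pfn_pte(pfn, prot)) == pfn;
}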

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) | \
			pgprot_val(pgprot)) & __supported_pte_mask);
}

/*
 * Bits 0, 6 and 7 are taken in the low part of the pte,
 * so the 32 bits of file offset go into the high part.
 */
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
#define PTE_FILE_MAX_BITS	32
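
/*
 * Worked example (editorial): pgoff_to_pte(off) builds
 * { .pte_low = _PAGE_FILE, .pte_high = off }, i.e. a non-present entry
 * tagged as a file pte, and pte_to_pgoff() simply reads the offset
 * back out of pte_high.
 */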

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val) & 0x1f)
#define __swp_offset(x)			((x).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })
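
/*
 * Worked example (editorial): __swp_entry(3, 0x1234) packs the type
 * into the low five bits and the offset above them, giving
 * val = 0x24683; __swp_type() and __swp_offset() recover 3 and 0x1234.
 * The value lives in pte_high with pte_low zero, so a swapped-out pte
 * is never seen as present by the hardware.
 */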

#endif /* _I386_PGTABLE_3LEVEL_H */