#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H

#include <asm/hypervisor.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <asm/paravirt.h>

#ifndef _I386_BITOPS_H
#include <asm/bitops.h>
#endif
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mm_struct;
struct vm_area_struct;
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern unsigned long empty_zero_page[1024];
extern pgd_t swapper_pg_dir[1024];
extern struct kmem_cache *pgd_cache;
extern struct kmem_cache *pmd_cache;
extern spinlock_t pgd_lock;
extern struct page *pgd_list;

void pmd_ctor(void *, struct kmem_cache *, unsigned long);
void pgd_ctor(void *, struct kmem_cache *, unsigned long);
void pgd_dtor(void *, struct kmem_cache *, unsigned long);
void pgtable_cache_init(void);
void paging_init(void);
/*
 * The Linux x86 paging architecture is 'compile-time dual-mode', it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level-defs.h>
# define PMD_SIZE	(1UL << PMD_SHIFT)
# define PMD_MASK	(~(PMD_SIZE-1))
#else
# include <asm/pgtable-2level-defs.h>
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT	22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
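
/*
 * Worked example (added for illustration, not in the original header):
 * with the usual PAGE_OFFSET of 0xc0000000 and 2-level paging
 * (PGDIR_SHIFT == 22), the boot-time split works out to
 *
 *	BOOT_USER_PGD_PTRS   = 0xc0000000 >> 22 = 768
 *	BOOT_KERNEL_PGD_PTRS = 1024 - 768       = 256
 *
 * i.e. the top quarter of each page directory maps the kernel.
 */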
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + vmalloc_earlyreserve + \
			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
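
/*
 * Worked example (added for illustration, not in the original header):
 * with high_memory == 0xc8000000 and vmalloc_earlyreserve == 0,
 *
 *	VMALLOC_START = (0xc8000000 + 2*0x800000 - 1) & ~0x7fffff
 *	              = 0xc8800000
 *
 * which leaves exactly the 8MB guard hole described above.
 */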
/*
 * _PAGE_PSE set in the page directory entry just means that
 * the page directory entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_BIT_PRESENT	0
#define _PAGE_BIT_RW		1
#define _PAGE_BIT_USER		2
#define _PAGE_BIT_PWT		3
#define _PAGE_BIT_PCD		4
#define _PAGE_BIT_ACCESSED	5
#define _PAGE_BIT_DIRTY		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_NX		63
#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004
#define _PAGE_PWT	0x008
#define _PAGE_PCD	0x010
#define _PAGE_ACCESSED	0x020
#define _PAGE_DIRTY	0x040
#define _PAGE_PSE	0x080	/* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */
#define _PAGE_UNUSED1	0x200	/* available for programmer */
#define _PAGE_UNUSED2	0x400
#define _PAGE_UNUSED3	0x800

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	0x040	/* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE	0x080	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */
#ifdef CONFIG_X86_PAE
#define _PAGE_NX	(1ULL<<_PAGE_BIT_NX)
#else
#define _PAGE_NX	0
#endif
#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE \
	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED \
	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)

#define PAGE_SHARED_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY \
	PAGE_COPY_NOEXEC
#define PAGE_READONLY \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC \
	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#define _PAGE_KERNEL \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define _PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)
/*
 * The i386 can't do page protection for execute, and considers that
 * the same as a read. Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
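
/*
 * Illustrative sketch (added for this edit, not in the original header):
 * the generic mm layer indexes these sixteen pgprots with the low three
 * mmap prot bits plus a "shared" bit.  The real table is protection_map[]
 * in mm/mmap.c; the copy below only demonstrates the convention.
 */
#if 0
static pgprot_t example_protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
/* e.g. a MAP_PRIVATE PROT_READ|PROT_WRITE mapping gets __P011 == PAGE_COPY */
#endif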
/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without an 'access_ok(VERIFY_WRITE,..)'
 */
#undef TEST_ACCESS_OK

/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];
#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))

/* To avoid harmful races, pmd_none(x) should check only the lower word when PAE is enabled */
#define pmd_none(x)	(!(unsigned long)pmd_val(x))
/* pmd_present doesn't just test the _PAGE_PRESENT bit since the writable
   page tables (wr.p.t.) feature can temporarily clear it. */
#define pmd_present(x)	(pmd_val(x))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
static inline int pte_read(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }
static inline int pte_huge(pte_t pte)		{ return (pte).pte_low & _PAGE_PSE; }

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }

static inline pte_t pte_rdprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkread(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }
static inline pte_t pte_mkhuge(pte_t pte)	{ (pte).pte_low |= _PAGE_PSE; return pte; }
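
/*
 * Illustrative sketch (added for this edit, not in the original header):
 * these helpers transform a pte value in hand, not the live page table,
 * so callers compose them and then install the result, roughly:
 *
 *	pte_t entry = pte_mkyoung(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 */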
#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif
#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif
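
/*
 * Illustrative sketch (added for this edit, not in the original header):
 * a caller that pokes a pte directly rather than going through set_pte()
 * must follow the rules above, roughly:
 *
 *	ptep->pte_low |= _PAGE_UNUSED1;	// raw write, not set_pte()
 *	pte_update(mm, addr, ptep);	// resync any shadow page tables
 *
 * ptep_set_wrprotect() below is a real instance of this pattern.
 */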
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
do {									\
	if (__dirty) {							\
		if (likely((__vma)->vm_mm == current->mm)) {		\
			BUG_ON(HYPERVISOR_update_va_mapping((__address), (__entry), UVMF_INVLPG|UVMF_MULTI|(unsigned long)((__vma)->vm_mm->cpu_vm_mask.bits))); \
			pte_update_defer((__vma)->vm_mm, (__address), (__ptep)); \
		} else {						\
			xen_l1_entry_update((__ptep), (__entry));	\
			pte_update_defer((__vma)->vm_mm, (__address), (__ptep)); \
			flush_tlb_page((__vma), (__address));		\
		}							\
	}								\
} while (0)
/*
 * We don't actually have these, but we want to advertise them so that
 * we can encompass the flush here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
#define ptep_clear_flush_dirty(vma, address, ptep)			\
({									\
	int __dirty;							\
	__dirty = pte_dirty(*(ptep));					\
	if (__dirty) {							\
		clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low);		\
		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
		flush_tlb_page(vma, address);				\
	}								\
	__dirty;							\
})
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(vma, address, ptep)			\
({									\
	int __young;							\
	__young = pte_young(*(ptep));					\
	if (__young) {							\
		clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low);	\
		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
		flush_tlb_page(vma, address);				\
	}								\
	__young;							\
})
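
/*
 * Illustrative sketch (added for this edit, not in the original header):
 * the page reclaim scanner uses the young variant to age mappings,
 * roughly as in page_referenced_one():
 *
 *	if (ptep_clear_flush_young(vma, address, pte))
 *		referenced++;
 */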
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t pte = raw_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
{
	pte_t pte;
	if (full) {
		pte = *ptep;
		pte_clear(mm, addr, ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	if (pte_write(*ptep))
		clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
	pte_update(mm, addr, ptep);
}
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
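
/*
 * Illustrative sketch (added for this edit, not in the original header):
 * pgd_ctor() seeds a freshly allocated pgd with the shared kernel
 * mappings using this helper, roughly:
 *
 *	clone_pgd_range(pgd + USER_PTRS_PER_PGD,
 *			swapper_pg_dir + USER_PTRS_PER_PGD,
 *			KERNEL_PGD_PTRS);
 */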
/*
 * Macro to mark a page protection value as "uncacheable".
 * On processors which do not support it, this is a no-op.
 */
#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)				   \
				 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))	   \
				 : (prot))
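
/*
 * Illustrative sketch (added for this edit, not in the original header):
 * a driver mapping device registers to user space would combine this
 * with io_remap_pfn_range() (defined near the end of this file):
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *				  vma->vm_end - vma->vm_start,
 *				  vma->vm_page_prot);
 */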
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_low |= pgprot_val(newprot);
#ifdef CONFIG_X86_PAE
	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	pte.pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
	pte.pte_high |= (pgprot_val(newprot) >> 32) & \
					(__supported_pte_mask >> 32);
#endif
	return pte;
}
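
/*
 * Illustrative sketch (added for this edit, not in the original header):
 * mprotect-style code changes the protection of a present pte while
 * keeping the pfn and accessed/dirty state, roughly:
 *
 *	pte_t old = ptep_get_and_clear(mm, addr, ptep);
 *	set_pte_at(mm, addr, ptep, pte_modify(old, newprot));
 */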
#define pmd_large(pmd) \
	((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_index_k(addr) pgd_index(addr)
/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
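
/*
 * Worked example (added for illustration, not in the original header):
 * with 2-level paging (PGDIR_SHIFT == 22, PTRS_PER_PGD == PTRS_PER_PTE
 * == 1024), the kernel address 0xc0123456 decomposes as:
 *
 *	pgd_index(0xc0123456) == 0xc0123456 >> 22          == 768
 *	pte_index(0xc0123456) == (0xc0123456 >> 12) & 1023 == 291
 */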
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_page_vaddr(pmd) \
		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as an lvalue.
 */
extern pte_t *lookup_address(unsigned long address);
/*
 * Make a given kernel text page executable/non-executable.
 * Returns the previous executability setting of that page (which
 * is used to restore the previous state). Used by the SMP bootup code.
 * NOTE: this is an __init function for security reasons.
 */
#ifdef CONFIG_X86_PAE
extern int set_kernel_exec(unsigned long vaddr, int enable);
#else
static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0; }
#endif
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
	 pte_index(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
	 pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)					\
do {									\
	pte_clear(&init_mm, vaddr, ptep);				\
	__flush_tlb_one(vaddr);						\
} while (0)
/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)
#define __HAVE_ARCH_PTEP_ESTABLISH
#define ptep_establish(__vma, __address, __ptep, __entry)		\
do {									\
	ptep_set_access_flags(__vma, __address, __ptep, __entry, 1);	\
} while (0)
#include <xen/features.h>
void make_lowmem_page_readonly(void *va, unsigned int feature);
void make_lowmem_page_writable(void *va, unsigned int feature);
void make_page_readonly(void *va, unsigned int feature);
void make_page_writable(void *va, unsigned int feature);
void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
#define virt_to_ptep(__va)						\
({									\
	pgd_t *__pgd = pgd_offset_k((unsigned long)(__va));		\
	pud_t *__pud = pud_offset(__pgd, (unsigned long)(__va));	\
	pmd_t *__pmd = pmd_offset(__pud, (unsigned long)(__va));	\
	pte_offset_kernel(__pmd, (unsigned long)(__va));		\
})

#define arbitrary_virt_to_machine(__va)					\
({									\
	maddr_t m = (maddr_t)pte_mfn(*virt_to_ptep(__va)) << PAGE_SHIFT;\
	m | ((unsigned long)(__va) & (PAGE_SIZE-1));			\
})
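
/*
 * Illustrative sketch (added for this edit, not in the original header):
 * code that hands a kernel-virtual buffer to the hypervisor converts it
 * to a machine address first, e.g. (hypothetical call site):
 *
 *	maddr_t maddr = arbitrary_virt_to_machine(buf);
 *
 * virt_to_ptep() assumes the address is mapped in the kernel page tables.
 */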
#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_FLATMEM
#define kern_addr_valid(addr)	(1)
#endif /* CONFIG_FLATMEM */
int direct_remap_pfn_range(struct vm_area_struct *vma,
			   unsigned long address,
			   unsigned long mfn,
			   unsigned long size,
			   pgprot_t prot,
			   domid_t domid);
int direct_kernel_remap_pfn_range(unsigned long address,
				  unsigned long mfn,
				  unsigned long size,
				  pgprot_t prot,
				  domid_t domid);
int create_lookup_pte_addr(struct mm_struct *mm,
			   unsigned long address,
			   uint64_t *ptep);
int touch_pte_range(struct mm_struct *mm,
		    unsigned long address,
		    unsigned long size);
#define io_remap_pfn_range(vma,vaddr,pfn,size,prot) \
	direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)
#include <asm-generic/pgtable.h>

#endif /* _I386_PGTABLE_H */