#ifndef _I386_PGTABLE_H
#define _I386_PGTABLE_H

#include <linux/config.h>

/*
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree.
 */
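
/*
 * A minimal sketch of how the fold plays out: on 2-level, pmd_offset()
 * in pgtable-2level.h simply returns its pgd argument cast to (pmd_t *),
 * so a generic three-level walk
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);	(same pointer on 2-level)
 *
 * compiles down to the two-level walk the hardware actually performs.
 */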
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <linux/threads.h>
#include <linux/slab.h>

#ifndef _I386_BITOPS_H
#include <asm/bitops.h>
#endif

extern pgd_t swapper_pg_dir[1024];
extern kmem_cache_t *pgd_cache, *pmd_cache, *kpmd_cache;
extern spinlock_t pgd_lock;
extern struct page *pgd_list;
void pmd_ctor(void *, kmem_cache_t *, unsigned long);
void kpmd_ctor(void *, kmem_cache_t *, unsigned long);
void pgd_ctor(void *, kmem_cache_t *, unsigned long);
void pgd_dtor(void *, kmem_cache_t *, unsigned long);
void pgtable_cache_init(void);
extern void paging_init(void);
void setup_identity_mappings(pgd_t *pgd_base, unsigned long start, unsigned long end);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[1024];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

/*
 * The Linux x86 paging architecture is 'compile-time dual-mode': it
 * implements both the traditional 2-level x86 page tables and the
 * newer 3-level PAE-mode page tables.
 */

extern void set_system_gate(unsigned int n, void *addr);
extern void init_entry_mappings(void);
extern void entry_trampoline_setup(void);

#ifdef CONFIG_X86_PAE
# include <asm/pgtable-3level.h>
#else
# include <asm/pgtable-2level.h>
#endif
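
/*
 * For quick reference, the geometry the two mode headers set up:
 *
 *	2-level: PGDIR_SHIFT 22, PTRS_PER_PGD 1024,
 *		 PTRS_PER_PMD 1 (the fold), PTRS_PER_PTE 1024
 *	3-level: PGDIR_SHIFT 30, PTRS_PER_PGD 4, PMD_SHIFT 21,
 *		 PTRS_PER_PMD 512, PTRS_PER_PTE 512
 */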

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#if defined(CONFIG_X86_PAE) && defined(CONFIG_X86_4G_VM_LAYOUT)
# define USER_PTRS_PER_PGD	4
#else
# define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) + ((TASK_SIZE % PGDIR_SIZE) + PGDIR_SIZE-1)/PGDIR_SIZE)
#endif

#define FIRST_USER_PGD_NR	0

#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT	22
#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
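
/*
 * Worked example (assuming the usual __PAGE_OFFSET of 0xC0000000):
 *
 *	BOOT_USER_PGD_PTRS   = 0xC0000000 >> 22 = 768
 *	BOOT_KERNEL_PGD_PTRS = 1024 - 768       = 256
 *
 * i.e. the boot-time page directory is split 3GB user / 1GB kernel.
 */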

/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + 2*VMALLOC_OFFSET-1) & \
						~(VMALLOC_OFFSET-1))
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
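
/*
 * Worked example with hypothetical numbers: if high_memory is
 * 0xC8000000 (128MB of lowmem at the usual 3GB offset), then
 *
 *	VMALLOC_START = (0xC8000000 + 0x00FFFFFF) & ~0x007FFFFF
 *	              = 0xC8800000
 *
 * which leaves the intended 8MB guard hole below the vmalloc area.
 */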

/*
 * The 4MB page bit is guesswork: it is detailed in the infamous
 * "Chapter H" of the Pentium manuals, but assuming Intel did the
 * straightforward thing, setting this bit in a page directory entry
 * just means that the entry points directly to a 4MB-aligned block of
 * memory.
 */
#define _PAGE_BIT_PRESENT	0
#define _PAGE_BIT_RW		1
#define _PAGE_BIT_USER		2
#define _PAGE_BIT_PWT		3
#define _PAGE_BIT_PCD		4
#define _PAGE_BIT_ACCESSED	5
#define _PAGE_BIT_DIRTY		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11

#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004
#define _PAGE_PWT	0x008
#define _PAGE_PCD	0x010
#define _PAGE_ACCESSED	0x020
#define _PAGE_DIRTY	0x040
#define _PAGE_PSE	0x080	/* 4 MB (or 2MB) page, Pentium+, if present.. */
#define _PAGE_GLOBAL	0x100	/* Global TLB entry PPro+ */
#define _PAGE_UNUSED1	0x200	/* available for programmer */
#define _PAGE_UNUSED2	0x400
#define _PAGE_UNUSED3	0x800

/* When the PTE is not present, the dirty and PSE bits are reused: */
#define _PAGE_FILE	0x040	/* set:pagecache unset:swap */
#define _PAGE_PROTNONE	0x080	/* if the PTE is not present */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)

#define _PAGE_KERNEL \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)

extern unsigned long __PAGE_KERNEL;
#define __PAGE_KERNEL_RO	(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_NOCACHE	(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE	(__PAGE_KERNEL | _PAGE_PSE)

#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)

/*
 * The i386 can't do page protection for execute; it treats execute
 * permission the same as read. Also, write permissions imply read
 * permissions. This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
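
/*
 * Reading the tables above: the three digits are the VM_EXEC, VM_WRITE
 * and VM_READ bits of a mapping, __Pxxx for private and __Sxxx for
 * shared. For example, a private PROT_READ|PROT_WRITE mapping indexes
 * __P011 == PAGE_COPY, so writes fault and the kernel can copy the
 * page (COW), while the equivalent shared mapping indexes
 * __S011 == PAGE_SHARED with the hardware RW bit set.
 */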

/*
 * Define this if things work differently on an i386 and an i486:
 * it will (on an i486) warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA

/* The boot page tables (all created as a single array) */
extern unsigned long pg0[];

#define pte_present(x)	((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(xp)	do { set_pte(xp, __pte(0)); } while (0)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
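
/*
 * Usage sketch (a minimal, illustrative walker fragment): validate the
 * pmd before touching the pte page it points to:
 *
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	if (pmd_none(*pmd))
 *		goto out;		(nothing mapped here)
 *	if (pmd_bad(*pmd)) {		(corrupt entry: complain, clear)
 *		pmd_ERROR(*pmd);
 *		pmd_clear(pmd);
 *		goto out;
 *	}
 *	pte = pte_offset_kernel(pmd, addr);
 */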

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
static inline int pte_read(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
static inline int pte_exec(pte_t pte)		{ return (pte).pte_low & _PAGE_USER; }
static inline int pte_dirty(pte_t pte)		{ return (pte).pte_low & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return (pte).pte_low & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return (pte).pte_low & _PAGE_RW; }

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)		{ return (pte).pte_low & _PAGE_FILE; }

static inline pte_t pte_rdprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_exprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_USER; return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ (pte).pte_low &= ~_PAGE_RW; return pte; }
static inline pte_t pte_mkread(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkexec(pte_t pte)	{ (pte).pte_low |= _PAGE_USER; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ (pte).pte_low |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ (pte).pte_low |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ (pte).pte_low |= _PAGE_RW; return pte; }

static inline int ptep_test_and_clear_dirty(pte_t *ptep)	{ return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low); }
static inline int ptep_test_and_clear_young(pte_t *ptep)	{ return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low); }
static inline void ptep_set_wrprotect(pte_t *ptep)		{ clear_bit(_PAGE_BIT_RW, &ptep->pte_low); }
static inline void ptep_mkdirty(pte_t *ptep)			{ set_bit(_PAGE_BIT_DIRTY, &ptep->pte_low); }
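
/*
 * Illustrative sketch: a reclaim-style aging scan can use the atomic
 * accessed-bit helper without racing against other CPUs (the hardware
 * sets the bit, the locked bitop clears it atomically):
 *
 *	if (ptep_test_and_clear_young(ptep))
 *		referenced = 1;		(a CPU touched it since last scan)
 */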

/*
 * Macro to mark a page protection value as "uncacheable". On processors
 * which do not support it, this is a no-op.
 */
#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)	\
	? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
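
/*
 * Usage sketch (a hypothetical driver mmap handler): mapping device
 * memory into user space uncached:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return io_remap_page_range(vma, vma->vm_start, phys_addr,
 *				   vma->vm_end - vma->vm_start,
 *				   vma->vm_page_prot);
 */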

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_huge(entry)	((entry).pte_low |= _PAGE_PRESENT | _PAGE_PSE)
#define mk_pte_phys(physpage, pgprot)	pfn_pte((physpage) >> PAGE_SHIFT, pgprot)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low &= _PAGE_CHG_MASK;
	pte.pte_low |= pgprot_val(newprot);
	return pte;
}
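
/*
 * Usage sketch (illustrative only): mprotect-style code changes the
 * protection of an existing entry while keeping the pfn and the
 * accessed/dirty state (everything covered by _PAGE_CHG_MASK):
 *
 *	set_pte(ptep, pte_modify(*ptep, PAGE_READONLY));
 */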

#define page_pte(page) page_pte_prot(page, __pgprot(0))

#define pmd_page_kernel(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#ifndef CONFIG_DISCONTIGMEM
#define pmd_page(pmd)	(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#endif /* !CONFIG_DISCONTIGMEM */

#define pmd_large(pmd) \
	((pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
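
/*
 * Worked example (2-level mode, PGDIR_SHIFT == 22): for the kernel
 * address 0xC0100000, pgd_index() is 0xC0100000 >> 22 = 768, i.e. the
 * first pgd slot above the usual 3GB user/kernel split.
 */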

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
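
/*
 * Putting the pieces together, a minimal sketch of a full walk
 * (assuming the address is mapped and the pte page lives in lowmem):
 *
 *	pgd_t *pgd = pgd_offset_k(vaddr);
 *	pmd_t *pmd = pmd_offset(pgd, vaddr);
 *	pte_t *pte = pte_offset_kernel(pmd, vaddr);
 *	unsigned long pfn   = pte_pfn(*pte);
 *	unsigned long paddr = (pfn << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
 */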

#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
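
/*
 * Usage sketch (illustrative only): with CONFIG_HIGHPTE the pte page
 * may live in highmem, so map/unmap must be strictly paired, and the
 * _nested variant is needed when two pte pages are mapped at once
 * (as copy_page_range does):
 *
 *	dst_pte = pte_offset_map(dst_pmd, addr);
 *	src_pte = pte_offset_map_nested(src_pmd, addr);
 *	... copy ptes ...
 *	pte_unmap_nested(src_pte);
 *	pte_unmap(dst_pte);
 */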

/*
 * The i386 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 *
 * Also, we only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
#define update_mmu_cache(vma,address,pte) do { } while (0)
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do {								\
		if (__dirty) {						\
			(__ptep)->pte_low = (__entry).pte_low;		\
			flush_tlb_page(__vma, __address);		\
		}							\
	} while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val >> 1) & 0x1f)
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
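
/*
 * Worked example of the swap-pte layout: bit 0 (present) stays clear,
 * bits 1-5 hold the swap type, bits 8 and up hold the offset, and
 * bits 6-7 stay free for _PAGE_FILE/_PAGE_PROTNONE.  So
 *
 *	__swp_entry(2, 100) == (2 << 1) | (100 << 8) == 0x6404
 *
 * and __swp_type()/__swp_offset() recover 2 and 100 from it.
 */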

#endif /* !__ASSEMBLY__ */

#ifndef CONFIG_DISCONTIGMEM
#define kern_addr_valid(addr)	(1)
#endif /* !CONFIG_DISCONTIGMEM */

#define io_remap_page_range remap_page_range

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

/*
 * The size of the low 1:1 mappings we use during bootup,
 * SMP-boot and ACPI-sleep:
 */
#define LOW_MAPPINGS_SIZE	(16*1024*1024)

#endif /* _I386_PGTABLE_H */