/*
 *  linux/include/asm-arm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <asm/memory.h>
#include <asm/proc-fns.h>
#include <asm/arch/vmalloc.h>

/*
 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.
 *
 * Linux on the other hand has a three level page table structure, which can
 * be wrapped to fit a two level page table structure easily - using the PGD
 * and PTE only.  However, Linux also expects one "PTE" table per page, and
 * at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
 * hardware PTE tables arranged contiguously, followed by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+ +0
 * |        |-----> +------------+ +0
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +1024
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +2048
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +3072
 * +--------+       | Linux pt 1 |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first level page table.
 *
 * The "dirty" bit is emulated by only granting hardware write permission
 * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_establish() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set.  Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry.  Again, ptep_establish() will ensure that the TLB is up to
 * date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE.  Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch which changes the user space mapping occurs.
 */
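/*
 * Illustrative sketch only, not part of the kernel API: the emulation above
 * means the hardware permissions behave as if they were derived from the
 * Linux PTE bits by the hypothetical helpers below.  The real translation
 * is done by the processor-specific cpu_set_pte() used by set_pte() further
 * down; the L_PTE_* bits are defined later in this file.
 */
#if 0
static inline int example_hw_write_ok(pte_t pte)
{
	/* h/w write permission only if the page is both writable and dirty */
	return (pte_val(pte) & (L_PTE_WRITE | L_PTE_DIRTY))
			== (L_PTE_WRITE | L_PTE_DIRTY);
}

static inline int example_hw_access_ok(pte_t pte)
{
	/* any h/w access requires the page to be present and young */
	return (pte_val(pte) & (L_PTE_PRESENT | L_PTE_YOUNG))
			== (L_PTE_PRESENT | L_PTE_YOUNG);
}
#endif
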
#define PTRS_PER_PTE		512
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		2048

/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT		21
#define PGDIR_SHIFT		21

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
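
/*
 * Worked example of the sizes above (added note, not from the original
 * source): with PGDIR_SHIFT == 21, PGDIR_SIZE is 1UL << 21 == 2MB, so each
 * of the 2048 "PGD" entries (8 bytes each, i.e. two hardware first-level
 * pointers) covers 2MB, and 2048 * 2MB spans the full 4GB address space.
 * USER_PTRS_PER_PGD is then simply TASK_SIZE/2MB minus the one reserved
 * entry (FIRST_USER_PGD_NR == 1).
 */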

/*
 * Hardware page table definitions.
 *
 * + Level 1 descriptor (PMD)
 */
#define PMD_TYPE_MASK		(3 << 0)
#define PMD_TYPE_FAULT		(0 << 0)
#define PMD_TYPE_TABLE		(1 << 0)
#define PMD_TYPE_SECT		(2 << 0)
#define PMD_BIT4		(1 << 4)
#define PMD_DOMAIN(x)		((x) << 5)
#define PMD_PROTECTION		(1 << 9)	/* v5 */

#define PMD_SECT_BUFFERABLE	(1 << 2)
#define PMD_SECT_CACHEABLE	(1 << 3)
#define PMD_SECT_AP_WRITE	(1 << 10)
#define PMD_SECT_AP_READ	(1 << 11)
#define PMD_SECT_TEX(x)		((x) << 12)	/* v5 */
#define PMD_SECT_APX		(1 << 15)	/* v6 */
#define PMD_SECT_S		(1 << 16)	/* v6 */
#define PMD_SECT_nG		(1 << 17)	/* v6 */

#define PMD_SECT_UNCACHED	(0)
#define PMD_SECT_BUFFERED	(PMD_SECT_BUFFERABLE)
#define PMD_SECT_WT		(PMD_SECT_CACHEABLE)
#define PMD_SECT_WB		(PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
#define PMD_SECT_MINICACHE	(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
#define PMD_SECT_WBWA		(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
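
/*
 * Illustrative example (added note, not from the original source): a 1MB,
 * write-back cached, kernel-read/write section mapping might be composed
 * roughly as
 *
 *	section_base | PMD_SECT_AP_WRITE | PMD_SECT_WB |
 *		PMD_DOMAIN(DOMAIN_KERNEL) | PMD_TYPE_SECT
 *
 * where section_base is a hypothetical 1MB-aligned physical address and
 * DOMAIN_KERNEL comes from <asm/domain.h>.  The exact attribute bits used
 * for the kernel's own sections are chosen by the per-CPU mm setup code,
 * not by this header.
 */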

/*
 *   - coarse table (not used)
 */

/*
 * + Level 2 descriptor (PTE)
 */
#define PTE_TYPE_MASK		(3 << 0)
#define PTE_TYPE_FAULT		(0 << 0)
#define PTE_TYPE_LARGE		(1 << 0)
#define PTE_TYPE_SMALL		(2 << 0)
#define PTE_TYPE_EXT		(3 << 0)	/* v5 */
#define PTE_BUFFERABLE		(1 << 2)
#define PTE_CACHEABLE		(1 << 3)

/*
 *   - extended small page/tiny page
 */
#define PTE_EXT_AP_MASK		(3 << 4)
#define PTE_EXT_AP_UNO_SRO	(0 << 4)
#define PTE_EXT_AP_UNO_SRW	(1 << 4)
#define PTE_EXT_AP_URO_SRW	(2 << 4)
#define PTE_EXT_AP_URW_SRW	(3 << 4)
#define PTE_EXT_TEX(x)		((x) << 6)	/* v5 */

/*
 *   - small page
 */
#define PTE_SMALL_AP_MASK	(0xff << 4)
#define PTE_SMALL_AP_UNO_SRO	(0x00 << 4)
#define PTE_SMALL_AP_UNO_SRW	(0x55 << 4)
#define PTE_SMALL_AP_URO_SRW	(0xaa << 4)
#define PTE_SMALL_AP_URW_SRW	(0xff << 4)
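
/*
 * Note on the values above (added for clarity, not from the original
 * source): a small page descriptor carries four 2-bit AP fields, one per
 * 1KB subpage, so the access permission is replicated into all four
 * fields.  For example, AP == 01 (user no access / supervisor read-write)
 * repeated four times gives the 0x55 in PTE_SMALL_AP_UNO_SRW:
 *
 *	0x55 == 0b01010101 == 01 | 01 << 2 | 01 << 4 | 01 << 6
 */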
174 * "Linux" PTE definitions.
176 * We keep two sets of PTEs - the hardware and the linux version.
177 * This allows greater flexibility in the way we map the Linux bits
178 * onto the hardware tables, and allows us to have YOUNG and DIRTY
181 * The PTE table pointer refers to the hardware entries; the "Linux"
182 * entries are stored 1024 bytes below.
184 #define L_PTE_PRESENT (1 << 0)
185 #define L_PTE_FILE (1 << 1) /* only when !PRESENT */
186 #define L_PTE_YOUNG (1 << 1)
187 #define L_PTE_BUFFERABLE (1 << 2) /* matches PTE */
188 #define L_PTE_CACHEABLE (1 << 3) /* matches PTE */
189 #define L_PTE_USER (1 << 4)
190 #define L_PTE_WRITE (1 << 5)
191 #define L_PTE_EXEC (1 << 6)
192 #define L_PTE_DIRTY (1 << 7)

#ifndef __ASSEMBLY__

#include <asm/domain.h>

#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))

/*
 * The following macros handle the cache and bufferable bits...
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_CACHEABLE | L_PTE_BUFFERABLE
#define _L_PTE_READ	L_PTE_USER | L_PTE_EXEC

extern pgprot_t pgprot_kernel;

#define PAGE_NONE	__pgprot(_L_PTE_DEFAULT)
#define PAGE_COPY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_SHARED	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE)
#define PAGE_READONLY	__pgprot(_L_PTE_DEFAULT | _L_PTE_READ)
#define PAGE_KERNEL	pgprot_kernel

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED
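
/*
 * How these are used (explanatory note, not from the original source): the
 * generic mm code copies __Pxyz/__Sxyz into the protection_map[] array and
 * indexes it with the mapping's read/write/exec/shared flags.  So, for
 * instance, a private PROT_READ|PROT_WRITE mapping selects __P011, which is
 * PAGE_COPY here: the page is mapped without hardware write permission so
 * that the first write faults and can be handled as copy-on-write.
 */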

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(ptep)		set_pte((ptep), __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map(dir,addr)	(pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_offset_map_nested(dir,addr)	(pmd_page_kernel(*(dir)) + __pte_index(addr))
#define pte_unmap(pte)		do { } while (0)
#define pte_unmap_nested(pte)	do { } while (0)

#define set_pte(ptep, pte)	cpu_set_pte(ptep,pte)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_read(pte)		(pte_val(pte) & L_PTE_USER)
#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
#define pte_exec(pte)		(pte_val(pte) & L_PTE_EXEC)
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)

/*
 * The following only works if pte_present() is not true.
 */
#define pte_file(pte)		(pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x)		(pte_val(x) >> 2)
#define pgoff_to_pte(x)		__pte(((x) << 2) | L_PTE_FILE)

#define PTE_FILE_MAX_BITS	30
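
/*
 * Worked example of the nonlinear-file encoding above (added for clarity):
 * pgoff_to_pte(n) stores the page offset n in bits 2..31 and sets
 * L_PTE_FILE; pte_to_pgoff() simply shifts it back down.  Bits 2..31 give
 * 30 usable bits, hence PTE_FILE_MAX_BITS == 30.  Bit 0 (L_PTE_PRESENT)
 * stays clear, which is what marks the entry as "not present" in the
 * first place.
 */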

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

/*PTE_BIT_FUNC(rdprotect, &= ~L_PTE_USER);*/
/*PTE_BIT_FUNC(mkread,    |= L_PTE_USER);*/
PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
PTE_BIT_FUNC(exprotect, &= ~L_PTE_EXEC);
PTE_BIT_FUNC(mkexec,    |= L_PTE_EXEC);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
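
/*
 * For reference (added note): each PTE_BIT_FUNC() invocation above expands
 * to a small accessor; e.g. PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY) becomes
 *
 *	static inline pte_t pte_mkdirty(pte_t pte)
 *	{
 *		pte_val(pte) |= L_PTE_DIRTY;
 *		return pte;
 *	}
 *
 * operating on a copy of the pte value; callers must write the result back
 * with set_pte() for it to take effect.
 */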

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot)	__pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE))
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~L_PTE_CACHEABLE)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & 2)

#define set_pmd(pmdp,pmd)		\
	do {				\
		*pmdp = pmd;		\
		flush_pmd_entry(pmdp);	\
	} while (0)

#define copy_pmd(pmdpd,pmdps)		\
	do {				\
		pmdpd[0] = pmdps[0];	\
		pmdpd[1] = pmdps[1];	\
		flush_pmd_entry(pmdpd);	\
	} while (0)

#define pmd_clear(pmdp)			\
	do {				\
		pmdp[0] = __pmd(0);	\
		pmdp[1] = __pmd(0);	\
		clean_pmd_entry(pmdp);	\
	} while (0)

static inline pte_t *pmd_page_kernel(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
	ptr += PTRS_PER_PTE * sizeof(void *);

	return __va(ptr);
}

#define pmd_page(pmd)		virt_to_page(__va(pmd_val(pmd)))

/*
 * Permanent address of a page. We never have highmem, so this is trivial.
 */
#define pages_to_mb(x)		((x) >> (20 - PAGE_SHIFT))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)

#define page_pte_prot(page,prot)	mk_pte(page, prot)
#define page_pte(page)		mk_pte(page, __pgprot(0))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#define pgd_offset(mm, addr)	((mm)->pgd+pgd_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

/* Find an entry in the third-level page table.. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
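
/*
 * Example walk (added for clarity, not from the original source): looking
 * up the Linux pte for a kernel virtual address goes through all three
 * (folded) levels using the macros above:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);        (a no-op cast: the pmd is folded into the pgd)
 *	pte_t *pte = pte_offset_kernel(pmd, addr); (the Linux pte, see pmd_page_kernel())
 *
 * For user addresses, pgd_offset(mm, addr) on the task's mm is used
 * instead of pgd_offset_k().
 */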

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on 4k machines
 */
#define __swp_type(x)		(((x).val >> 2) & 0x7f)
#define __swp_offset(x)		((x).val >> 9)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
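
/*
 * Worked numbers for the encoding above (added note): bit 0 (the present
 * bit) stays clear, the swap type lives in bits 2..8 (7 bits, masked with
 * 0x7f) and the swap offset in bits 9..31, i.e. 23 bits.  2^23 pages of
 * 4KB each is the 32GB of swap mentioned above.
 */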

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

#include <asm-generic/pgtable.h>

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * remap a physical address `phys' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_page_range(vma,from,phys,size,prot) \
		remap_page_range(vma,from,phys,size,prot)

#define pgtable_cache_init() do { } while (0)

#endif /* !__ASSEMBLY__ */

#endif /* _ASMARM_PGTABLE_H */