X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-arm%2Fpgtable.h;h=b8cf2d5ec3041c7e3c5ff32a46078bca6b10e4aa;hb=refs%2Fheads%2Fvserver;hp=63889334b25b0128a013dec7ab4520bb7dae35d6;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/include/asm-arm/pgtable.h b/include/asm-arm/pgtable.h
index 63889334b..b8cf2d5ec 100644
--- a/include/asm-arm/pgtable.h
+++ b/include/asm-arm/pgtable.h
@@ -10,9 +10,35 @@
 #ifndef _ASMARM_PGTABLE_H
 #define _ASMARM_PGTABLE_H
 
-#include <asm/memory.h>
+#include <asm-generic/4level-fixup.h>
 #include <asm/proc-fns.h>
+
+#ifndef CONFIG_MMU
+
+#include "pgtable-nommu.h"
+
+#else
+
+#include <asm/memory.h>
 #include <asm/arch/vmalloc.h>
+#include <asm/pgtable-hwdef.h>
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be a 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts. That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * Note that platforms may override VMALLOC_START, but they must provide
+ * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
+ * which may not overlap IO space.
+ */
+#ifndef VMALLOC_START
+#define VMALLOC_OFFSET		(8*1024*1024)
+#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
+#endif
 
 /*
  * Hardware-wise, we have a two level page table structure, where the first
@@ -80,7 +106,7 @@
  * PMD_SHIFT determines the size of the area a second-level page table can map
  * PGDIR_SHIFT determines what a third-level page table entry can map
  */
-#define PMD_SHIFT		20
+#define PMD_SHIFT		21
 #define PGDIR_SHIFT		21
 
 #define LIBRARY_TEXT_START	0x0c000000
@@ -100,75 +126,29 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK		(~(PGDIR_SIZE-1))
 
-#define FIRST_USER_PGD_NR	1
-#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
-
-/*
- * Hardware page table definitions.
- *
- * + Level 1 descriptor (PMD)
- *   - common
- */
-#define PMD_TYPE_MASK		(3 << 0)
-#define PMD_TYPE_FAULT		(0 << 0)
-#define PMD_TYPE_TABLE		(1 << 0)
-#define PMD_TYPE_SECT		(2 << 0)
-#define PMD_BIT4		(1 << 4)
-#define PMD_DOMAIN(x)		((x) << 5)
-#define PMD_PROTECTION		(1 << 9)	/* v5 */
-/*
- *   - section
- */
-#define PMD_SECT_BUFFERABLE	(1 << 2)
-#define PMD_SECT_CACHEABLE	(1 << 3)
-#define PMD_SECT_AP_WRITE	(1 << 10)
-#define PMD_SECT_AP_READ	(1 << 11)
-#define PMD_SECT_TEX(x)		((x) << 12)	/* v5 */
-#define PMD_SECT_APX		(1 << 15)	/* v6 */
-#define PMD_SECT_S		(1 << 16)	/* v6 */
-#define PMD_SECT_nG		(1 << 17)	/* v6 */
-
-#define PMD_SECT_UNCACHED	(0)
-#define PMD_SECT_BUFFERED	(PMD_SECT_BUFFERABLE)
-#define PMD_SECT_WT		(PMD_SECT_CACHEABLE)
-#define PMD_SECT_WB		(PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
-#define PMD_SECT_MINICACHE	(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE)
-#define PMD_SECT_WBWA		(PMD_SECT_TEX(1) | PMD_SECT_CACHEABLE | PMD_SECT_BUFFERABLE)
-
 /*
- *   - coarse table (not used)
+ * This is the lowest virtual address we can permit any user space
+ * mapping to be mapped at. This is particularly important for
+ * non-high vector CPUs.
  */
+#define FIRST_USER_ADDRESS	PAGE_SIZE
 
-/*
- * + Level 2 descriptor (PTE)
- *   - common
- */
-#define PTE_TYPE_MASK		(3 << 0)
-#define PTE_TYPE_FAULT		(0 << 0)
-#define PTE_TYPE_LARGE		(1 << 0)
-#define PTE_TYPE_SMALL		(2 << 0)
-#define PTE_TYPE_EXT		(3 << 0)	/* v5 */
-#define PTE_BUFFERABLE		(1 << 2)
-#define PTE_CACHEABLE		(1 << 3)
+#define FIRST_USER_PGD_NR	1
+#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
 
 /*
- * - extended small page/tiny page
+ * section address mask and size definitions.
  */
-#define PTE_EXT_AP_MASK		(3 << 4)
-#define PTE_EXT_AP_UNO_SRO	(0 << 4)
-#define PTE_EXT_AP_UNO_SRW	(1 << 4)
-#define PTE_EXT_AP_URO_SRW	(2 << 4)
-#define PTE_EXT_AP_URW_SRW	(3 << 4)
-#define PTE_EXT_TEX(x)		((x) << 6)	/* v5 */
+#define SECTION_SHIFT		20
+#define SECTION_SIZE		(1UL << SECTION_SHIFT)
+#define SECTION_MASK		(~(SECTION_SIZE-1))
 
 /*
- * - small page
+ * ARMv6 supersection address mask and size definitions.
  */
-#define PTE_SMALL_AP_MASK	(0xff << 4)
-#define PTE_SMALL_AP_UNO_SRO	(0x00 << 4)
-#define PTE_SMALL_AP_UNO_SRW	(0x55 << 4)
-#define PTE_SMALL_AP_URO_SRW	(0xaa << 4)
-#define PTE_SMALL_AP_URW_SRW	(0xff << 4)
+#define SUPERSECTION_SHIFT	24
+#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
+#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE-1))
 
 /*
  * "Linux" PTE definitions.
@@ -190,14 +170,10 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define L_PTE_WRITE		(1 << 5)
 #define L_PTE_EXEC		(1 << 6)
 #define L_PTE_DIRTY		(1 << 7)
+#define L_PTE_SHARED		(1 << 10)	/* shared(v6), coherent(xsc3) */
 
 #ifndef __ASSEMBLY__
 
-#include <asm/domain.h>
-
-#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
-#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
-
 /*
  * The following macros handle the cache and bufferable bits...
  */
@@ -252,15 +228,19 @@ extern struct page *empty_zero_page;
 #define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
 
 #define pte_none(pte)		(!pte_val(pte))
-#define pte_clear(ptep)		set_pte((ptep), __pte(0))
+#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
 #define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
-#define pte_offset_kernel(dir,addr)	(pmd_page_kernel(*(dir)) + __pte_index(addr))
-#define pte_offset_map(dir,addr)	(pmd_page_kernel(*(dir)) + __pte_index(addr))
-#define pte_offset_map_nested(dir,addr)	(pmd_page_kernel(*(dir)) + __pte_index(addr))
+#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
+#define pte_offset_map(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
+#define pte_offset_map_nested(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
 #define pte_unmap(pte)		do { } while (0)
 #define pte_unmap_nested(pte)	do { } while (0)
 
-#define set_pte(ptep, pte)	cpu_set_pte(ptep,pte)
+#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+
+#define set_pte_at(mm,addr,ptep,pteval) do { \
+	set_pte_ext(ptep, pteval, (addr) >= PAGE_OFFSET ? 0 : PTE_EXT_NG); \
+ } while (0)
 
 /*
  * The following only work if pte_present() is true.
@@ -306,10 +286,11 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
 #define pmd_present(pmd)	(pmd_val(pmd))
 #define pmd_bad(pmd)		(pmd_val(pmd) & 2)
 
-#define set_pmd(pmdp,pmd)		\
+#define copy_pmd(pmdpd,pmdps)		\
 	do {				\
-		*pmdp = pmd;		\
-		flush_pmd_entry(pmdp);	\
+		pmdpd[0] = pmdps[0];	\
+		pmdpd[1] = pmdps[1];	\
+		flush_pmd_entry(pmdpd);	\
 	} while (0)
 
 #define pmd_clear(pmdp)			\
@@ -319,7 +300,7 @@ PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG);
 		clean_pmd_entry(pmdp);	\
 	} while (0)
 
-static inline pte_t *pmd_page_kernel(pmd_t pmd)
+static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
 	unsigned long ptr;
 
@@ -353,9 +334,6 @@ static inline pte_t *pmd_page_kernel(pmd_t pmd)
 #define pgd_clear(pgdp)		do { } while (0)
 #define set_pgd(pgd,pgdp)	do { } while (0)
 
-#define page_pte_prot(page,prot)	mk_pte(page, prot)
-#define page_pte(page)		mk_pte(page, __pgprot(0))
-
 /* to find an entry in a page-table-directory */
 #define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
 
@@ -401,16 +379,20 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 #define HAVE_ARCH_UNMAPPED_AREA
 
 /*
- * remap a physical address `phys' of size `size' with page protection `prot'
+ * remap a physical page `pfn' of size `size' with page protection `prot'
  * into virtual address `from'
  */
-#define io_remap_page_range(vma,from,phys,size,prot) \
-		remap_page_range(vma,from,phys,size,prot)
+#define io_remap_pfn_range(vma,from,pfn,size,prot)	\
+		remap_pfn_range(vma, from, pfn, size, prot)
 
-typedef pte_t *pte_addr_t;
+#define MK_IOSPACE_PFN(space, pfn)	(pfn)
+#define GET_IOSPACE(pfn)		0
+#define GET_PFN(pfn)			(pfn)
 
 #define pgtable_cache_init() do { } while (0)
 
 #endif /* !__ASSEMBLY__ */
 
+#endif /* CONFIG_MMU */
+
 #endif /* _ASMARM_PGTABLE_H */
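
The VMALLOC_START definition added in the first hunk simply rounds high_memory up to the next 8MB boundary. Below is a minimal user-space sketch of that arithmetic, not part of the patch; the high_memory value is made up for illustration (the real kernel uses its own high_memory variable and the platform-provided VMALLOC_END).

#include <stdio.h>

#define VMALLOC_OFFSET	(8*1024*1024)

/* Same expression as the patch's VMALLOC_START, parameterised for the demo. */
static unsigned long vmalloc_start(unsigned long high_memory)
{
	return (high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET - 1UL);
}

int main(void)
{
	/* Hypothetical layout: 128MB of RAM mapped at the usual 0xc0000000 PAGE_OFFSET. */
	unsigned long high_memory = 0xc8000000UL;

	printf("high_memory   = %#lx\n", high_memory);
	printf("VMALLOC_START = %#lx\n", vmalloc_start(high_memory));
	/* Prints 0xc8800000: the next 8MB boundary above high_memory, leaving the
	 * roughly 8MB guard hole described in the comment above the definition. */
	return 0;
}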