X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-parisc%2Fpgtable.h;h=5066c54dae0ab422cf2dad44dff9a95007ff8f4f;hb=16c70f8c1b54b61c3b951b6fb220df250fe09b32;hp=1593a8a7a5fbc73d2abe3ffec8008ee1a7329e88;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git

diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
index 1593a8a7a..5066c54da 100644
--- a/include/asm-parisc/pgtable.h
+++ b/include/asm-parisc/pgtable.h
@@ -1,7 +1,8 @@
 #ifndef _PARISC_PGTABLE_H
 #define _PARISC_PGTABLE_H
 
-#include 
+#include 
+
 #include 
 
 #ifndef __ASSEMBLY__
@@ -10,6 +11,7 @@
  */
 
 #include 
+#include <linux/mm.h>		/* for vm_area_struct */
 #include 
 #include 
 #include 
@@ -37,6 +39,7 @@
 do{ \
         *(pteptr) = (pteval); \
 } while(0)
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
 #endif /* !__ASSEMBLY__ */
 
@@ -55,16 +58,15 @@
 #define ISTACK_SIZE  32768 /* Interrupt Stack Size */
 #define ISTACK_ORDER 3
 
-/* This is the size of the initially mapped kernel memory (i.e. currently
- * 0 to 1<<23 == 8MB */
+/* This is the size of the initially mapped kernel memory */
 #ifdef CONFIG_64BIT
-#define KERNEL_INITIAL_ORDER	24
+#define KERNEL_INITIAL_ORDER	24	/* 0 to 1<<24 = 16MB */
 #else
-#define KERNEL_INITIAL_ORDER	23
+#define KERNEL_INITIAL_ORDER	23	/* 0 to 1<<23 = 8MB */
 #endif
 #define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)
 
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
 #define PT_NLEVELS	3
 #define PGD_ORDER	1 /* Number of pages per pgd */
 #define PMD_ORDER	1 /* Number of pages per pmd */
@@ -107,17 +109,21 @@
 #define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
 #define MAX_ADDRESS	(1UL << MAX_ADDRBITS)
 
-#define SPACEID_SHIFT (MAX_ADDRBITS - 32)
+#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)
 
 /* This calculates the number of initial pages we need for the initial
  * page tables */
-#define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
+#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
+# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
+#else
+# define PT_INITIAL	(1)  /* all initial PTEs fit into one page */
+#endif
 
 /*
  * pgd entries used up by user/kernel:
  */
 
-#define FIRST_USER_PGD_NR	0
+#define FIRST_USER_ADDRESS	0
 
 #ifndef __ASSEMBLY__
 extern void *vmalloc_start;
@@ -156,6 +162,10 @@ extern void *vmalloc_start;
  * to zero */
 #define PTE_SHIFT	xlate_pabit(_PAGE_USER_BIT)
 
+/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
+#define PFN_PTE_SHIFT		12
+
+
 /* this is how many bits may be used by the file functions */
 #define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_SHIFT)
 
@@ -184,7 +194,8 @@
 /* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
  * are page-aligned, we don't care about the PAGE_OFFSET bits, except
  * for a few meta-information bits, so we shift the address to be
- * able to effectively address 40-bits of physical address space. */
+ * able to effectively address 40/42/44-bits of physical address space
+ * depending on 4k/16k/64k PAGE_SIZE */
 #define _PxD_PRESENT_BIT   31
 #define _PxD_ATTACHED_BIT  30
 #define _PxD_VALID_BIT     29
@@ -194,7 +205,7 @@ extern void *vmalloc_start;
 #define PxD_FLAG_VALID    (1 << xlate_pabit(_PxD_VALID_BIT))
 #define PxD_FLAG_MASK     (0xf)
 #define PxD_FLAG_SHIFT    (4)
-#define PxD_VALUE_SHIFT   (8)
+#define PxD_VALUE_SHIFT   (8)	/* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
 
 #ifndef __ASSEMBLY__
 
@@ -209,7 +220,7 @@ extern void *vmalloc_start;
 #define PAGE_COPY       PAGE_EXECREAD
 #define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
 #define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
-#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
 #define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
 #define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
 #define PAGE_FLUSH      __pgprot(_PAGE_FLUSH)
@@ -242,6 +253,7 @@
 #define __S110  PAGE_RWX
 #define __S111  PAGE_RWX
 
+
 extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */
 
 /* initial page tables for 0-8MB for kernel */
@@ -261,14 +273,14 @@ extern unsigned long *empty_zero_page;
 
 #define pte_none(x)     ((pte_val(x) == 0) || (pte_val(x) & _PAGE_FLUSH))
 #define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
-#define pte_clear(xp)	do { pte_val(*(xp)) = 0; } while (0)
+#define pte_clear(mm,addr,xp)	do { pte_val(*(xp)) = 0; } while (0)
 
 #define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
 #define pmd_address(x)	((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
 #define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
 #define pgd_address(x)	((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
 
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 /* The first entry of the permanent pmd is not there if it contains
  * the gateway marker */
 #define pmd_none(x)	(!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
@@ -278,7 +290,7 @@ extern unsigned long *empty_zero_page;
 #define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
 #define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
 static inline void pmd_clear(pmd_t *pmd) {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
 		/* This is the entry pointing to the permanent pmd
 		 * attached to the pgd; cannot clear it */
@@ -299,7 +311,7 @@ static inline void pmd_clear(pmd_t *pmd) {
 #define pgd_bad(x)	(!(pgd_flag(x) & PxD_FLAG_VALID))
 #define pgd_present(x)	(pgd_flag(x) & PxD_FLAG_PRESENT)
 static inline void pgd_clear(pgd_t *pgd) {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
 		/* This is the permanent pmd attached to the pgd; cannot
 		 * free it */
@@ -347,7 +359,7 @@ extern inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return
 ({									\
 	pte_t __pte;							\
 									\
-	pte_val(__pte) = ((addr)+pgprot_val(pgprot));			\
+	pte_val(__pte) = ((((addr)>>PAGE_SHIFT)<<PFN_PTE_SHIFT) + pgprot_val(pgprot));	\
 
-#define pte_pfn(x)		(pte_val(x) >> PAGE_SHIFT)
+#define pte_pfn(x)		(pte_val(x) >> PFN_PTE_SHIFT)
 
 #define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
 
@@ -415,7 +423,6 @@ extern void paging_init (void);
 
 #define PG_dcache_dirty         PG_arch_1
 
-struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
 extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 
 /* Encode and de-code a swap entry */
@@ -429,7 +436,7 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val })
 
-static inline int ptep_test_and_clear_young(pte_t *ptep)
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 #ifdef CONFIG_SMP
 	if (!pte_young(*ptep))
@@ -439,12 +446,12 @@ static inline int ptep_test_and_clear_young(pte_t *ptep)
 	pte_t pte = *ptep;
 	if (!pte_young(pte))
 		return 0;
-	set_pte(ptep, pte_mkold(pte));
+	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
 	return 1;
 #endif
 }
 
-static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 #ifdef CONFIG_SMP
 	if (!pte_dirty(*ptep))
@@ -454,14 +461,15 @@ static inline int ptep_test_and_clear_dirty(pte_t *ptep)
 	pte_t pte = *ptep;
 	if (!pte_dirty(pte))
 		return 0;
-	set_pte(ptep, pte_mkclean(pte));
+	set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
 	return 1;
 #endif
 }
 
 extern spinlock_t pa_dbit_lock;
 
-static inline pte_t ptep_get_and_clear(pte_t *ptep)
+struct mm_struct;
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	pte_t old_pte;
 	pte_t pte;
@@ -470,13 +478,13 @@ static inline pte_t ptep_get_and_clear(pte_t *ptep)
 	pte = old_pte = *ptep;
 	pte_val(pte) &= ~_PAGE_PRESENT;
 	pte_val(pte) |= _PAGE_FLUSH;
-	set_pte(ptep,pte);
+	set_pte_at(mm,addr,ptep,pte);
 	spin_unlock(&pa_dbit_lock);
 
 	return old_pte;
 }
 
-static inline void ptep_set_wrprotect(pte_t *ptep)
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 #ifdef CONFIG_SMP
 	unsigned long new, old;
@@ -487,17 +495,7 @@
 	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
 #else
 	pte_t old_pte = *ptep;
-	set_pte(ptep, pte_wrprotect(old_pte));
-#endif
-}
-
-static inline void ptep_mkdirty(pte_t *ptep)
-{
-#ifdef CONFIG_SMP
-	set_bit(xlate_pabit(_PAGE_DIRTY_BIT), &pte_val(*ptep));
-#else
-	pte_t old_pte = *ptep;
-	set_pte(ptep, pte_mkdirty(old_pte));
+	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
 #endif
 }
 
@@ -505,7 +503,34 @@ static inline void ptep_mkdirty(pte_t *ptep)
 
 #endif /* !__ASSEMBLY__ */
 
-#define io_remap_page_range remap_page_range
+
+/* TLB page size encoding - see table 3-1 in parisc20.pdf */
+#define _PAGE_SIZE_ENCODING_4K		0
+#define _PAGE_SIZE_ENCODING_16K	1
+#define _PAGE_SIZE_ENCODING_64K	2
+#define _PAGE_SIZE_ENCODING_256K	3
+#define _PAGE_SIZE_ENCODING_1M		4
+#define _PAGE_SIZE_ENCODING_4M		5
+#define _PAGE_SIZE_ENCODING_16M	6
+#define _PAGE_SIZE_ENCODING_64M	7
+
+#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
+#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
+#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
+# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
+#endif
+
+
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
+		remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)
+
+#define MK_IOSPACE_PFN(space, pfn)	(pfn)
+#define GET_IOSPACE(pfn)		0
+#define GET_PFN(pfn)			(pfn)
 
 /* We provide our own get_unmapped_area to provide cache coherency */
 
@@ -515,7 +540,6 @@ static inline void ptep_mkdirty(pte_t *ptep)
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTEP_MKDIRTY
 #define __HAVE_ARCH_PTE_SAME
 #include <asm-generic/pgtable.h>
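
The PFN_PTE_SHIFT, __mk_pte and pte_pfn hunks above stop storing a raw physical address in the PTE and instead store the page frame number at a fixed offset of 12 bits, which keeps the PFN field in the same place whether the kernel is built for 4K, 16K or 64K pages. The following is a minimal userspace sketch of that round trip, not kernel code; PAGE_SHIFT, the protection mask and the sample values are assumptions chosen for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      14              /* assume a 16KB kernel page size */
#define PFN_PTE_SHIFT   12              /* fixed PFN position, as in the hunk */
#define PROT_MASK       0xfffUL         /* pretend the low 12 bits hold protection bits */

/* models the new __mk_pte(): the PFN goes at PFN_PTE_SHIFT, prot bits sit below it */
static uint64_t mk_pte_model(uint64_t phys, uint64_t prot)
{
        return ((phys >> PAGE_SHIFT) << PFN_PTE_SHIFT) + (prot & PROT_MASK);
}

/* models the new pte_pfn(): the PFN comes back out of the fixed field */
static uint64_t pte_pfn_model(uint64_t pte)
{
        return pte >> PFN_PTE_SHIFT;
}

int main(void)
{
        uint64_t phys = 0x1234000UL;    /* 16KB-aligned physical address */
        uint64_t pte  = mk_pte_model(phys, 0x19f);

        /* round trip: the PFN that went in is the PFN that comes out */
        assert(pte_pfn_model(pte) == phys >> PAGE_SHIFT);
        printf("pte=0x%llx pfn=0x%llx\n",
               (unsigned long long)pte, (unsigned long long)pte_pfn_model(pte));
        return 0;
}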
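
The updated pgd/pmd comment (40/42/44 address bits for 4K/16K/64K pages) and the PxD_VALUE_SHIFT note follow from the packing used by pmd_address()/pgd_address(): a page-aligned physical address is shifted right by PAGE_SHIFT - PxD_FLAG_SHIFT so that the four PxD flag bits fit in the low nibble, letting a 32-bit entry reach 32 + PxD_VALUE_SHIFT bits of physical address. Below is a small userspace model of that packing under an assumed 4KB page size; the flag value 0x1 and the sample address are made up for illustration (the real flag bits come from xlate_pabit()).

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12                              /* assume 4KB pages */
#define PxD_FLAG_SHIFT  4
#define PxD_FLAG_MASK   0xfUL
#define PxD_VALUE_SHIFT (PAGE_SHIFT - PxD_FLAG_SHIFT)   /* 8, as noted in the hunk */

/* pack: page-aligned physical address shifted down, flag bits in the low nibble */
static uint32_t pxd_pack(uint64_t phys, uint32_t flags)
{
        return (uint32_t)(phys >> PxD_VALUE_SHIFT) | (flags & PxD_FLAG_MASK);
}

/* unpack, pmd_address()-style: mask off the flags, shift the address back up */
static uint64_t pxd_address(uint32_t pxd)
{
        return ((uint64_t)(pxd & ~PxD_FLAG_MASK)) << PxD_VALUE_SHIFT;
}

int main(void)
{
        uint64_t phys = 0xabcdef000ULL;         /* 36-bit, page-aligned address */
        uint32_t pxd  = pxd_pack(phys, 0x1);    /* 0x1 stands in for a present flag */

        printf("pxd=0x%x address=0x%llx reachable address bits=%d\n",
               pxd, (unsigned long long)pxd_address(pxd), 32 + PxD_VALUE_SHIFT);
        return 0;
}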
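
The _PAGE_SIZE_ENCODING_* values added near the end of the diff follow a simple pattern in the listed table: each successive encoding quadruples the page size, so the size in bytes is 4KB << (2 * encoding). A quick standalone check of that relationship:

#include <stdio.h>

int main(void)
{
        /* names match the encodings 0..7 listed in the diff above */
        static const char *name[] = {
                "4K", "16K", "64K", "256K", "1M", "4M", "16M", "64M"
        };

        for (int enc = 0; enc <= 7; enc++)
                printf("_PAGE_SIZE_ENCODING_%-4s = %d -> %lu bytes\n",
                       name[enc], enc, 4096UL << (2 * enc));
        return 0;
}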