diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 1168f8626..ae61aca5d 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -13,6 +13,8 @@
 #ifndef _ASM_S390_PGTABLE_H
 #define _ASM_S390_PGTABLE_H
 
+#include 
+
 /*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
@@ -29,11 +31,12 @@
 * the S390 page table tree.
 */
 #ifndef __ASSEMBLY__
+#include 
 #include 
 #include 
-#include 
 
 struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
+struct mm_struct;
 
 extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
 extern void paging_init(void);
@@ -86,20 +89,7 @@ extern char empty_zero_page[PAGE_SIZE];
 # define PTRS_PER_PGD 2048
 #endif /* __s390x__ */
 
-/*
- * pgd entries used up by user/kernel:
- */
-#ifndef __s390x__
-# define USER_PTRS_PER_PGD 512
-# define USER_PGD_PTRS 512
-# define KERNEL_PGD_PTRS 512
-# define FIRST_USER_PGD_NR 0
-#else /* __s390x__ */
-# define USER_PTRS_PER_PGD 2048
-# define USER_PGD_PTRS 2048
-# define KERNEL_PGD_PTRS 2048
-# define FIRST_USER_PGD_NR 0
-#endif /* __s390x__ */
+#define FIRST_USER_ADDRESS 0
 
 #define pte_ERROR(e) \
 printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
@@ -117,16 +107,27 @@ extern char empty_zero_page[PAGE_SIZE];
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
+extern unsigned long vmalloc_end;
 #define VMALLOC_OFFSET (8*1024*1024)
 #define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) \
 & ~(VMALLOC_OFFSET-1))
+#define VMALLOC_END vmalloc_end
+
+/*
+ * We need some free virtual space to be able to do vmalloc.
+ * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc
+ * area. On a machine with 2GB memory we make sure that we
+ * have at least 128MB free space for vmalloc. On a machine
+ * with 4TB we make sure we have at least 128GB.
+ */
 #ifndef __s390x__
-# define VMALLOC_END (0x7fffffffL)
+#define VMALLOC_MIN_SIZE 0x8000000UL
+#define VMALLOC_END_INIT 0x80000000UL
 #else /* __s390x__ */
-# define VMALLOC_END (0x40000000000L)
+#define VMALLOC_MIN_SIZE 0x2000000000UL
+#define VMALLOC_END_INIT 0x40000000000UL
 #endif /* __s390x__ */
-
 /*
 * A 31 bit pagetable entry of S390 has the following format:
 * | PFRA | | OS |
@@ -210,15 +211,44 @@ extern char empty_zero_page[PAGE_SIZE];
 */
 
 /* Hardware bits in the page table entry */
-#define _PAGE_RO 0x200 /* HW read-only */
-#define _PAGE_INVALID 0x400 /* HW invalid */
+#define _PAGE_RO 0x200 /* HW read-only bit */
+#define _PAGE_INVALID 0x400 /* HW invalid bit */
+#define _PAGE_SWT 0x001 /* SW pte type bit t */
+#define _PAGE_SWX 0x002 /* SW pte type bit x */
+
+/* Six different types of pages. */
+#define _PAGE_TYPE_EMPTY 0x400
+#define _PAGE_TYPE_NONE 0x401
+#define _PAGE_TYPE_SWAP 0x403
+#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */
+#define _PAGE_TYPE_RO 0x200
+#define _PAGE_TYPE_RW 0x000
 
-/* Mask and four different kinds of invalid pages. */
-#define _PAGE_INVALID_MASK 0x601
-#define _PAGE_INVALID_EMPTY 0x400
-#define _PAGE_INVALID_NONE 0x401
-#define _PAGE_INVALID_SWAP 0x600
-#define _PAGE_INVALID_FILE 0x601
+/*
+ * PTE type bits are rather complicated.
+ * handle_pte_fault uses pte_present,
+ * pte_none and pte_file to find out the pte type WITHOUT holding the page
+ * table lock. ptep_clear_flush on the other hand uses the ipte instruction
+ * to invalidate a given pte. ipte sets the hw invalid bit and clears all
+ * tlbs for the page. The page table entry is set to _PAGE_TYPE_EMPTY
+ * afterwards. This change is done while holding the lock, but the
+ * intermediate step of a previously valid pte with the hw invalid bit set
+ * can be observed by handle_pte_fault. All valid pte types with the hw
+ * invalid bit set must therefore be distinguishable from the four pte
+ * types empty, none, swap and file.
+ *
+ * irxt ipte irxt
+ * _PAGE_TYPE_EMPTY 1000 -> 1000
+ * _PAGE_TYPE_NONE 1001 -> 1001
+ * _PAGE_TYPE_SWAP 1011 -> 1011
+ * _PAGE_TYPE_FILE 11?1 -> 11?1
+ * _PAGE_TYPE_RO 0100 -> 1100
+ * _PAGE_TYPE_RW 0000 -> 1000
+ *
+ * pte_none is true for bit combinations 1000, 1100
+ * pte_present is true for bit combinations 0000, 0010, 0100, 0110, 1001
+ * pte_file is true for bit combinations 1101, 1111
+ * the swap pte is 1011; 0001, 0011, 0101, 0111, 1010 and 1110 are invalid.
+ */
 
 #ifndef __s390x__
 
@@ -277,15 +307,14 @@ extern char empty_zero_page[PAGE_SIZE];
 #endif /* __s390x__ */
 
 /*
- * No mapping available
+ * Page protection definitions.
 */
-#define PAGE_NONE_SHARED __pgprot(_PAGE_INVALID_NONE)
-#define PAGE_NONE_PRIVATE __pgprot(_PAGE_INVALID_NONE)
-#define PAGE_RO_SHARED __pgprot(_PAGE_RO)
-#define PAGE_RO_PRIVATE __pgprot(_PAGE_RO)
-#define PAGE_COPY __pgprot(_PAGE_RO)
-#define PAGE_SHARED __pgprot(0)
-#define PAGE_KERNEL __pgprot(0)
+#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
+#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
+#define PAGE_RW __pgprot(_PAGE_TYPE_RW)
+
+#define PAGE_KERNEL PAGE_RW
+#define PAGE_COPY PAGE_RO
 
 /*
 * The S390 can't do page protection for execute, and considers that the
@@ -293,98 +322,102 @@ extern char empty_zero_page[PAGE_SIZE];
 * the closest we can get..
 */
 /*xwr*/
-#define __P000 PAGE_NONE_PRIVATE
-#define __P001 PAGE_RO_PRIVATE
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_RO_PRIVATE
-#define __P101 PAGE_RO_PRIVATE
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE_SHARED
-#define __S001 PAGE_RO_SHARED
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_RO_SHARED
-#define __S101 PAGE_RO_SHARED
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
+#define __P000 PAGE_NONE
+#define __P001 PAGE_RO
+#define __P010 PAGE_RO
+#define __P011 PAGE_RO
+#define __P100 PAGE_RO
+#define __P101 PAGE_RO
+#define __P110 PAGE_RO
+#define __P111 PAGE_RO
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_RO
+#define __S010 PAGE_RW
+#define __S011 PAGE_RW
+#define __S100 PAGE_RO
+#define __S101 PAGE_RO
+#define __S110 PAGE_RW
+#define __S111 PAGE_RW
 
 /*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
-extern inline void set_pte(pte_t *pteptr, pte_t pteval)
+static inline void set_pte(pte_t *pteptr, pte_t pteval)
 {
 *pteptr = pteval;
 }
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
 /*
 * pgd/pmd/pte query functions
 */
 #ifndef __s390x__
 
-extern inline int pgd_present(pgd_t pgd) { return 1; }
-extern inline int pgd_none(pgd_t pgd) { return 0; }
-extern inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
 
-extern inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; }
-extern inline int pmd_none(pmd_t pmd) { return pmd_val(pmd) & _PAGE_TABLE_INV; }
-extern inline int pmd_bad(pmd_t pmd)
+static inline int pmd_present(pmd_t pmd) { return pmd_val(pmd) & _SEG_PRESENT; }
+static inline int pmd_none(pmd_t pmd) { return pmd_val(pmd) & _PAGE_TABLE_INV; }
+static inline int pmd_bad(pmd_t pmd)
 {
 return (pmd_val(pmd) & (~PAGE_MASK & ~_PAGE_TABLE_INV)) != _PAGE_TABLE;
 }
 
 #else /* __s390x__ */
 
-extern inline int pgd_present(pgd_t pgd)
+static inline int pgd_present(pgd_t pgd)
 {
 return (pgd_val(pgd) & ~PAGE_MASK) == _PGD_ENTRY;
 }
 
-extern inline int pgd_none(pgd_t pgd)
+static inline int pgd_none(pgd_t pgd)
 {
 return pgd_val(pgd) & _PGD_ENTRY_INV;
 }
 
-extern inline int pgd_bad(pgd_t pgd)
+static inline int pgd_bad(pgd_t pgd)
 {
 return (pgd_val(pgd) & (~PAGE_MASK & ~_PGD_ENTRY_INV)) != _PGD_ENTRY;
 }
 
-extern inline int pmd_present(pmd_t pmd)
+static inline int pmd_present(pmd_t pmd)
 {
 return (pmd_val(pmd) & ~PAGE_MASK) == _PMD_ENTRY;
 }
 
-extern inline int pmd_none(pmd_t pmd)
+static inline int pmd_none(pmd_t pmd)
 {
 return pmd_val(pmd) & _PMD_ENTRY_INV;
 }
 
-extern inline int pmd_bad(pmd_t pmd)
+static inline int pmd_bad(pmd_t pmd)
 {
 return (pmd_val(pmd) & (~PAGE_MASK & ~_PMD_ENTRY_INV)) != _PMD_ENTRY;
 }
 
 #endif /* __s390x__ */
 
-extern inline int pte_none(pte_t pte)
+static inline int pte_none(pte_t pte)
 {
- return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_EMPTY;
+ return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
 }
 
-extern inline int pte_present(pte_t pte)
+static inline int pte_present(pte_t pte)
 {
- return !(pte_val(pte) & _PAGE_INVALID) ||
- (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_NONE;
+ unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
+ return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
+ (!(pte_val(pte) & _PAGE_INVALID) &&
+ !(pte_val(pte) & _PAGE_SWT));
 }
 
-extern inline int pte_file(pte_t pte)
+static inline int pte_file(pte_t pte)
 {
- return (pte_val(pte) & _PAGE_INVALID_MASK) == _PAGE_INVALID_FILE;
+ unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
+ return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
 }
 
 #define pte_same(a,b) (pte_val(a) == pte_val(b))
@@ -393,12 +426,12 @@ extern inline int pte_file(pte_t pte)
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
-extern inline int pte_write(pte_t pte)
+static inline int pte_write(pte_t pte)
 {
 return (pte_val(pte) & _PAGE_RO) == 0;
 }
 
-extern inline int pte_dirty(pte_t pte)
+static inline int pte_dirty(pte_t pte)
 {
 /* A pte is neither clean nor dirty on s/390. The dirty bit
 * is in the storage key. See page_test_and_clear_dirty for
@@ -407,7 +440,7 @@ extern inline pte_t pte_dirty(pte_t pte)
 return 0;
 }
 
-extern inline int pte_young(pte_t pte)
+static inline int pte_young(pte_t pte)
 {
 /* A pte is neither young nor old on s/390. The young bit
 * is in the storage key. See page_test_and_clear_young for
@@ -416,15 +449,23 @@ extern inline pte_t pte_young(pte_t pte)
 return 0;
 }
 
+static inline int pte_read(pte_t pte)
+{
+ /* All pages are readable since we don't use the fetch
+ * protection bit in the storage key.
+ */
+ return 1;
+}
+
 /*
 * pgd/pmd/pte modification functions
 */
 
 #ifndef __s390x__
 
-extern inline void pgd_clear(pgd_t * pgdp) { }
+static inline void pgd_clear(pgd_t * pgdp) { }
 
-extern inline void pmd_clear(pmd_t * pmdp)
+static inline void pmd_clear(pmd_t * pmdp)
 {
 pmd_val(pmdp[0]) = _PAGE_TABLE_INV;
 pmd_val(pmdp[1]) = _PAGE_TABLE_INV;
@@ -434,12 +475,12 @@ extern inline void pmd_clear(pmd_t * pmdp)
 
 #else /* __s390x__ */
 
-extern inline void pgd_clear(pgd_t * pgdp)
+static inline void pgd_clear(pgd_t * pgdp)
 {
 pgd_val(*pgdp) = _PGD_ENTRY_INV | _PGD_ENTRY;
 }
 
-extern inline void pmd_clear(pmd_t * pmdp)
+static inline void pmd_clear(pmd_t * pmdp)
 {
 pmd_val(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
 pmd_val1(*pmdp) = _PMD_ENTRY_INV | _PMD_ENTRY;
@@ -447,37 +488,37 @@ extern inline void pmd_clear(pmd_t * pmdp)
 
 #endif /* __s390x__ */
 
-extern inline void pte_clear(pte_t *ptep)
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
- pte_val(*ptep) = _PAGE_INVALID_EMPTY;
+ pte_val(*ptep) = _PAGE_TYPE_EMPTY;
 }
 
 /*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
-extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 pte_val(pte) &= PAGE_MASK;
 pte_val(pte) |= pgprot_val(newprot);
 return pte;
 }
 
-extern inline pte_t pte_wrprotect(pte_t pte)
+static inline pte_t pte_wrprotect(pte_t pte)
 {
- /* Do not clobber _PAGE_INVALID_NONE pages! */
+ /* Do not clobber _PAGE_TYPE_NONE pages! */
 if (!(pte_val(pte) & _PAGE_INVALID))
 pte_val(pte) |= _PAGE_RO;
 return pte;
 }
 
-extern inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkwrite(pte_t pte)
 {
 pte_val(pte) &= ~_PAGE_RO;
 return pte;
 }
 
-extern inline pte_t pte_mkclean(pte_t pte)
+static inline pte_t pte_mkclean(pte_t pte)
 {
 /* The only user of pte_mkclean is the fork() code.
 We must *not* clear the *physical* page dirty bit
@@ -486,7 +527,7 @@ extern inline pte_t pte_mkclean(pte_t pte)
 return pte;
 }
 
-extern inline pte_t pte_mkdirty(pte_t pte)
+static inline pte_t pte_mkdirty(pte_t pte)
 {
 /* We do not explicitly set the dirty bit because the
 * sske instruction is slow. It is faster to let the
@@ -495,7 +536,7 @@ extern inline pte_t pte_mkdirty(pte_t pte)
 return pte;
 }
 
-extern inline pte_t pte_mkold(pte_t pte)
+static inline pte_t pte_mkold(pte_t pte)
 {
 /* S/390 doesn't keep its dirty/referenced bit in the pte.
 * There is no point in clearing the real referenced bit.
@@ -503,7 +544,7 @@ extern inline pte_t pte_mkold(pte_t pte)
 return pte;
 }
 
-extern inline pte_t pte_mkyoung(pte_t pte)
+static inline pte_t pte_mkyoung(pte_t pte)
 {
 /* S/390 doesn't keep its dirty/referenced bit in the pte.
 * There is no point in setting the real referenced bit.
@@ -511,7 +552,7 @@ extern inline pte_t pte_mkyoung(pte_t pte)
 */
 return pte;
 }
 
-static inline int ptep_test_and_clear_young(pte_t *ptep)
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 return 0;
 }
@@ -521,10 +562,10 @@ ptep_clear_flush_young(struct vm_area_struct *vma,
 unsigned long address, pte_t *ptep)
 {
 /* No need to flush TLB; bits are in storage key */
- return ptep_test_and_clear_young(ptep);
+ return ptep_test_and_clear_young(vma, address, ptep);
 }
 
-static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 return 0;
 }
@@ -534,58 +575,62 @@ ptep_clear_flush_dirty(struct vm_area_struct *vma,
 unsigned long address, pte_t *ptep)
 {
 /* No need to flush TLB; bits are in storage key */
- return ptep_test_and_clear_dirty(ptep);
+ return ptep_test_and_clear_dirty(vma, address, ptep);
 }
 
-static inline pte_t ptep_get_and_clear(pte_t *ptep)
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 pte_t pte = *ptep;
- pte_clear(ptep);
+ pte_clear(mm, addr, ptep);
 return pte;
 }
 
-static inline pte_t
-ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
+static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
 {
- pte_t pte = *ptep;
+ if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 #ifndef __s390x__
- if (!(pte_val(pte) & _PAGE_INVALID)) {
 /* S390 has 1mb segments, we are emulating 4MB segments */
 pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
- __asm__ __volatile__ ("ipte %2,%3"
- : "=m" (*ptep) : "m" (*ptep),
- "a" (pto), "a" (address) );
+#else
+ /* ipte in zarch mode can do the math */
+ pte_t *pto = ptep;
+#endif
+ asm volatile(
+ " ipte %2,%3"
+ : "=m" (*ptep) : "m" (*ptep),
+ "a" (pto), "a" (address));
 }
-#else /* __s390x__ */
- if (!(pte_val(pte) & _PAGE_INVALID))
- __asm__ __volatile__ ("ipte %2,%3"
- : "=m" (*ptep) : "m" (*ptep),
- "a" (ptep), "a" (address) );
-#endif /* __s390x__ */
- pte_clear(ptep);
- return pte;
+ pte_val(*ptep) = _PAGE_TYPE_EMPTY;
 }
 
-static inline void ptep_set_wrprotect(pte_t *ptep)
+static inline pte_t
+ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
 {
- pte_t old_pte = *ptep;
- set_pte(ptep, pte_wrprotect(old_pte));
+ pte_t pte = *ptep;
+
+ __ptep_ipte(address, ptep);
+ return pte;
 }
 
-static inline void ptep_mkdirty(pte_t *ptep)
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
- pte_mkdirty(*ptep);
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
 }
 
 static inline void ptep_establish(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep, pte_t entry)
+ unsigned long address, pte_t *ptep,
+ pte_t entry)
 {
 ptep_clear_flush(vma, address, ptep);
 set_pte(ptep, entry);
 }
 
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+ ptep_establish(__vma, __address, __ptep, __entry)
+
 /*
 * Test and clear dirty bit in storage key.
 * We can't clear the changed bit atomically. This is a potential
@@ -593,34 +638,31 @@ ptep_establish(struct vm_area_struct *vma,
 * should therefore only be called if it is not mapped in any
 * address space.
 */
-#define page_test_and_clear_dirty(page) \
-({ \
- struct page *__page = (page); \
- unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
- int __skey; \
- asm volatile ("iske %0,%1" : "=d" (__skey) : "a" (__physpage)); \
- if (__skey & _PAGE_CHANGED) { \
- asm volatile ("sske %0,%1" \
- : : "d" (__skey & ~_PAGE_CHANGED), \
- "a" (__physpage)); \
- } \
- (__skey & _PAGE_CHANGED); \
-})
+static inline int page_test_and_clear_dirty(struct page *page)
+{
+ unsigned long physpage = page_to_phys(page);
+ int skey = page_get_storage_key(physpage);
+
+ if (skey & _PAGE_CHANGED)
+ page_set_storage_key(physpage, skey & ~_PAGE_CHANGED);
+ return skey & _PAGE_CHANGED;
+}
 
 /*
 * Test and clear referenced bit in storage key.
 */
-#define page_test_and_clear_young(page) \
-({ \
- struct page *__page = (page); \
- unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
- int __ccode; \
- asm volatile ("rrbe 0,%1\n\t" \
- "ipm %0\n\t" \
- "srl %0,28\n\t" \
- : "=d" (__ccode) : "a" (__physpage) : "cc" ); \
- (__ccode & 2); \
-})
+static inline int page_test_and_clear_young(struct page *page)
+{
+ unsigned long physpage = page_to_phys(page);
+ int ccode;
+
+ asm volatile(
+ " rrbe 0,%1\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (ccode) : "a" (physpage) : "cc" );
+ return ccode & 2;
+}
 
 /*
 * Conversion functions: convert a page and protection to a page entry,
@@ -633,52 +675,44 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 return __pte;
 }
 
-#define mk_pte(pg, pgprot) \
-({ \
- struct page *__page = (pg); \
- pgprot_t __pgprot = (pgprot); \
- unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
- pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
- __pte; \
-})
-
-#define pfn_pte(pfn, pgprot) \
-({ \
- pgprot_t __pgprot = (pgprot); \
- unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \
- pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
- __pte; \
-})
-
-#define arch_set_page_uptodate(__page) \
- do { \
- asm volatile ("sske %0,%1" : : "d" (0), \
- "a" (__pa((__page-mem_map) << PAGE_SHIFT)));\
- } while (0)
+static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
+{
+ unsigned long physpage = page_to_phys(page);
+
+ return mk_pte_phys(physpage, pgprot);
+}
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+ unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
+
+ return mk_pte_phys(physpage, pgprot);
+}
 
 #ifdef __s390x__
 
-#define pfn_pmd(pfn, pgprot) \
-({ \
- pgprot_t __pgprot = (pgprot); \
- unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \
- pmd_t __pmd = __pmd(__physpage + pgprot_val(__pgprot)); \
- __pmd; \
-})
+static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
+{
+ unsigned long physpage = __pa((pfn) << PAGE_SHIFT);
+
+ return __pmd(physpage + pgprot_val(pgprot));
+}
 
 #endif /* __s390x__ */
 
 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
 #define pte_page(x) pfn_to_page(pte_pfn(x))
 
-#define pmd_page_kernel(pmd) (pmd_val(pmd) & PAGE_MASK)
+#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
+
+#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 
-#define pmd_page(pmd) (mem_map+(pmd_val(pmd) >> PAGE_SHIFT))
+#define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK)
 
-#define pgd_page_kernel(pgd) (pgd_val(pgd) & PAGE_MASK)
+#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
 
 /* to find an entry in a page-table-directory */
-#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
 #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
 
 /* to find an entry in a kernel page-table-directory */
@@ -687,7 +721,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 
 #ifndef __s390x__
 
 /* Find an entry in the second-level page table.. */
-extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
+static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 {
 return (pmd_t *) dir;
 }
@@ -697,14 +731,14 @@ extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 /* Find an entry in the second-level page table.. */
 #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
 #define pmd_offset(dir,addr) \
- ((pmd_t *) pgd_page_kernel(*(dir)) + pmd_index(addr))
+ ((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(addr))
 
 #endif /* __s390x__ */
 
 /* Find an entry in the third-level page table.. */
 #define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
 #define pte_offset_kernel(pmd, address) \
- ((pte_t *) pmd_page_kernel(*(pmd)) + pte_index(address))
+ ((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
 #define pte_unmap(pte) do { } while (0)
@@ -745,11 +779,17 @@ extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
-extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
+#ifndef __s390x__
+#define __SWP_OFFSET_MASK (~0UL >> 12)
+#else
+#define __SWP_OFFSET_MASK (~0UL >> 11)
+#endif
+static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 {
 pte_t pte;
- pte_val(pte) = _PAGE_INVALID_SWAP | ((type & 0x1f) << 2) |
- ((offset & 1) << 7) | ((offset & 0xffffe) << 11);
+ offset &= __SWP_OFFSET_MASK;
+ pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
+ ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
 return pte;
 }
 
@@ -760,8 +800,6 @@ extern inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
 
-typedef pte_t *pte_addr_t;
-
 #ifndef __s390x__
 # define PTE_FILE_MAX_BITS 26
 #else /* __s390x__ */
@@ -773,22 +811,25 @@ typedef pte_t *pte_addr_t;
 
 #define pgoff_to_pte(__off) \
 ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
- | _PAGE_INVALID_FILE })
+ | _PAGE_TYPE_FILE })
 
 #endif /* !__ASSEMBLY__ */
 
 #define kern_addr_valid(addr) (1)
 
+extern int add_shared_memory(unsigned long start, unsigned long size);
+extern int remove_shared_memory(unsigned long start, unsigned long size);
+
 /*
 * No page table caches to initialise
 */
 #define pgtable_cache_init() do { } while (0)
 
-#ifdef __s390x__
-# define HAVE_ARCH_UNMAPPED_AREA
-#endif /* __s390x__ */
+#define __HAVE_ARCH_MEMMAP_INIT
+extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
 
 #define __HAVE_ARCH_PTEP_ESTABLISH
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
@@ -796,7 +837,6 @@ typedef pte_t *pte_addr_t;
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_CLEAR_FLUSH
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTEP_MKDIRTY
 #define __HAVE_ARCH_PTE_SAME
 #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
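
As a quick illustration of the pte type scheme this patch introduces, the
following standalone C program (a sketch only, not part of the patch: the
my_* helpers are made-up stand-ins for the kernel inlines, and my_ipte
models just the documented effect of the ipte instruction, i.e. setting the
hw invalid bit) re-implements the new pte_none/pte_present/pte_file tests
on a bare unsigned long and asserts the irxt table from the comment — in
particular that a valid RO/RW pte hit by ipte is still classified as "none"
by a racing handle_pte_fault, never as swap or file.

/* pte_types.c - userspace sketch of the s390 software pte type bits. */
#include <assert.h>
#include <stdio.h>

#define _PAGE_RO	0x200	/* HW read-only bit */
#define _PAGE_INVALID	0x400	/* HW invalid bit */
#define _PAGE_SWT	0x001	/* SW pte type bit t */
#define _PAGE_SWX	0x002	/* SW pte type bit x */

#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 carries the offset */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000

/* Same logic as the new pte_none/pte_present/pte_file inlines. */
static int my_pte_none(unsigned long pte)
{
	return (pte & _PAGE_INVALID) && !(pte & _PAGE_SWT);
}

static int my_pte_present(unsigned long pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;

	return (pte & mask) == _PAGE_TYPE_NONE ||
		(!(pte & _PAGE_INVALID) && !(pte & _PAGE_SWT));
}

static int my_pte_file(unsigned long pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;

	return (pte & mask) == _PAGE_TYPE_FILE;
}

/* ipte sets the hw invalid bit; the software bits survive. */
static unsigned long my_ipte(unsigned long pte)
{
	return pte | _PAGE_INVALID;
}

int main(void)
{
	/* The race described above: ipte on a valid RO/RW pte
	 * (0100/0000 -> 1100/1000) must look like "none" to a
	 * concurrent handle_pte_fault, never like swap or file. */
	assert(my_pte_none(my_ipte(_PAGE_TYPE_RW)));
	assert(my_pte_none(my_ipte(_PAGE_TYPE_RO)));
	assert(!my_pte_file(my_ipte(_PAGE_TYPE_RO)));

	/* The six documented types classify as expected. */
	assert(my_pte_none(_PAGE_TYPE_EMPTY));
	assert(my_pte_present(_PAGE_TYPE_NONE));
	assert(my_pte_present(_PAGE_TYPE_RO));
	assert(my_pte_present(_PAGE_TYPE_RW));
	assert(my_pte_file(_PAGE_TYPE_FILE));
	assert(my_pte_file(_PAGE_TYPE_FILE | _PAGE_SWX));	/* 1101, 1111 */

	/* A swap pte is neither none, present nor file. */
	assert(!my_pte_none(_PAGE_TYPE_SWAP));
	assert(!my_pte_present(_PAGE_TYPE_SWAP));
	assert(!my_pte_file(_PAGE_TYPE_SWAP));

	printf("pte type table checks out\n");
	return 0;
}

Built with any C compiler (e.g. cc pte_types.c && ./a.out), the asserts
encode exactly the bit combinations listed in the pgtable.h comment.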