X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-generic%2Fpgtable.h;h=1f4ec7b702706ed643c799e034c128595b3527a7;hb=f7f1b0f1e2fbadeab12d24236000e778aa9b1ead;hp=29573197c3eebf695da3f776b08baacc837ef378;hpb=9bf4aaab3e101692164d49b7ca357651eb691cb6;p=linux-2.6.git

diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 29573197c..1f4ec7b70 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -13,11 +13,19 @@
  * Note: the old pte is known to not be writable, so we don't need to
  * worry about dirty bits etc getting lost.
  */
+#ifndef __HAVE_ARCH_SET_PTE_ATOMIC
 #define ptep_establish(__vma, __address, __ptep, __entry)		\
 do {									\
-	set_pte(__ptep, __entry);					\
+	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	\
 	flush_tlb_page(__vma, __address);				\
 } while (0)
+#else /* __HAVE_ARCH_SET_PTE_ATOMIC */
+#define ptep_establish(__vma, __address, __ptep, __entry)		\
+do {									\
+	set_pte_atomic(__ptep, __entry);				\
+	flush_tlb_page(__vma, __address);				\
+} while (0)
+#endif /* __HAVE_ARCH_SET_PTE_ATOMIC */
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
@@ -29,26 +37,30 @@ do {									\
  */
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
 do {									\
-	set_pte(__ptep, __entry);					\
+	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	\
 	flush_tlb_page(__vma, __address);				\
 } while (0)
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(pte_t *ptep)
-{
-	pte_t pte = *ptep;
-	if (!pte_young(pte))
-		return 0;
-	set_pte(ptep, pte_mkold(pte));
-	return 1;
-}
+#define ptep_test_and_clear_young(__vma, __address, __ptep)		\
+({									\
+	pte_t __pte = *(__ptep);					\
+	int r = 1;							\
+	if (!pte_young(__pte))						\
+		r = 0;							\
+	else								\
+		set_pte_at((__vma)->vm_mm, (__address),			\
+			   (__ptep), pte_mkold(__pte));			\
+	r;								\
+})
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
 #define ptep_clear_flush_young(__vma, __address, __ptep)		\
 ({									\
-	int __young = ptep_test_and_clear_young(__ptep);		\
+	int __young;							\
+	__young = ptep_test_and_clear_young(__vma, __address, __ptep);	\
 	if (__young)							\
 		flush_tlb_page(__vma, __address);			\
 	__young;							\
@@ -56,20 +68,24 @@ static inline int ptep_test_and_clear_young(pte_t *ptep)
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(pte_t *ptep)
-{
-	pte_t pte = *ptep;
-	if (!pte_dirty(pte))
-		return 0;
-	set_pte(ptep, pte_mkclean(pte));
-	return 1;
-}
+#define ptep_test_and_clear_dirty(__vma, __address, __ptep)		\
+({									\
+	pte_t __pte = *__ptep;						\
+	int r = 1;							\
+	if (!pte_dirty(__pte))						\
+		r = 0;							\
+	else								\
+		set_pte_at((__vma)->vm_mm, (__address), (__ptep),	\
+			   pte_mkclean(__pte));				\
+	r;								\
+})
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
 #define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
 ({									\
-	int __dirty = ptep_test_and_clear_dirty(__ptep);		\
+	int __dirty;							\
+	__dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep);	\
 	if (__dirty)							\
 		flush_tlb_page(__vma, __address);			\
 	__dirty;							\
@@ -77,36 +93,29 @@ static inline int ptep_test_and_clear_dirty(pte_t *ptep)
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(pte_t *ptep)
-{
-	pte_t pte = *ptep;
-	pte_clear(ptep);
-	return pte;
-}
+#define ptep_get_and_clear(__mm, __address, __ptep)			\
+({									\
+	pte_t __pte = *(__ptep);					\
+	pte_clear((__mm), (__address), (__ptep));			\
+	__pte;								\
+})
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
 #define ptep_clear_flush(__vma, __address, __ptep)			\
 ({									\
-	pte_t __pte = ptep_get_and_clear(__ptep);			\
+	pte_t __pte;							\
+	__pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep);	\
 	flush_tlb_page(__vma, __address);				\
 	__pte;								\
 })
 #endif
 
 #ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(pte_t *ptep)
-{
-	pte_t old_pte = *ptep;
-	set_pte(ptep, pte_wrprotect(old_pte));
-}
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_MKDIRTY
-static inline void ptep_mkdirty(pte_t *ptep)
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
 {
 	pte_t old_pte = *ptep;
-	set_pte(ptep, pte_mkdirty(old_pte));
+	set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
 }
 #endif
 
@@ -126,4 +135,77 @@ static inline void ptep_mkdirty(pte_t *ptep)
 #define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
 #endif
 
+#ifndef __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
+#define lazy_mmu_prot_update(pte)	do { } while (0)
+#endif
+
+/*
+ * When walking page tables, get the address of the next boundary,
+ * or the end address of the range if that comes earlier.  Although no
+ * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
+ */
+
+#define pgd_addr_end(addr, end)						\
+({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+
+#ifndef pud_addr_end
+#define pud_addr_end(addr, end)						\
+({	unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK;	\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+#endif
+
+#ifndef pmd_addr_end
+#define pmd_addr_end(addr, end)						\
+({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
+	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
+})
+#endif
+
+#ifndef __ASSEMBLY__
+/*
+ * When walking page tables, we usually want to skip any p?d_none entries;
+ * and any p?d_bad entries - reporting the error before resetting to none.
+ * Do the tests inline, but report and clear the bad entry in mm/memory.c.
+ */
+void pgd_clear_bad(pgd_t *);
+void pud_clear_bad(pud_t *);
+void pmd_clear_bad(pmd_t *);
+
+static inline int pgd_none_or_clear_bad(pgd_t *pgd)
+{
+	if (pgd_none(*pgd))
+		return 1;
+	if (unlikely(pgd_bad(*pgd))) {
+		pgd_clear_bad(pgd);
+		return 1;
+	}
+	return 0;
+}
+
+static inline int pud_none_or_clear_bad(pud_t *pud)
+{
+	if (pud_none(*pud))
+		return 1;
+	if (unlikely(pud_bad(*pud))) {
+		pud_clear_bad(pud);
+		return 1;
+	}
+	return 0;
+}
+
+static inline int pmd_none_or_clear_bad(pmd_t *pmd)
+{
+	if (pmd_none(*pmd))
+		return 1;
+	if (unlikely(pmd_bad(*pmd))) {
+		pmd_clear_bad(pmd);
+		return 1;
+	}
+	return 0;
+}
+#endif /* !__ASSEMBLY__ */
+
 #endif /* _ASM_GENERIC_PGTABLE_H */
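
For reference, a minimal sketch of how the p?d_addr_end() and p?d_none_or_clear_bad() helpers introduced above are meant to be combined when walking a range of page tables; this follows the do/while pattern these helpers were added to support (cf. the walkers in mm/memory.c). The function name walk_pgd_range_sketch() and the placeholder comment in the loop body are hypothetical and not part of the patch.

/* Illustrative sketch only, assuming the helpers from the patched header. */
static void walk_pgd_range_sketch(struct mm_struct *mm,
				  unsigned long addr, unsigned long end)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* top-level entry covering addr */
	unsigned long next;

	do {
		/* Clamp to the next PGDIR boundary, or to 'end' if that comes first. */
		next = pgd_addr_end(addr, end);
		/* Skip empty entries; report and clear corrupt ones. */
		if (pgd_none_or_clear_bad(pgd))
			continue;
		/* ... descend to the pud level for [addr, next) here ... */
	} while (pgd++, addr = next, addr != end);
}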