X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-ia64%2Fpgtable.h;h=fcc9c3344ab40f3ebd3891eb273fd1898f9e3fdc;hb=f7f1b0f1e2fbadeab12d24236000e778aa9b1ead;hp=594f73f8b5744bb595d92c83641790a3c623d28f;hpb=e3f6fb6212a7102bdb56ba38fa1e98fe72950475;p=linux-2.6.git

diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 594f73f8b..fcc9c3344 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -8,7 +8,7 @@
  * This hopefully works with any (fixed) IA-64 page-size, as defined
  * in <asm/page.h>.
  *
- * Copyright (C) 1998-2004 Hewlett-Packard Co
+ * Copyright (C) 1998-2005 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
@@ -93,7 +93,7 @@
 #define PGDIR_MASK		(~(PGDIR_SIZE-1))
 #define PTRS_PER_PGD		(1UL << (PAGE_SHIFT-3))
 #define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
-#define FIRST_USER_PGD_NR	0
+#define FIRST_USER_ADDRESS	0
 
 /*
  * Definitions for second level:
@@ -202,6 +202,7 @@ ia64_phys_addr_valid (unsigned long addr)
  * the PTE in a page table.  Nothing special needs to be on IA-64.
  */
 #define set_pte(ptep, pteval)	(*(ptep) = (pteval))
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
 #define RGN_SIZE	(1UL << 61)
 #define RGN_KERNEL	7
@@ -243,7 +244,7 @@ ia64_phys_addr_valid (unsigned long addr)
 
 #define pte_none(pte) 			(!pte_val(pte))
 #define pte_present(pte)		(pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
-#define pte_clear(pte)			(pte_val(*(pte)) = 0UL)
+#define pte_clear(mm,addr,pte)		(pte_val(*(pte)) = 0UL)
 /* pte_page() returns the "struct page *" corresponding to the PTE:	*/
 #define pte_page(pte)			virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))
 
@@ -345,7 +346,7 @@ pgd_offset (struct mm_struct *mm, unsigned long address)
 /* atomic versions of the some PTE manipulations: */
 
 static inline int
-ptep_test_and_clear_young (pte_t *ptep)
+ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 #ifdef CONFIG_SMP
 	if (!pte_young(*ptep))
@@ -355,13 +356,13 @@
 	pte_t pte = *ptep;
 	if (!pte_young(pte))
 		return 0;
-	set_pte(ptep, pte_mkold(pte));
+	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
 	return 1;
 #endif
 }
 
 static inline int
-ptep_test_and_clear_dirty (pte_t *ptep)
+ptep_test_and_clear_dirty (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 #ifdef CONFIG_SMP
 	if (!pte_dirty(*ptep))
@@ -371,25 +372,25 @@
 	pte_t pte = *ptep;
 	if (!pte_dirty(pte))
 		return 0;
-	set_pte(ptep, pte_mkclean(pte));
+	set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
 	return 1;
 #endif
 }
 
 static inline pte_t
-ptep_get_and_clear (pte_t *ptep)
+ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 #ifdef CONFIG_SMP
 	return __pte(xchg((long *) ptep, 0));
 #else
 	pte_t pte = *ptep;
-	pte_clear(ptep);
+	pte_clear(mm, addr, ptep);
 	return pte;
 #endif
 }
 
 static inline void
-ptep_set_wrprotect (pte_t *ptep)
+ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 #ifdef CONFIG_SMP
 	unsigned long new, old;
@@ -400,18 +401,7 @@
 	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
 #else
 	pte_t old_pte = *ptep;
-	set_pte(ptep, pte_wrprotect(old_pte));
-#endif
-}
-
-static inline void
-ptep_mkdirty (pte_t *ptep)
-{
-#ifdef CONFIG_SMP
-	set_bit(_PAGE_D_BIT, ptep);
-#else
-	pte_t old_pte = *ptep;
-	set_pte(ptep, pte_mkdirty(old_pte));
+	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
 #endif
 }
 
@@ -421,6 +411,8 @@ pte_same (pte_t a, pte_t b)
 	return pte_val(a) == pte_val(b);
 }
 
+#define update_mmu_cache(vma, address, pte) do { } while (0)
+
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init (void);
 
@@ -457,6 +449,13 @@ extern void paging_init (void);
 #define io_remap_page_range(vma, vaddr, paddr, size, prot)		\
 		remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
 
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
+		remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#define MK_IOSPACE_PFN(space, pfn)	(pfn)
+#define GET_IOSPACE(pfn)		0
+#define GET_PFN(pfn)			(pfn)
+
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -473,8 +472,8 @@ extern struct page *zero_page_memmap_ptr;
 #define HUGETLB_PGDIR_SIZE	(__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
 #define HUGETLB_PGDIR_MASK	(~(HUGETLB_PGDIR_SIZE-1))
 struct mmu_gather;
-extern void hugetlb_free_pgtables(struct mmu_gather *tlb,
-		struct vm_area_struct * prev, unsigned long start, unsigned long end);
+void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+		unsigned long end, unsigned long floor, unsigned long ceiling);
 #endif
 
 /*
@@ -482,7 +481,7 @@ extern void hugetlb_free_pgtables(struct mmu_gather *tlb,
  * information.  However, we use this routine to take care of any (delayed) i-cache
  * flushing that may be necessary.
  */
-extern void update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte);
+extern void lazy_mmu_prot_update (pte_t pte);
 
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 /*
@@ -552,16 +551,21 @@ do { \
 
 /* These tell get_user_pages() that the first gate page is accessible from user-level.  */
 #define FIXADDR_USER_START	GATE_ADDR
-#define FIXADDR_USER_END	(GATE_ADDR + 2*PERCPU_PAGE_SIZE)
+#ifdef HAVE_BUGGY_SEGREL
+# define FIXADDR_USER_END	(GATE_ADDR + 2*PAGE_SIZE)
+#else
+# define FIXADDR_USER_END	(GATE_ADDR + 2*PERCPU_PAGE_SIZE)
+#endif
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTEP_MKDIRTY
 #define __HAVE_ARCH_PTE_SAME
 #define __HAVE_ARCH_PGD_OFFSET_GATE
-#include <asm-generic/pgtable.h>
+#define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
+
 #include <asm-generic/pgtable-nopud.h>
+#include <asm-generic/pgtable.h>
 
 #endif /* _ASM_IA64_PGTABLE_H */
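
Reviewer note, not part of the patch: the hunks above track the generic PTE-accessor
rework of this kernel generation, in which pte_clear(), ptep_get_and_clear(),
ptep_set_wrprotect() and friends grew mm/addr (or vma/addr) parameters.  On ia64 the
extra arguments are simply ignored, but other architectures need them for TLB and
cache bookkeeping.  A minimal caller-side sketch of the new signatures follows;
wrprotect_one_pte() is a hypothetical helper written for illustration only, not
something this patch adds:

	#include <linux/mm.h>
	#include <asm/pgtable.h>
	#include <asm/tlbflush.h>

	/* Age and write-protect one PTE, using the post-patch interfaces. */
	static void
	wrprotect_one_pte (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
	{
		/* accessed-bit tracking now takes the vma and the address ... */
		if (ptep_test_and_clear_young(vma, addr, ptep))
			flush_tlb_page(vma, addr);	/* ... so callers can flush precisely */

		/* mm-wide accessors take (mm, addr, ptep) instead of a bare ptep */
		ptep_set_wrprotect(vma->vm_mm, addr, ptep);
	}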