diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index 83a57662f..085ef89df 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -8,7 +8,7 @@
  * This hopefully works with any (fixed) IA-64 page-size, as defined
  * in <asm/page.h> (currently 8192).
  *
- * Copyright (C) 1998-2003 Hewlett-Packard Co
+ * Copyright (C) 1998-2004 Hewlett-Packard Co
  *	David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
@@ -102,7 +102,7 @@
  * can map.
  */
 #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
-#define PMD_SIZE (__IA64_UL(1) << PMD_SHIFT)
+#define PMD_SIZE (1UL << PMD_SHIFT)
 #define PMD_MASK (~(PMD_SIZE-1))
 #define PTRS_PER_PMD (__IA64_UL(1) << (PAGE_SHIFT-3))
 
@@ -206,18 +206,18 @@ ia64_phys_addr_valid (unsigned long addr)
 #define RGN_SIZE (1UL << 61)
 #define RGN_KERNEL 7
 
-#define VMALLOC_START 0xa000000200000000
+#define VMALLOC_START 0xa000000200000000UL
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-# define VMALLOC_END_INIT (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END_INIT (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
 # define VMALLOC_END vmalloc_end
   extern unsigned long vmalloc_end;
 #else
-# define VMALLOC_END (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
 #endif
 
 /* fs/proc/kcore.c */
-#define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000)
-#define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000)
+#define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000UL)
+#define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000UL)
 
 /*
  * Conversion functions: convert page frame number (pfn) and a protection value to a page
@@ -297,11 +297,7 @@ ia64_phys_addr_valid (unsigned long addr)
  * works bypasses the caches, but does allow for consecutive writes to
  * be combined into single (but larger) write transactions.
  */
-#ifdef CONFIG_MCKINLEY_A0_SPECIFIC
-# define pgprot_writecombine(prot) prot
-#else
-# define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
-#endif
+#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
 
 static inline unsigned long
 pgd_index (unsigned long address)
@@ -325,6 +321,11 @@ pgd_offset (struct mm_struct *mm, unsigned long address)
 #define pgd_offset_k(addr) \
 	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
 
+/* Look up a pgd entry in the gate area.  On IA-64, the gate-area
+   resides in the kernel-mapped segment, hence we use pgd_offset_k()
+   here.  */
+#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
+
 /* Find an entry in the second-level page table.. */
 #define pmd_offset(dir,addr) \
 	((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
@@ -346,6 +347,8 @@ static inline int
 ptep_test_and_clear_young (pte_t *ptep)
 {
 #ifdef CONFIG_SMP
+	if (!pte_young(*ptep))
+		return 0;
 	return test_and_clear_bit(_PAGE_A_BIT, ptep);
 #else
 	pte_t pte = *ptep;
@@ -360,6 +363,8 @@ static inline int
 ptep_test_and_clear_dirty (pte_t *ptep)
 {
 #ifdef CONFIG_SMP
+	if (!pte_dirty(*ptep))
+		return 0;
 	return test_and_clear_bit(_PAGE_D_BIT, ptep);
 #else
 	pte_t pte = *ptep;
@@ -469,8 +474,6 @@ extern void hugetlb_free_pgtables(struct mmu_gather *tlb,
 	struct vm_area_struct * prev, unsigned long start, unsigned long end);
 #endif
 
-typedef pte_t *pte_addr_t;
-
 /*
  * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
  * information.  However, we use this routine to take care of any (delayed) i-cache
@@ -478,10 +481,46 @@ typedef pte_t *pte_addr_t;
  */
 extern void update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte);
 
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Update PTEP with ENTRY, which is guaranteed to be a less
+ * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
+ * WRITABLE bits turned on, when the value at PTEP did not.  The
+ * WRITABLE bit may only be turned on, if SAFELY_WRITABLE is TRUE.
+ *
+ * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
+ * having to worry about races.  On SMP machines, there are only two
+ * cases where this is true:
+ *
+ *	(1) *PTEP has the PRESENT bit turned OFF
+ *	(2) ENTRY has the DIRTY bit turned ON
+ *
+ * On ia64, we could implement this routine with a cmpxchg()-loop
+ * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
+ * However, like on x86, we can get a more streamlined version by
+ * observing that it is OK to drop ACCESSED bit updates when
+ * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
+ * result in an extra Access-bit fault, which would then turn on the
+ * ACCESSED bit in the low-level fault handler (iaccess_bit or
+ * daccess_bit in ivt.S).
+ */
+#ifdef CONFIG_SMP
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+do { \
+	if (__safely_writable) { \
+		set_pte(__ptep, __entry); \
+		flush_tlb_page(__vma, __addr); \
+	} \
+} while (0)
+#else
+# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
+	ptep_establish(__vma, __addr, __ptep, __entry)
+#endif
+
 # ifdef CONFIG_VIRTUAL_MEM_MAP
   /* arch mem_map init routine is needed due to holes in a virtual mem_map */
 # define __HAVE_ARCH_MEMMAP_INIT
-  extern void memmap_init (struct page *start, unsigned long size, int nid, unsigned long zone,
+  extern void memmap_init (unsigned long size, int nid, unsigned long zone,
 			    unsigned long start_pfn);
 # endif /* CONFIG_VIRTUAL_MEM_MAP */
 # endif /* !__ASSEMBLY__ */
@@ -518,6 +557,7 @@ extern void update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, p
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTEP_MKDIRTY
 #define __HAVE_ARCH_PTE_SAME
+#define __HAVE_ARCH_PGD_OFFSET_GATE
 #include <asm-generic/pgtable.h>
 
 #endif /* _ASM_IA64_PGTABLE_H */
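
Note on the ptep_set_access_flags() comment above: it mentions that the routine could instead be implemented with a cmpxchg() loop that ORs the _PAGE_A/_PAGE_D bits from ENTRY into *PTEP. The sketch below is illustrative only and is not what the patch adds; the helper name ptep_set_access_flags_cmpxchg is hypothetical, and it assumes the kernel's cmpxchg() macro, flush_tlb_page(), and the _PAGE_A/_PAGE_D bits defined earlier in this header.

/*
 * Illustrative only: a cmpxchg()-based variant of ptep_set_access_flags(),
 * along the lines described in the comment in the patch.
 */
static inline void
ptep_set_access_flags_cmpxchg (struct vm_area_struct *vma, unsigned long addr,
			       pte_t *ptep, pte_t entry)
{
	unsigned long old, new;

	do {
		old = pte_val(*ptep);
		/* OR in any accessed/dirty bits that ENTRY has but *PTEP lacks. */
		new = old | (pte_val(entry) & (_PAGE_A | _PAGE_D));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);

	/* The TLB may still hold the old, more restrictive translation. */
	flush_tlb_page(vma, addr);
}

The version the patch actually adds avoids the retry loop entirely: when it is not safe to write the PTE, it simply drops the ACCESSED-bit update, which at worst costs one extra Access-bit fault handled by iaccess_bit/daccess_bit in ivt.S.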