#define RGN_SIZE (1UL << 61)
#define RGN_KERNEL 7
-#define VMALLOC_START 0xa000000200000000
+#define VMALLOC_START 0xa000000200000000UL
#ifdef CONFIG_VIRTUAL_MEM_MAP
-# define VMALLOC_END_INIT (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END_INIT (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
# define VMALLOC_END vmalloc_end
extern unsigned long vmalloc_end;
#else
-# define VMALLOC_END (0xa000000000000000 + (1UL << (4*PAGE_SHIFT - 9)))
+# define VMALLOC_END (0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
#endif
/* fs/proc/kcore.c */
-#define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000)
-#define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000)
+#define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000UL)
+#define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000UL)
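/*
 * A note on the UL suffixes: without a suffix, the type of a large hex
 * constant is picked from the int -> unsigned int -> long -> unsigned long
 * ladder, which draws warnings from some gcc versions and leaves the
 * signedness implicit.  For shifted constants the suffix is semantically
 * required; a minimal sketch (illustrative, not part of this header):
 *
 *	#define BAD_RGN_SIZE (1 << 61)    -- undefined: the shift overflows 'int'
 *	#define RGN_SIZE     (1UL << 61)  -- well-defined 64-bit shift, as above
 */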
/*
 * Conversion functions: convert page frame number (pfn) and a protection value to a page
 * table entry (pte).
 */
/*
 * Macro to mark a page protection value as "write-combining".  Accesses
 * through a write-combining translation bypass the caches, but do allow
 * consecutive writes to be combined into single (but larger) write
 * transactions.
 */
-#ifdef CONFIG_MCKINLEY_A0_SPECIFIC
-# define pgprot_writecombine(prot) prot
-#else
-# define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
-#endif
+#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
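/*
 * With the McKinley A0 workaround gone, pgprot_writecombine() always
 * selects the WC memory attribute.  A hedged sketch of typical use in a
 * driver mmap path (example_fb_mmap, fb_paddr and size are hypothetical,
 * not from this patch):
 */
static inline int
example_fb_mmap (struct vm_area_struct *vma, unsigned long fb_paddr,
		 unsigned long size)
{
	/* let consecutive frame-buffer writes combine into larger transactions */
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return remap_page_range(vma, vma->vm_start, fb_paddr, size,
				vma->vm_page_prot);
}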
static inline unsigned long
pgd_index (unsigned long address)
{
	unsigned long region = address >> 61;	/* 3 region bits pick a pgd octant */
	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

	return (region << (PAGE_SHIFT - 6)) | l1index;
}
#define pgd_offset_k(addr) \
(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))
+/*
+ * Look up a pgd entry in the gate area.  On IA-64, the gate area
+ * resides in the kernel-mapped segment, hence we use pgd_offset_k()
+ * here.
+ */
+#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
+
/* Find an entry in the second-level page table. */
#define pmd_offset(dir,addr) \
((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
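/*
 * Why the pgd_offset_gate() hook matters: generic code that walks the gate
 * page's tables (the get_user_pages() gate-area path) must start from the
 * kernel page table rather than the user mm's.  A hedged sketch of such a
 * walk (example_gate_pte and gate_addr are hypothetical; error checking
 * omitted):
 */
static inline pte_t *
example_gate_pte (struct mm_struct *mm, unsigned long gate_addr)
{
	pgd_t *pgd = pgd_offset_gate(mm, gate_addr);	/* kernel tables on ia64 */
	pmd_t *pmd = pmd_offset(pgd, gate_addr);
	return pte_offset_kernel(pmd, gate_addr);
}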
static inline int
ptep_test_and_clear_young (pte_t *ptep)
{
#ifdef CONFIG_SMP
+ if (!pte_young(*ptep))
+ return 0;
return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte(ptep, pte_mkold(pte));
	return 1;
#endif
}
static inline int
ptep_test_and_clear_dirty (pte_t *ptep)
{
#ifdef CONFIG_SMP
+ if (!pte_dirty(*ptep))
+ return 0;
return test_and_clear_bit(_PAGE_D_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	set_pte(ptep, pte_mkclean(pte));
	return 1;
#endif
}
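/*
 * The pte_young()/pte_dirty() tests added above are the usual "check
 * before test-and-clear" optimization: a plain load avoids the atomic
 * read-modify-write (and the exclusive cacheline ownership it forces)
 * when the bit is already clear, the common case when page aging scans
 * cold ptes.  Generic form of the pattern (illustrative only):
 */
static inline int
example_test_and_clear (int bit, volatile void *addr)
{
	if (!test_bit(bit, addr))		/* cheap shared read, no bus lock */
		return 0;
	return test_and_clear_bit(bit, addr);	/* atomic RMW only when likely set */
}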
# ifdef CONFIG_VIRTUAL_MEM_MAP
/* arch mem_map init routine is needed due to holes in a virtual mem_map */
# define __HAVE_ARCH_MEMMAP_INIT
- extern void memmap_init (struct page *start, unsigned long size, int nid, unsigned long zone,
-			   unsigned long start_pfn);
+ extern void memmap_init (unsigned long size, int nid, unsigned long zone,
+			   unsigned long start_pfn);
# endif /* CONFIG_VIRTUAL_MEM_MAP */
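/*
 * Shape of the override declared above: with a virtually mapped mem_map,
 * the struct pages backing holes in physical memory have no backing
 * storage and must not be touched.  A simplified, hedged sketch (the real
 * ia64 version walks the EFI memory map; the per-pfn loop and the use of
 * ia64_pfn_valid()/memmap_init_zone() here are illustrative assumptions):
 */
void
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	unsigned long pfn, end = start_pfn + size;

	for (pfn = start_pfn; pfn < end; pfn++)
		if (ia64_pfn_valid(pfn))	/* skip pfns that fall in holes */
			memmap_init_zone(1, nid, zone, pfn);
}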
# endif /* !__ASSEMBLY__ */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
+#define __HAVE_ARCH_PGD_OFFSET_GATE
#include <asm-generic/pgtable.h>
#endif /* _ASM_IA64_PGTABLE_H */