#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT))
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_PGD_NR 0
+#define FIRST_USER_ADDRESS 0
#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
/* ERPN in a PTE never gets cleared, ignore it */
#define _PTE_NONE_MASK 0xffffffff00000000ULL
-#elif defined(CONFIG_E500)
-
+#elif defined(CONFIG_FSL_BOOKE)
/*
   MMU Assist Register 3:

   - PRESENT *must* be in the bottom three bits because swap cache
     entries use the top 29 bits.
*/
-/* Definitions for e500 core */
-#define _PAGE_PRESENT 0x001 /* S: PTE contains a translation */
-#define _PAGE_USER 0x002 /* S: User page (maps to UR) */
-#define _PAGE_FILE 0x002 /* S: when !present: nonlinear file mapping */
-#define _PAGE_ACCESSED 0x004 /* S: Page referenced */
-#define _PAGE_HWWRITE 0x008 /* H: Dirty & RW, set in exception */
-#define _PAGE_RW 0x010 /* S: Write permission */
-#define _PAGE_HWEXEC 0x020 /* H: UX permission */
-
-#define _PAGE_ENDIAN 0x040 /* H: E bit */
-#define _PAGE_GUARDED 0x080 /* H: G bit */
-#define _PAGE_COHERENT 0x100 /* H: M bit */
-#define _PAGE_NO_CACHE 0x200 /* H: I bit */
-#define _PAGE_WRITETHRU 0x400 /* H: W bit */
-#define _PAGE_DIRTY 0x800 /* S: Page dirty */
+/* Definitions for FSL Book-E Cores */
+#define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */
+#define _PAGE_USER 0x00002 /* S: User page (maps to UR) */
+#define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */
+#define _PAGE_ACCESSED 0x00004 /* S: Page referenced */
+#define _PAGE_HWWRITE 0x00008 /* H: Dirty & RW, set in exception */
+#define _PAGE_RW 0x00010 /* S: Write permission */
+#define _PAGE_HWEXEC 0x00020 /* H: UX permission */
+
+#define _PAGE_ENDIAN 0x00040 /* H: E bit */
+#define _PAGE_GUARDED 0x00080 /* H: G bit */
+#define _PAGE_COHERENT 0x00100 /* H: M bit */
+#define _PAGE_NO_CACHE 0x00200 /* H: I bit */
+#define _PAGE_WRITETHRU 0x00400 /* H: W bit */
+
+#ifdef CONFIG_PTE_64BIT
+#define _PAGE_DIRTY 0x08000 /* S: Page dirty */
+
+/* ERPN in a PTE never gets cleared, ignore it */
+#define _PTE_NONE_MASK 0xffffffffffff0000ULL
+#else
+#define _PAGE_DIRTY 0x00800 /* S: Page dirty */
+#endif
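
/*
 * Illustrative sketch, not part of the original header: the constraint in
 * the MAS3 comment above (swap-cache encodings reuse the upper PTE bits)
 * can be spot-checked at compile time by confirming that the software
 * PRESENT and FILE bits stay within the low-order bits.
 */
#if (_PAGE_PRESENT & ~0x7) || (_PAGE_FILE & ~0x7)
#error "PRESENT and FILE are expected in the bottom three PTE bits"
#endif
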
#define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK)
* Conversions between PTE values and page frame numbers.
*/
-#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
+/* In some cases we want to additionally adjust where the pfn sits in the pte
+ * to allow room for more flags */
+#if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
+#define PFN_SHIFT_OFFSET (PAGE_SHIFT + 8)
+#else
+#define PFN_SHIFT_OFFSET (PAGE_SHIFT)
+#endif
+
+#define pte_pfn(x) (pte_val(x) >> PFN_SHIFT_OFFSET)
#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define pfn_pte(pfn, prot) __pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pte(pfn, prot) __pte(((pte_basic_t)(pfn) << PFN_SHIFT_OFFSET) |\
+ pgprot_val(prot))
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
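
/*
 * Illustrative sketch (hypothetical helper, not part of the original header):
 * whichever PFN_SHIFT_OFFSET is selected above, pte_pfn() must undo
 * pfn_pte(), since all the protection bits sit below the PFN field.
 */
static inline int pfn_pte_roundtrips(unsigned long pfn, pgprot_t prot)
{
	return pte_pfn(pfn_pte(pfn, prot)) == pfn;
}
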
/*
#define pte_none(pte) ((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
-#define pte_clear(ptep) do { set_pte((ptep), __pte(0)); } while (0)
+#define pte_clear(mm,addr,ptep) do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
return pte;
}
+/*
+ * When flushing the tlb entry for a page, we also need to flush the hash
+ * table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
+ */
+extern int flush_hash_pages(unsigned context, unsigned long va,
+ unsigned long pmdval, int count);
+
+/* Add an HPTE to the hash table */
+extern void add_hash_page(unsigned context, unsigned long va,
+ unsigned long pmdval);
+
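/*
 * Usage sketch (hedged): a single-page flush passes count == 1, with the
 * physical address of the page-table page holding the PTE as pmdval, just
 * as __ptep_test_and_clear_young() does further down:
 *
 *	flush_hash_pages(mm->context, addr, __pa(ptep) & PAGE_MASK, 1);
 */
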
/*
* Atomic PTE updates.
*
 * pte_update clears and sets bits atomically, and returns
- * the old pte value.
- * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
- * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
+ * the old pte value. In the 64-bit PTE case we lock around the
+ * low PTE word since we expect ALL flag bits to be there
*/
+#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p, unsigned long clr,
unsigned long set)
{
" stwcx. %1,0,%3\n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*p)
- : "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
+ : "r" (p), "r" (clr), "r" (set), "m" (*p)
+ : "cc" );
+ return old;
+}
+#else
+static inline unsigned long long pte_update(pte_t *p, unsigned long clr,
+ unsigned long set)
+{
+ unsigned long long old;
+ unsigned long tmp;
+
+ __asm__ __volatile__("\
+1: lwarx %L0,0,%4\n\
+ lwzx %0,0,%3\n\
+ andc %1,%L0,%5\n\
+ or %1,%1,%6\n"
+ PPC405_ERR77(0,%3)
+" stwcx. %1,0,%4\n\
+ bne- 1b"
+ : "=&r" (old), "=&r" (tmp), "=m" (*p)
+ : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
: "cc" );
return old;
}
+#endif
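
/*
 * Usage sketch (hypothetical helper, not part of the original header): both
 * pte_update() variants clear 'clr', set 'set' and hand back the old value,
 * so a caller can atomically drop write permission and learn whether the
 * page had been writable, mirroring the wrprotect/dirty helpers below.
 */
static inline int example_pte_wrprotect(pte_t *ptep)
{
	return (pte_update(ptep, _PAGE_RW | _PAGE_HWWRITE, 0) & _PAGE_RW) != 0;
}
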
/*
* set_pte stores a linux PTE into the linux page table.
* On machines which use an MMU hash table we avoid changing the
* _PAGE_HASHPTE bit.
*/
-static inline void set_pte(pte_t *ptep, pte_t pte)
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
{
#if _PAGE_HASHPTE != 0
pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
#endif
}
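
/*
 * Usage sketch (hedged): callers install a translation through set_pte_at(),
 * e.g. the pte_clear() macro earlier, so an existing _PAGE_HASHPTE bit in
 * *ptep survives the store:
 *
 *	set_pte_at(mm, addr, ptep, __pte(0));
 */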
-extern void flush_hash_one_pte(pte_t *ptep);
-
/*
 * 2.6 calls this without flushing the TLB entry; that is wrong for our
 * hash-based implementation, so we fix it up here.
*/
-static inline int ptep_test_and_clear_young(pte_t *ptep)
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
unsigned long old;
old = pte_update(ptep, _PAGE_ACCESSED, 0);
#if _PAGE_HASHPTE != 0
- if (old & _PAGE_HASHPTE)
- flush_hash_one_pte(ptep);
+ if (old & _PAGE_HASHPTE) {
+ unsigned long ptephys = __pa(ptep) & PAGE_MASK;
+ flush_hash_pages(context, addr, ptephys, 1);
+ }
#endif
return (old & _PAGE_ACCESSED) != 0;
}
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+ __ptep_test_and_clear_young((__vma)->vm_mm->context, __addr, __ptep)
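
/*
 * Usage sketch (hedged): generic mm code supplies the VMA, and the wrapper
 * above pulls the MMU context out of vma->vm_mm so the hash entry gets
 * flushed along with the ACCESSED bit:
 *
 *	if (ptep_test_and_clear_young(vma, address, ptep))
 *		young = 1;
 */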
-static inline int ptep_test_and_clear_dirty(pte_t *ptep)
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
+static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
{
return (pte_update(ptep, (_PAGE_DIRTY | _PAGE_HWWRITE), 0) & _PAGE_DIRTY) != 0;
}
-static inline pte_t ptep_get_and_clear(pte_t *ptep)
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
{
return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
}
-static inline void ptep_set_wrprotect(pte_t *ptep)
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
{
pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), 0);
}
-static inline void ptep_mkdirty(pte_t *ptep)
-{
- pte_update(ptep, 0, _PAGE_DIRTY);
-}
-
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
{
*/
#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
+ unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B) (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
/*
extern void paging_init(void);
-/*
- * When flushing the tlb entry for a page, we also need to flush the hash
- * table entry. flush_hash_pages is assembler (for speed) in hashtable.S.
- */
-extern int flush_hash_pages(unsigned context, unsigned long va,
- unsigned long pmdval, int count);
-
-/* Add an HPTE to the hash table */
-extern void add_hash_page(unsigned context, unsigned long va,
- unsigned long pmdval);
-
/*
* Encode and decode a swap entry.
* Note that the bits we use in a PTE for representing a swap entry
phys_addr_t paddr64 = fixup_bigphys_addr(paddr, size);
return remap_pfn_range(vma, vaddr, paddr64 >> PAGE_SHIFT, size, prot);
}
+
+static inline int io_remap_pfn_range(struct vm_area_struct *vma,
+ unsigned long vaddr,
+ unsigned long pfn,
+ unsigned long size,
+ pgprot_t prot)
+{
+ phys_addr_t paddr64 = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
+ return remap_pfn_range(vma, vaddr, paddr64 >> PAGE_SHIFT, size, prot);
+}
#else
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif
+#define MK_IOSPACE_PFN(space, pfn) (pfn)
+#define GET_IOSPACE(pfn) 0
+#define GET_PFN(pfn) (pfn)
+
/*
* No page table caches to initialise
*/
extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep);
-#endif /* !__ASSEMBLY__ */
-
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-#define __HAVE_ARCH_PTEP_MKDIRTY
-#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>
+#endif /* !__ASSEMBLY__ */
+
#endif /* _PPC_PGTABLE_H */
#endif /* __KERNEL__ */