* the SpitFire page tables.
*/
+#include <asm-generic/pgtable-nopud.h>
+
#include <linux/config.h>
+#include <linux/compiler.h>
+#include <asm/types.h>
#include <asm/spitfire.h>
#include <asm/asi.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/const.h>
-/* The kernel image occupies 0x4000000 to 0x1000000 (4MB --> 16MB).
- * The page copy blockops use 0x1000000 to 0x18000000 (16MB --> 24MB).
+/* The kernel image occupies 0x400000 to 0x2000000 (4MB --> 32MB).
+ * The page copy blockops can use 0x2000000 to 0x10000000.
* The PROM resides in an area spanning 0xf0000000 to 0x100000000.
- * The vmalloc area spans 0x140000000 to 0x200000000.
+ * The vmalloc area spans 0x100000000 to 0x200000000.
+ * Since modules need to be in the lowest 32-bits of the address space,
+ * we place them right before the OBP area from 0x10000000 to 0xf0000000.
* There is a single static kernel PMD which maps from 0x0 to address
* 0x400000000.
*/
-#define TLBTEMP_BASE _AC(0x0000000001000000,UL)
-#define MODULES_VADDR _AC(0x0000000002000000,UL)
-#define MODULES_LEN _AC(0x000000007e000000,UL)
-#define MODULES_END _AC(0x0000000080000000,UL)
-#define VMALLOC_START _AC(0x0000000140000000,UL)
-#define VMALLOC_END _AC(0x0000000200000000,UL)
+#define TLBTEMP_BASE _AC(0x0000000002000000,UL)
+#define MODULES_VADDR _AC(0x0000000010000000,UL)
+#define MODULES_LEN _AC(0x00000000e0000000,UL)
+#define MODULES_END _AC(0x00000000f0000000,UL)
#define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL)
#define HI_OBP_ADDRESS _AC(0x0000000100000000,UL)
+#define VMALLOC_START _AC(0x0000000100000000,UL)
+#define VMALLOC_END _AC(0x0000000200000000,UL)
/* XXX All of this needs to be rethought so we can take advantage of
 * XXX cheetah's full 64-bit virtual address space, ie. no more hole
 * XXX in the middle.
 */

/* PMD_SHIFT determines the size of the area a second-level page
 * table can map
 */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
-#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-#define PMD_BITS 11
+#define PMD_BITS (PAGE_SHIFT - 2)
/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PGDIR_BITS (PAGE_SHIFT - 2)
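
/* Assuming the default 8K pages (PAGE_SHIFT == 13), these work out to:
 *
 *	PMD_BITS = PGDIR_BITS = 11
 *	PMD_SHIFT   = 13 + 10      = 23  (each PMD entry maps 8MB)
 *	PGDIR_SHIFT = 13 + 10 + 11 = 34  (each PGD entry maps 16GB)
 */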
#ifndef __ASSEMBLY__
#include <linux/sched.h>
-/* Certain architectures need to do special things when pte's
- * within a page table are directly modified. Thus, the following
- * hook is made available.
- */
-#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
-
/* Entries per page directory level. */
-#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
-
-/* We the first one in this file, what we export to the kernel
- * is different so we can optimize correctly for 32-bit tasks.
- */
-#define REAL_PTRS_PER_PMD (1UL << PMD_BITS)
-#define PTRS_PER_PMD ((const int)(test_thread_flag(TIF_32BIT) ? \
- (1UL << (32 - (PAGE_SHIFT-3) - PAGE_SHIFT)) : \
- (REAL_PTRS_PER_PMD)))
-
-/*
- * We cannot use the top address range because VPTE table lives there. This
- * formula finds the total legal virtual space in the processor, subtracts the
- * vpte size, then aligns it to the number of bytes mapped by one pgde, and
- * thus calculates the number of pgdes needed.
- */
-#define PTRS_PER_PGD (((1UL << VA_BITS) - VPTE_SIZE + (1UL << (PAGE_SHIFT + \
- (PAGE_SHIFT-3) + PMD_BITS)) - 1) / (1UL << (PAGE_SHIFT + \
- (PAGE_SHIFT-3) + PMD_BITS)))
+#define PTRS_PER_PTE (1UL << (PAGE_SHIFT-3))
+#define PTRS_PER_PMD (1UL << PMD_BITS)
+#define PTRS_PER_PGD (1UL << PGDIR_BITS)
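
/* With 8K pages that is 1024 entries in a PTE table and 2048 entries in
 * each of the PMD and PGD levels, so every level of table fits in a
 * single 8K page (64-bit pte entries, 32-bit pmd/pgd entries).
 */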
/* Kernel has a separate 44-bit address space. */
-#define USER_PTRS_PER_PGD ((const int)(test_thread_flag(TIF_32BIT)) ? \
- (1) : (PTRS_PER_PGD))
-#define FIRST_USER_PGD_NR 0
+#define FIRST_USER_ADDRESS 0
#define pte_ERROR(e) __builtin_trap()
#define pmd_ERROR(e) __builtin_trap()
#endif /* !(__ASSEMBLY__) */
/* Spitfire/Cheetah TTE bits. */
-#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */
-#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit up to date */
-#define _PAGE_SZ4MB _AC(0x6000000000000000,UL) /* 4MB Page */
-#define _PAGE_SZ512K _AC(0x4000000000000000,UL) /* 512K Page */
-#define _PAGE_SZ64K _AC(0x2000000000000000,UL) /* 64K Page */
-#define _PAGE_SZ8K _AC(0x0000000000000000,UL) /* 8K Page */
-#define _PAGE_NFO _AC(0x1000000000000000,UL) /* No Fault Only */
-#define _PAGE_IE _AC(0x0800000000000000,UL) /* Invert Endianness */
-#define _PAGE_SN _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */
-#define _PAGE_PADDR_SF _AC(0x000001FFFFFFE000,UL) /* (Spitfire) paddr [40:13]*/
-#define _PAGE_PADDR _AC(0x000007FFFFFFE000,UL) /* (Cheetah) paddr [42:13] */
-#define _PAGE_SOFT _AC(0x0000000000001F80,UL) /* Software bits */
-#define _PAGE_L _AC(0x0000000000000040,UL) /* Locked TTE */
-#define _PAGE_CP _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
-#define _PAGE_CV _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
-#define _PAGE_E _AC(0x0000000000000008,UL) /* side-Effect */
-#define _PAGE_P _AC(0x0000000000000004,UL) /* Privileged Page */
-#define _PAGE_W _AC(0x0000000000000002,UL) /* Writable */
-#define _PAGE_G _AC(0x0000000000000001,UL) /* Global */
-
-/* Here are the SpitFire software bits we use in the TTE's. */
-#define _PAGE_FILE _AC(0x0000000000001000,UL) /* Pagecache page */
-#define _PAGE_MODIFIED _AC(0x0000000000000800,UL) /* Modified (dirty) */
-#define _PAGE_ACCESSED _AC(0x0000000000000400,UL) /* Accessed (ref'd) */
-#define _PAGE_READ _AC(0x0000000000000200,UL) /* Readable SW Bit */
-#define _PAGE_WRITE _AC(0x0000000000000100,UL) /* Writable SW Bit */
-#define _PAGE_PRESENT _AC(0x0000000000000080,UL) /* Present */
+#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */
+#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit up to date*/
+#define _PAGE_SZ4MB _AC(0x6000000000000000,UL) /* 4MB Page */
+#define _PAGE_SZ512K _AC(0x4000000000000000,UL) /* 512K Page */
+#define _PAGE_SZ64K _AC(0x2000000000000000,UL) /* 64K Page */
+#define _PAGE_SZ8K _AC(0x0000000000000000,UL) /* 8K Page */
+#define _PAGE_NFO _AC(0x1000000000000000,UL) /* No Fault Only */
+#define _PAGE_IE _AC(0x0800000000000000,UL) /* Invert Endianness */
+#define _PAGE_SOFT2 _AC(0x07FC000000000000,UL) /* Software bits, set 2 */
+#define _PAGE_RES1 _AC(0x0002000000000000,UL) /* Reserved */
+#define _PAGE_SZ32MB _AC(0x0001000000000000,UL) /* (Panther) 32MB page */
+#define _PAGE_SZ256MB _AC(0x2001000000000000,UL) /* (Panther) 256MB page */
+#define _PAGE_SN _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */
+#define _PAGE_RES2 _AC(0x0000780000000000,UL) /* Reserved */
+#define _PAGE_PADDR_SF _AC(0x000001FFFFFFE000,UL) /* (Spitfire) paddr[40:13]*/
+#define _PAGE_PADDR _AC(0x000007FFFFFFE000,UL) /* (Cheetah) paddr[42:13] */
+#define _PAGE_SOFT _AC(0x0000000000001F80,UL) /* Software bits */
+#define _PAGE_L _AC(0x0000000000000040,UL) /* Locked TTE */
+#define _PAGE_CP _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */
+#define _PAGE_CV _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */
+#define _PAGE_E _AC(0x0000000000000008,UL) /* side-Effect */
+#define _PAGE_P _AC(0x0000000000000004,UL) /* Privileged Page */
+#define _PAGE_W _AC(0x0000000000000002,UL) /* Writable */
+#define _PAGE_G _AC(0x0000000000000001,UL) /* Global */
+
+/* Here are the SpitFire software bits we use in the TTE's.
+ *
+ * WARNING: If you are going to try and start using some
+ * of the soft2 bits, you will need to make
+ * modifications to the swap entry implementation.
+ * For example, one thing that could happen is that
+ * swp_entry_to_pte() would BUG_ON() if you tried
+ * to use one of the soft2 bits for _PAGE_FILE.
+ *
+ * Like other architectures, I have aliased _PAGE_FILE with
+ * _PAGE_MODIFIED. This works because _PAGE_FILE is never
+ * interpreted that way unless _PAGE_PRESENT is clear.
+ */
+#define _PAGE_EXEC _AC(0x0000000000001000,UL) /* Executable SW bit */
+#define _PAGE_MODIFIED _AC(0x0000000000000800,UL) /* Modified (dirty) */
+#define _PAGE_FILE _AC(0x0000000000000800,UL) /* Pagecache page */
+#define _PAGE_ACCESSED _AC(0x0000000000000400,UL) /* Accessed (ref'd) */
+#define _PAGE_READ _AC(0x0000000000000200,UL) /* Readable SW Bit */
+#define _PAGE_WRITE _AC(0x0000000000000100,UL) /* Writable SW Bit */
+#define _PAGE_PRESENT _AC(0x0000000000000080,UL) /* Present */
#if PAGE_SHIFT == 13
#define _PAGE_SZBITS _PAGE_SZ8K
/* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */
#define PAGE_SHARED __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
- __ACCESS_BITS | _PAGE_WRITE)
+ __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC)
#define PAGE_COPY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
- __ACCESS_BITS)
+ __ACCESS_BITS | _PAGE_EXEC)
#define PAGE_READONLY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
- __ACCESS_BITS)
+ __ACCESS_BITS | _PAGE_EXEC)
#define PAGE_KERNEL __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
- __PRIV_BITS | __ACCESS_BITS | __DIRTY_BITS)
+ __PRIV_BITS | \
+ __ACCESS_BITS | __DIRTY_BITS | _PAGE_EXEC)
+
+#define PAGE_SHARED_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \
+ _PAGE_CACHE | \
+ __ACCESS_BITS | _PAGE_WRITE)
+
+#define PAGE_COPY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \
+ _PAGE_CACHE | __ACCESS_BITS)
+
+#define PAGE_READONLY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \
+ _PAGE_CACHE | __ACCESS_BITS)
#define _PFN_MASK _PAGE_PADDR
__ACCESS_BITS | _PAGE_E)
#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
+#define __P001 PAGE_READONLY_NOEXEC
+#define __P010 PAGE_COPY_NOEXEC
+#define __P011 PAGE_COPY_NOEXEC
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY
#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
+#define __S001 PAGE_READONLY_NOEXEC
+#define __S010 PAGE_SHARED_NOEXEC
+#define __S011 PAGE_SHARED_NOEXEC
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S110 PAGE_SHARED
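
/* These tables are indexed by the mapping's protection bits in "xwr"
 * order (exec, write, read).  For example, a private PROT_READ|PROT_WRITE
 * mapping uses __P011 == PAGE_COPY_NOEXEC (copy-on-write, no execute),
 * while a shared PROT_READ|PROT_EXEC mapping uses __S101 == PAGE_READONLY,
 * which now carries _PAGE_EXEC.
 */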
/* PFNs are real physical page numbers. However, mem_map only begins to record
* per-page information starting at pfn_base. This is to handle systems where
- * the first physical page in the machine is at some huge physical address, such
- * as 4GB. This is common on a partitioned E10000, for example.
+ * the first physical page in the machine is at some huge physical address,
+ * such as 4GB. This is common on a partitioned E10000, for example.
*/
#define pfn_pte(pfn, prot) \
#define pte_pfn(x) ((pte_val(x) & _PAGE_PADDR)>>PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))
-#define page_pte_prot(page, prot) mk_pte(page, prot)
-#define page_pte(page) page_pte_prot(page, __pgprot(0))
-
static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
{
pte_t __pte;
}
#define pmd_set(pmdp, ptep) \
(pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
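
/* pmd_set()/pud_set() store the physical address of the next-level table
 * shifted right by 11; the tables are 8K-aligned, so nothing is lost, and
 * a 43-bit physical address then fits in the 32-bit pmd/pud entries.  The
 * __pmd_page()/pud_page() macros below simply undo that shift.
 */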
-#define pgd_set(pgdp, pmdp) \
- (pgd_val(*(pgdp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
+#define pud_set(pudp, pmdp) \
+ (pud_val(*(pudp)) = (__pa((unsigned long) (pmdp)) >> 11UL))
#define __pmd_page(pmd) \
((unsigned long) __va((((unsigned long)pmd_val(pmd))<<11UL)))
#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
-#define pgd_page(pgd) \
- ((unsigned long) __va((((unsigned long)pgd_val(pgd))<<11UL)))
+#define pud_page(pud) \
+ ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL)))
#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
-#define pte_clear(pte) (pte_val(*(pte)) = 0UL)
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (0)
#define pmd_present(pmd) (pmd_val(pmd) != 0U)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0U)
-#define pgd_none(pgd) (!pgd_val(pgd))
-#define pgd_bad(pgd) (0)
-#define pgd_present(pgd) (pgd_val(pgd) != 0U)
-#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0U)
+#define pud_none(pud) (!pud_val(pud))
+#define pud_bad(pud) (0)
+#define pud_present(pud) (pud_val(pud) != 0U)
+#define pud_clear(pudp) (pud_val(*(pudp)) = 0U)
/* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
-#define pte_exec(pte) pte_read(pte)
+#define pte_exec(pte) (pte_val(pte) & _PAGE_EXEC)
#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R))
#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_WRITE))
#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W))
+#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_SZHUGE))
/* to find an entry in a page-table-directory. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+/* extract the pgd cache used for optimizing the tlb miss
+ * slow path when executing 32-bit compat processes
+ */
+#define get_pgd_cache(pgd) ((unsigned long) pgd_val(*pgd) << 11)
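
/* That is just the pgd entry with the >>11 packing undone, i.e. the
 * physical address of the pmd table, so the miss handler can reuse it
 * without re-walking the pgd.
 */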
+
/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir, address) \
- ((pmd_t *) pgd_page(*(dir)) + \
- ((address >> PMD_SHIFT) & (REAL_PTRS_PER_PMD-1)))
+#define pmd_offset(pudp, address) \
+ ((pmd_t *) pud_page(*(pudp)) + \
+ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))
/* Find an entry in the third-level page table.. */
#define pte_index(dir, address) \
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
-extern pgd_t swapper_pg_dir[1];
+/* Actual page table PTE updates. */
+extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig);
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
+{
+ pte_t orig = *ptep;
+
+ *ptep = pte;
+
+ /* It is more efficient to let flush_tlb_kernel_range()
+ * handle init_mm tlb flushes.
+ */
+ if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
+ tlb_batch_add(mm, addr, ptep, orig);
+}
+
+#define pte_clear(mm,addr,ptep) \
+ set_pte_at((mm), (addr), (ptep), __pte(0UL))
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+#define __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr) \
+({ \
+ pte_t newpte = (pte); \
+ if (pte_present(pte)) { \
+ unsigned long this_pfn = pte_pfn(pte); \
+ \
+ if (pfn_valid(this_pfn) && \
+ (((old_addr) ^ (new_addr)) & (1 << 13))) \
+ flush_dcache_page_all(current->mm, \
+ pfn_to_page(this_pfn)); \
+ } \
+ newpte; \
+})
+#endif
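
/* The (1 << 13) test above picks out the D-cache color bit: with 8K pages
 * and a 16K direct-mapped, virtually indexed D-cache there are two page
 * colors, so a pte moved to an address of the other color must have its
 * old cache lines flushed to avoid illegal aliasing.
 */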
+
+extern pgd_t swapper_pg_dir[2048];
+extern pmd_t swapper_low_pmd_dir[2048];
+
+extern void paging_init(void);
+extern unsigned long find_ecache_flush_span(unsigned long size);
/* These do nothing with the way I have things set up. */
#define mmu_lockarea(vaddr, len) (vaddr)
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-/* Make a non-present pseudo-TTE. */
-static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
-{
- pte_t pte;
- pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) &
- ~(unsigned long)_PAGE_CACHE);
- pte_val(pte) |= (((unsigned long)space) << 32);
- return pte;
-}
-
/* Encode and de-code a swap entry */
#define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0xffUL)
#define __swp_offset(entry) ((entry).val >> (PAGE_SHIFT + 8UL))
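
/* With 8K pages the swap type thus lives in pte bits 20:13 and the swap
 * offset from bit 21 upwards, leaving the low 13 bits (which include
 * _PAGE_PRESENT and _PAGE_FILE) clear in a swap entry.
 */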
sun4u_get_pte (unsigned long addr)
{
pgd_t *pgdp;
+ pud_t *pudp;
pmd_t *pmdp;
pte_t *ptep;
if (addr >= PAGE_OFFSET)
return addr & _PAGE_PADDR;
if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
- return prom_virt_to_phys(addr, 0);
+ return prom_virt_to_phys(addr, NULL);
pgdp = pgd_offset_k(addr);
- pmdp = pmd_offset(pgdp, addr);
+ pudp = pud_offset(pgdp, addr);
+ pmdp = pmd_offset(pudp, addr);
ptep = pte_offset_kernel(pmdp, addr);
return pte_val(*ptep) & _PAGE_PADDR;
}
#define kern_addr_valid(addr) \
(test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap))
-extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from,
- unsigned long offset,
- unsigned long size, pgprot_t prot, int space);
+extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
+ unsigned long pfn,
+ unsigned long size, pgprot_t prot);
+
+/* Clear virtual and physical cachability, set side-effect bit. */
+#define pgprot_noncached(prot) \
+ (__pgprot((pgprot_val(prot) & ~(_PAGE_CP | _PAGE_CV)) | \
+ _PAGE_E))
+
+/*
+ * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
+ * its high 4 bits. These macros/functions put it there or get it from there.
+ */
+#define MK_IOSPACE_PFN(space, pfn) (pfn | (space << (BITS_PER_LONG - 4)))
+#define GET_IOSPACE(pfn) (pfn >> (BITS_PER_LONG - 4))
+#define GET_PFN(pfn) (pfn & 0x0fffffffffffffffUL)
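
/* Illustrative use from a driver mmap handler (names are hypothetical):
 *
 *	unsigned long pfn = MK_IOSPACE_PFN(space, paddr >> PAGE_SHIFT);
 *
 *	io_remap_pfn_range(vma, vma->vm_start, pfn,
 *			   vma->vm_end - vma->vm_start,
 *			   vma->vm_page_prot);
 */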
#include <asm-generic/pgtable.h>