/* $Id: pgtsrmmu.h,v 1.31 2000/07/16 21:48:52 anton Exp $
 * pgtsrmmu.h: SRMMU page table defines and code.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC_PGTSRMMU_H
#define _SPARC_PGTSRMMU_H

#include <asm/page.h>

#ifdef __ASSEMBLY__
#include <asm/thread_info.h>	/* TI_UWINMASK for WINDOW_FLUSH */
#endif

/* Number of contexts is implementation-dependent; 64k is the most we support */
#define SRMMU_MAX_CONTEXTS	65536

/* PMD_SHIFT determines the size of the area a second-level page table entry can map */
#define SRMMU_PMD_SHIFT		18
#define SRMMU_PMD_SIZE		(1UL << SRMMU_PMD_SHIFT)
#define SRMMU_PMD_MASK		(~(SRMMU_PMD_SIZE-1))
/* #define SRMMU_PMD_ALIGN(addr)	(((addr)+SRMMU_PMD_SIZE-1)&SRMMU_PMD_MASK) */

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define SRMMU_PGDIR_SHIFT	24
#define SRMMU_PGDIR_SIZE	(1UL << SRMMU_PGDIR_SHIFT)
#define SRMMU_PGDIR_MASK	(~(SRMMU_PGDIR_SIZE-1))
#define SRMMU_PGDIR_ALIGN(addr)	(((addr)+SRMMU_PGDIR_SIZE-1)&SRMMU_PGDIR_MASK)

#define SRMMU_PTRS_PER_PTE	64
#define SRMMU_PTRS_PER_PMD	64
#define SRMMU_PTRS_PER_PGD	256

#define SRMMU_PTE_TABLE_SIZE	0x100	/* 64 entries, 4 bytes a piece */
#define SRMMU_PMD_TABLE_SIZE	0x100	/* 64 entries, 4 bytes a piece */
#define SRMMU_PGD_TABLE_SIZE	0x400	/* 256 entries, 4 bytes a piece */
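
/* For reference, a 32-bit virtual address decomposes under these shifts
 * as follows (an illustrative sketch, not a kernel API):
 *
 *	pgd_index = (vaddr >> SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD - 1);
 *	pmd_index = (vaddr >> SRMMU_PMD_SHIFT) & (SRMMU_PTRS_PER_PMD - 1);
 *	pte_index = (vaddr >> PAGE_SHIFT) & (SRMMU_PTRS_PER_PTE - 1);
 *
 * i.e. 8 bits of region index, 6 bits of segment index, 6 bits of page
 * index and a 12-bit page offset.
 */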

/*
 * To support pagetables in highmem, Linux introduces APIs which
 * return struct page* and generally manipulate page tables when
 * they are not mapped into kernel space. Our hardware page tables
 * are smaller than pages. We lump hardware tables into big, page sized
 * software tables.
 *
 * PMD_SHIFT determines the size of the area a second-level page table entry
 * can map, and our pmd_t is 16 times larger than normal.
 */
#define SRMMU_PTRS_PER_PTE_SOFT	(PAGE_SIZE/4)	/* 16 hard tables per 4K page */
#define SRMMU_PTRS_PER_PMD_SOFT	4		/* Each pmd_t contains 16 hard PTPs */
#define SRMMU_PTE_SZ_SOFT	PAGE_SIZE	/* same as above, in bytes */

#define SRMMU_PMD_SHIFT_SOFT	22
#define SRMMU_PMD_SIZE_SOFT	(1UL << SRMMU_PMD_SHIFT_SOFT)
#define SRMMU_PMD_MASK_SOFT	(~(SRMMU_PMD_SIZE_SOFT-1))
#define SRMMU_PMD_ALIGN_SOFT(addr)	(((addr)+SRMMU_PMD_SIZE_SOFT-1)&SRMMU_PMD_MASK_SOFT)
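
/* Checking the soft geometry arithmetic: one soft pmd_t spans
 * 1 << 22 = 4MB, one hard second-level entry maps 1 << 18 = 256KB,
 * so each soft pmd_t bundles 4MB / 256KB = 16 hard page table
 * pointers, matching the "16 hard PTPs" note above.
 */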

/* Definition of the values in the ET field of PTD's and PTE's */
#define SRMMU_ET_MASK		0x3
#define SRMMU_ET_INVALID	0x0
#define SRMMU_ET_PTD		0x1
#define SRMMU_ET_PTE		0x2
#define SRMMU_ET_REPTE		0x3	/* AIEEE, SuperSparc II reverse endian page! */
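
/* An illustrative sketch (not a kernel helper) of how a PTD is built:
 * the physical address of the next-level table, which must be suitably
 * aligned, goes in the PTP field and the ET field marks the word as a
 * descriptor:
 *
 *	ptd = (table_paddr >> 4) | SRMMU_ET_PTD;
 */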

/* Physical page extraction from PTP's and PTE's. */
#define SRMMU_CTX_PMASK		0xfffffff0
#define SRMMU_PTD_PMASK		0xfffffff0
#define SRMMU_PTE_PMASK		0xffffff00
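
/* Going the other way (again only a sketch): the PTE's PPN field sits
 * at bit 8 and names a 4K physical page, so the physical address falls
 * out of the mask plus a 4-bit left shift:
 *
 *	paddr = (pte & SRMMU_PTE_PMASK) << 4;
 */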

/* The pte non-page bits.  Some notes:
 * 1) cache, dirty, valid, and ref are frobbable
 *    for both supervisor and user pages.
 * 2) exec and write will only give the desired effect
 *    on user pages
 * 3) use priv and priv_readonly for changing the
 *    characteristics of supervisor ptes
 */
#define SRMMU_CACHE		0x80
#define SRMMU_DIRTY		0x40
#define SRMMU_REF		0x20
#define SRMMU_EXEC		0x08
#define SRMMU_WRITE		0x04
#define SRMMU_VALID		0x02	/* SRMMU_ET_PTE */
#define SRMMU_PRIV		0x1c
#define SRMMU_PRIV_RDONLY	0x18

#define SRMMU_FILE		0x40	/* Implemented in software */

#define SRMMU_PTE_FILE_SHIFT	8	/* == 32-PTE_FILE_MAX_BITS */

#define SRMMU_CHG_MASK		(0xffffff00 | SRMMU_REF | SRMMU_DIRTY)

/* SRMMU swap entry encoding
 *
 * We use 5 bits for the type and 19 for the offset.  This gives us
 * 32 swapfiles of 2GB each.  Encoding looks like:
 *
 * ooooooooooooooooooottttRRRRRRRR
 * fedcba9876543210fedcba9876543210
 *
 * The bottom 8 bits are reserved for protection and status bits, especially
 * FILE and PRESENT.
 */
#define SRMMU_SWP_TYPE_MASK	0x1f
#define SRMMU_SWP_TYPE_SHIFT	SRMMU_PTE_FILE_SHIFT
#define SRMMU_SWP_OFF_MASK	0x7ffff
#define SRMMU_SWP_OFF_SHIFT	(SRMMU_PTE_FILE_SHIFT + 5)
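
/* A minimal sketch of how these masks compose; the generic
 * __swp_type()/__swp_offset()/__swp_entry() macros are built on top of
 * these definitions:
 *
 *	swp  = ((type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT) |
 *	       ((off & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT);
 *	type = (swp >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
 *	off  = (swp >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
 */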

/* Some day I will implement true fine grained access bits for
 * user pages because the SRMMU gives us the capabilities to
 * enforce all the protection levels that vma's can have.
 * XXX But for now...
 */
#define SRMMU_PAGE_NONE		__pgprot(SRMMU_VALID | SRMMU_CACHE | \
					 SRMMU_PRIV | SRMMU_REF)
#define SRMMU_PAGE_SHARED	__pgprot(SRMMU_VALID | SRMMU_CACHE | \
					 SRMMU_EXEC | SRMMU_WRITE | SRMMU_REF)
#define SRMMU_PAGE_COPY		__pgprot(SRMMU_VALID | SRMMU_CACHE | \
					 SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_RDONLY	__pgprot(SRMMU_VALID | SRMMU_CACHE | \
					 SRMMU_EXEC | SRMMU_REF)
#define SRMMU_PAGE_KERNEL	__pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
					 SRMMU_DIRTY | SRMMU_REF)

/* SRMMU Register addresses in ASI 0x4.  These are valid for all
 * current SRMMU implementations that exist.
 */
#define SRMMU_CTRL_REG		0x00000000
#define SRMMU_CTXTBL_PTR	0x00000100
#define SRMMU_CTX_REG		0x00000200
#define SRMMU_FAULT_STATUS	0x00000300
#define SRMMU_FAULT_ADDR	0x00000400
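
/* A descriptive note on the macro below: the first loop keeps executing
 * save while TI_UWINMASK reports live user windows, so the window
 * overflow trap spills them to the stack; tmp1 counts how deep we went,
 * and the second loop unwinds with the same number of restores.
 */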

#define WINDOW_FLUSH(tmp1, tmp2)				\
	mov	0, tmp1;					\
98:	ld	[%g6 + TI_UWINMASK], tmp2;			\
	orcc	%g0, tmp2, %g0;					\
	add	tmp1, 1, tmp1;					\
	bne	98b;						\
	 save	%sp, -64, %sp;					\
99:	subcc	tmp1, 1, tmp1;					\
	bne	99b;						\
	 restore %g0, %g0, %g0;

#ifndef __ASSEMBLY__

/* This makes sense. Honest it does - Anton */
/* XXX Yes but it's ugly as sin.  FIXME. -KMW */
extern void *srmmu_nocache_pool;
#define __nocache_pa(VADDR) (((unsigned long)VADDR) - SRMMU_NOCACHE_VADDR + __pa((unsigned long)srmmu_nocache_pool))
#define __nocache_va(PADDR) (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR)
#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
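
/* In words: the nocache pool is one contiguous region remapped at
 * SRMMU_NOCACHE_VADDR, so the conversions are plain offset arithmetic.
 * An illustrative round trip (not kernel code):
 *
 *	unsigned long pa = __nocache_pa(vaddr);
 *	BUG_ON((unsigned long)__nocache_va(pa) != vaddr);
 */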

/* Accessing the MMU control register. */
extern __inline__ unsigned int srmmu_get_mmureg(void)
{
	unsigned int retval;
	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
			     "=r" (retval) :
			     "i" (ASI_M_MMUREGS));
	return retval;
}

extern __inline__ void srmmu_set_mmureg(unsigned long regval)
{
	__asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
			     "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
}

extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
{
	paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
			     "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
			     "i" (ASI_M_MMUREGS) :
			     "memory");
}

extern __inline__ unsigned long srmmu_get_ctable_ptr(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_CTXTBL_PTR),
			     "i" (ASI_M_MMUREGS));
	return (retval & SRMMU_CTX_PMASK) << 4;
}

extern __inline__ void srmmu_set_context(int context)
{
	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
			     "r" (context), "r" (SRMMU_CTX_REG),
			     "i" (ASI_M_MMUREGS) : "memory");
}

extern __inline__ int srmmu_get_context(void)
{
	register int retval;
	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_CTX_REG),
			     "i" (ASI_M_MMUREGS));
	return retval;
}

extern __inline__ unsigned int srmmu_get_fstatus(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
	return retval;
}

extern __inline__ unsigned int srmmu_get_faddr(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
	return retval;
}

/* This is guaranteed on all SRMMU's. */
extern __inline__ void srmmu_flush_whole_tlb(void)
{
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (0x400),	/* Flush entire TLB!! */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

/* These flush types are not available on all chips... */
extern __inline__ void srmmu_flush_tlb_ctx(void)
{
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (0x300),	/* Flush TLB ctx.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern __inline__ void srmmu_flush_tlb_region(unsigned long addr)
{
	addr &= SRMMU_PGDIR_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (addr | 0x200),	/* Flush TLB region.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
{
	addr &= SRMMU_PMD_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (addr | 0x100),	/* Flush TLB segment.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern __inline__ void srmmu_flush_tlb_page(unsigned long page)
{
	page &= PAGE_MASK;
	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
			     "r" (page),	/* Flush TLB page.. */
			     "i" (ASI_M_FLUSH_PROBE) : "memory");
}

extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
{
	unsigned long retval;

	vaddr &= PAGE_MASK;
	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
			     "=r" (retval) :
			     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
	return retval;
}

extern __inline__ int
srmmu_get_pte (unsigned long addr)
{
	register unsigned long entry;

	__asm__ __volatile__("\n\tlda [%1] %2,%0\n\t" :
			     "=r" (entry) :
			     "r" ((addr & 0xfffff000) | 0x400), "i" (ASI_M_FLUSH_PROBE));
	return entry;
}

extern unsigned long (*srmmu_read_physical)(unsigned long paddr);
extern void (*srmmu_write_physical)(unsigned long paddr, unsigned long word);

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC_PGTSRMMU_H) */