/*
 * PowerPC memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _PPC64_MMU_H_
#define _PPC64_MMU_H_
#include <linux/config.h>
#include <asm/page.h>
#include <linux/stringify.h>

#ifndef __ASSEMBLY__
/* Time to allow for more things here */
typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
#ifdef CONFIG_HUGETLB_PAGE
	u16 htlb_segs;			/* bitmask of hugepage segments */
#endif
} mm_context_t;
/* Bits in the STE ESID word */
#define STE_ESID_V	0x80
#define STE_ESID_KS	0x20
#define STE_ESID_KP	0x10
#define STE_ESID_N	0x08

#define STE_VSID_SHIFT	12
struct stab_entry {
	unsigned long esid_data;
	unsigned long vsid_data;
};
/* Hardware Page Table Entry */

#define HPTES_PER_GROUP 8

/* First doubleword of an HPTE */
typedef struct {
	unsigned long avpn:57;	/* vsid | api == avpn */
	unsigned long :     2;	/* Software use */
	unsigned long bolted: 1; /* HPTE is "bolted" (never cast out) */
	unsigned long lock: 1;	/* lock bit on pSeries SMP */
	unsigned long l: 1;	/* Virtual page is large (l=1) or 4 KB (l=0) */
	unsigned long h: 1;	/* Hash function identifier */
	unsigned long v: 1;	/* Valid (v=1) or invalid (v=0) */
} Hpte_dword0;
/* Second doubleword of an HPTE */
typedef struct {
	unsigned long pp0: 1;	/* Page protection bit 0 */
	unsigned long ts: 1;	/* Tag set bit */
	unsigned long rpn: 50;	/* Real page number */
	unsigned long : 2;	/* Reserved */
	unsigned long ac: 1;	/* Address compare */
	unsigned long r: 1;	/* Referenced */
	unsigned long c: 1;	/* Changed */
	unsigned long w: 1;	/* Write-through cache mode */
	unsigned long i: 1;	/* Cache inhibited */
	unsigned long m: 1;	/* Memory coherence required */
	unsigned long g: 1;	/* Guarded */
	unsigned long n: 1;	/* No-execute */
	unsigned long pp: 2;	/* Page protection bits 1:2 */
} Hpte_dword1;
/* Software view of the flag bits in the second doubleword */
typedef struct {
	char padding[6];		/* padding */
	unsigned long : 6;		/* padding */
	unsigned long flags: 10;	/* HPTE flags */
} Hpte_dword1_flags;
typedef struct {
	union {
		unsigned long dword0;
		Hpte_dword0 dw0;
	} dw0;

	union {
		unsigned long dword1;
		Hpte_dword1 dw1;
		Hpte_dword1_flags flags;
	} dw1;
} HPTE;
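/*
 * Illustrative sketch, not part of the original header: the unions
 * above give two views of each doubleword.  The raw dwordN view suits
 * whole-word loads/stores; the bitfield view suits inspecting single
 * fields.  hpte_is_valid() is a hypothetical helper shown only to
 * demonstrate the access pattern.
 */
static inline int hpte_is_valid(HPTE *hpte)
{
	/* same information as (hpte->dw0.dword0 & 0x1) on big-endian */
	return hpte->dw0.dw0.v;
}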
/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read,       User read */
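/*
 * Illustrative sketch, not part of the original header: picking a PP
 * value from generic permissions, under the Ks=0/Kp=1 assumption noted
 * above.  pp_for_access() is a hypothetical helper.
 */
static inline unsigned long pp_for_access(int user, int write)
{
	if (!user)
		return PP_RWXX;			/* kernel-only mapping */
	return write ? PP_RWRW : PP_RWRX;	/* user read/write or read */
}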
typedef struct {
	HPTE		*htab;
	unsigned long	htab_num_ptegs;
	unsigned long	htab_hash_mask;
	unsigned long	next_round_robin;
	unsigned long	last_kernel_address;
} HTAB;

extern HTAB htab_data;
static inline unsigned long hpt_hash(unsigned long vpn, int large)
{
	/* split the VPN into segment (vsid) and page-within-segment parts */
	unsigned long vsid = large ? (vpn >> 4) : (vpn >> 16);
	unsigned long page = large ? (vpn & 0xf) : (vpn & 0xffff);

	return (vsid & 0x7fffffffffUL) ^ page;
}
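/*
 * Illustrative sketch, not part of the original header: the hash value
 * selects a primary PTE group (PTEG).  Masking with htab_hash_mask
 * keeps the group number within htab_num_ptegs, and each group holds
 * HPTES_PER_GROUP slots.  hpt_primary_slot() is a hypothetical helper.
 */
static inline unsigned long hpt_primary_slot(unsigned long vpn, int large)
{
	return (hpt_hash(vpn, large) & htab_data.htab_hash_mask)
		* HPTES_PER_GROUP;
}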
static inline void __tlbie(unsigned long va, int large)
{
	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	if (large)
		asm volatile("tlbie %0,1" : : "r"(va) : "memory");
	else
		asm volatile("tlbie %0,0" : : "r"(va) : "memory");
}
static inline void tlbie(unsigned long va, int large)
{
	asm volatile("ptesync": : :"memory");
	__tlbie(va, large);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
static inline void __tlbiel(unsigned long va)
{
	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	/*
	 * Thanks to Alan Modra we are now able to use machine specific
	 * assembly instructions (like tlbiel) by using the gas -many flag.
	 * However we have to support older toolchains so for the moment
	 * we hardwire the opcode instead.
	 */
#if 0
	asm volatile("tlbiel %0" : : "r"(va) : "memory");
#else
	asm volatile(".long 0x7c000224 | (%0 << 11)" : : "r"(va) : "memory");
#endif
}
static inline void tlbiel(unsigned long va)
{
	asm volatile("ptesync": : :"memory");
	__tlbiel(va);
	asm volatile("ptesync": : :"memory");
}
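/*
 * Illustrative sketch, not part of the original header: once an HPTE
 * has been removed or downgraded, any stale TLB entry for the address
 * must be flushed.  tlbie() broadcasts the invalidation to all cpus
 * with the required ptesync/tlbsync fencing; tlbiel() only flushes the
 * local TLB and is cheaper when the mapping was only ever used on this
 * cpu.  flush_stale_translation() is a hypothetical helper.
 */
static inline void flush_stale_translation(unsigned long va, int large,
					   int local)
{
	if (local && !large)
		tlbiel(va);	/* tlbiel here has no large-page form */
	else
		tlbie(va, large);
}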
/*
 * Handle a fault by adding an HPTE. If the address can't be determined
 * to be valid via the Linux page tables, return 1. If handled, return 0.
 */
extern int __hash_page(unsigned long ea, unsigned long access,
		       unsigned long vsid, pte_t *ptep, unsigned long trap,
		       int local);

extern void htab_finish_init(void);
#endif /* __ASSEMBLY__ */
/*
 * Location of cpu0's segment table
 */
#define STAB0_PAGE	0x9
#define STAB0_PHYS_ADDR	(STAB0_PAGE<<PAGE_SHIFT)
#define STAB0_VIRT_ADDR	(KERNELBASE+STAB0_PHYS_ADDR)
#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8

/* Bits in the SLB ESID word */
#define SLB_ESID_V		0x0000000008000000	/* entry is valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_KS		0x0000000000000800
#define SLB_VSID_KP		0x0000000000000400
#define SLB_VSID_N		0x0000000000000200	/* no-execute */
#define SLB_VSID_L		0x0000000000000100	/* largepage (16M) */
#define SLB_VSID_C		0x0000000000000080	/* class */

#define SLB_VSID_KERNEL		(SLB_VSID_KP|SLB_VSID_C)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS)
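#ifndef __ASSEMBLY__
/*
 * Illustrative sketch, not part of the original header: an SLB VSID
 * word is the VSID shifted above the flag bits defined above, OR'd
 * with a flag set such as SLB_VSID_KERNEL or SLB_VSID_USER.
 * mk_vsid_data() is a hypothetical helper.
 */
static inline unsigned long mk_vsid_data(unsigned long vsid,
					 unsigned long flags)
{
	return (vsid << SLB_VSID_SHIFT) | flags;
}
#endif /* __ASSEMBLY__ */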
#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS	36
#define VSID_MODULUS	((1UL<<VSID_BITS)-1)

#define CONTEXT_BITS	20
#define USER_ESID_BITS	15
/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low 36 bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx)	\
	lis	rx,VSID_MULTIPLIER@h;					\
	ori	rx,rx,VSID_MULTIPLIER@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS;					\
	clrldi	rt,rt,(64-VSID_BITS);					\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, rt == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if rt >=	\
	 * 2^36-1, then rt+1 has the 2^36 bit set.  So, if rt+1 has	\
	 * the bit clear, rt already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of rt+1.  So in all	\
	 * cases the answer is the low 36 bits of (rt + ((rt+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */		\
	add	rt,rt,rx
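
#ifndef __ASSEMBLY__
/*
 * Illustrative sketch, not part of the original header: a C model of
 * ASM_VSID_SCRAMBLE.  Since VSID_MODULUS is 2^36-1, the 64-bit product
 * is reduced by folding its high bits into the low 36, then applying
 * the (rt + ((rt+1) >> 36)) fixup described in the comment above.
 * vsid_scramble() is a hypothetical helper.
 */
static inline unsigned long vsid_scramble(unsigned long protovsid)
{
	unsigned long x = protovsid * VSID_MULTIPLIER;

	x = (x >> VSID_BITS) + (x & VSID_MODULUS);	/* fold high bits */
	return (x + ((x + 1) >> VSID_BITS)) & VSID_MODULUS;
}
#endif /* __ASSEMBLY__ */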
#endif /* _PPC64_MMU_H_ */