/*
 * PowerPC memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _PPC64_MMU_H_
#define _PPC64_MMU_H_

#include <linux/config.h>
#include <asm/page.h>
#include <linux/stringify.h>

#ifndef __ASSEMBLY__
/* Time to allow for more things here */
typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
#ifdef CONFIG_HUGETLB_PAGE
	u16 htlb_segs;			/* bitmask of huge-page segments */
#endif
} mm_context_t;
#define STE_ESID_V	0x80
#define STE_ESID_KS	0x20
#define STE_ESID_KP	0x10
#define STE_ESID_N	0x08

#define STE_VSID_SHIFT	12
struct stab_entry {
	unsigned long esid_data;
	unsigned long vsid_data;
};
/* Hardware Page Table Entry */

#define HPTES_PER_GROUP 8

typedef struct {
	unsigned long avpn: 57;		/* vsid | api == avpn */
	unsigned long :      2;		/* Software use */
	unsigned long bolted: 1;	/* HPTE is "bolted" */
	unsigned long lock:   1;	/* lock on pSeries SMP */
	unsigned long l:      1;	/* Virtual page is large (L=1) or 4KB (L=0) */
	unsigned long h:      1;	/* Hash function identifier */
	unsigned long v:      1;	/* Valid (v=1) or invalid (v=0) */
} Hpte_dword0;
typedef struct {
	unsigned long pp0: 1;		/* Page protection bit 0 */
	unsigned long ts:  1;		/* Tag set bit */
	unsigned long rpn: 50;		/* Real page number */
	unsigned long :    2;		/* Reserved */
	unsigned long ac:  1;		/* Address compare */
	unsigned long r:   1;		/* Referenced */
	unsigned long c:   1;		/* Changed */
	unsigned long w:   1;		/* Write-through cache mode */
	unsigned long i:   1;		/* Cache inhibited */
	unsigned long m:   1;		/* Memory coherence required */
	unsigned long g:   1;		/* Guarded */
	unsigned long n:   1;		/* No-execute */
	unsigned long pp:  2;		/* Page protection bits 1:2 */
} Hpte_dword1;
typedef struct {
	char padding[6];		/* padding */
	unsigned long :     6;		/* padding */
	unsigned long flags: 10;	/* HPTE flags */
} Hpte_dword1_flags;

typedef struct {
	union {
		unsigned long dword0;
		Hpte_dword0   dw0;
	} dw0;

	union {
		unsigned long dword1;
		Hpte_dword1   dw1;
		Hpte_dword1_flags flags;
	} dw1;
} HPTE;
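/*
 * Illustration only (not part of this header's API): a hypothetical
 * helper showing how the dword0 overlay above is meant to be read.
 * Real HPTE accessors live in the hash-table code.
 */
static inline int example_hpte_valid(HPTE *hptep)
{
	return hptep->dw0.dw0.v;	/* valid bit of the first doubleword */
}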
/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux     */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read,       User read */
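/*
 * Illustration only: a hypothetical helper showing how the PP values
 * above encode the supervisor/user access pairs, given the Ks=0, Kp=1
 * assumption noted above.  The real permission logic is in the fault
 * and hash-page paths.
 */
static inline unsigned long example_pp_value(int user, int write)
{
	if (!user)
		return PP_RWXX;			/* kernel read/write, no user access */
	return write ? PP_RWRW : PP_RWRX;	/* user read/write or read-only */
}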
typedef struct {
	HPTE		*htab;
	unsigned long	htab_num_ptegs;
	unsigned long	htab_hash_mask;
	unsigned long	next_round_robin;
	unsigned long	last_kernel_address;
} HTAB;

extern HTAB htab_data;
static inline unsigned long hpt_hash(unsigned long vpn, int large)
{
	unsigned long vsid;
	unsigned long page;

	if (large) {
		vsid = vpn >> 4;
		page = vpn & 0xf;
	} else {
		vsid = vpn >> 16;
		page = vpn & 0xffff;
	}

	return (vsid & 0x7fffffffffUL) ^ page;
}
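/*
 * Illustration only: how hpt_hash() combines with htab_data for a 4K
 * page.  The VPN of a 4K page is (vsid << 16) | ((ea >> 12) & 0xffff),
 * and masking the hash selects the primary PTE group.  Hypothetical
 * helper; the real lookup is in the hash-table code.
 */
static inline unsigned long example_pteg_index(unsigned long vsid,
					       unsigned long ea)
{
	unsigned long vpn = (vsid << 16) | ((ea >> 12) & 0xffff);

	return hpt_hash(vpn, 0) & htab_data.htab_hash_mask;
}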
static inline void __tlbie(unsigned long va, int large)
{
	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	if (large)
		asm volatile("tlbie %0,1" : : "r"(va) : "memory");
	else
		asm volatile("tlbie %0,0" : : "r"(va) : "memory");
}
static inline void tlbie(unsigned long va, int large)
{
	asm volatile("ptesync": : :"memory");
	__tlbie(va, large);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
static inline void __tlbiel(unsigned long va)
{
	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	/*
	 * Thanks to Alan Modra we are now able to use machine specific
	 * assembly instructions (like tlbiel) by using the gas -many flag.
	 * However we have to support older toolchains so for the moment
	 * we hardwire it.
	 */
#if 0
	asm volatile("tlbiel %0" : : "r"(va) : "memory");
#else
	asm volatile(".long 0x7c000224 | (%0 << 11)" : : "r"(va) : "memory");
#endif
}
static inline void tlbiel(unsigned long va)
{
	asm volatile("ptesync": : :"memory");
	__tlbiel(va);
	asm volatile("ptesync": : :"memory");
}
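/*
 * Illustration only: choosing between the two invalidation flavours.
 * tlbiel() invalidates only the local cpu's TLB and needs no broadcast,
 * while tlbie() broadcasts and must be followed by tlbsync (done inside
 * tlbie() above).  Hypothetical helper, not used elsewhere.
 */
static inline void example_invalidate(unsigned long va, int local)
{
	if (local)
		tlbiel(va);	/* local cpu only */
	else
		tlbie(va, 0);	/* broadcast, 4K page */
}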
/*
 * Handle a fault by adding an HPTE. If the address can't be determined
 * to be valid via Linux page tables, return 1. If handled return 0.
 */
extern int __hash_page(unsigned long ea, unsigned long access,
		       unsigned long vsid, pte_t *ptep, unsigned long trap,
		       int local);

extern void htab_finish_init(void);

#endif /* __ASSEMBLY__ */
/*
 * Location of cpu0's segment table
 */
#define STAB0_PAGE	0x9
#define STAB0_PHYS_ADDR	(STAB0_PAGE<<PAGE_SHIFT)
#define STAB0_VIRT_ADDR	(KERNELBASE+STAB0_PHYS_ADDR)

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
/* Bits in the SLB ESID word */
#define SLB_ESID_V		0x0000000008000000	/* entry is valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_KS		0x0000000000000800
#define SLB_VSID_KP		0x0000000000000400
#define SLB_VSID_N		0x0000000000000200	/* no-execute */
#define SLB_VSID_L		0x0000000000000100	/* largepage (16M) */
#define SLB_VSID_C		0x0000000000000080	/* class */

#define SLB_VSID_KERNEL		(SLB_VSID_KP|SLB_VSID_C)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS)
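#ifndef __ASSEMBLY__
/*
 * Illustration only: composing the VSID word of a kernel SLB entry
 * from the bits above.  Hypothetical helper; the real SLB miss
 * handling builds these words in assembly.
 */
static inline unsigned long example_slb_vsid_kernel(unsigned long vsid)
{
	return (vsid << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
}
#endif /* __ASSEMBLY__ */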
#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS	36
#define VSID_MODULUS	((1UL<<VSID_BITS)-1)

#define CONTEXT_BITS	20
#define USER_ESID_BITS	15
/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low 36 bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx)	\
	lis	rx,VSID_MULTIPLIER@h;					\
	ori	rx,rx,VSID_MULTIPLIER@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS;					\
	clrldi	rt,rt,(64-VSID_BITS);					\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36)) */ \
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */		\
	add	rt,rt,rx
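#ifndef __ASSEMBLY__
/*
 * Illustration only: a C sketch of the same scramble function,
 * (protovsid * VSID_MULTIPLIER) % VSID_MODULUS, using the folding
 * trick described in the comment above (x % (2^n - 1) can be computed
 * by adding the high and low n-bit halves of x, then folding the
 * possible carry back in).  Hypothetical helper; the kernel uses the
 * asm macro above instead.
 */
static inline unsigned long example_vsid_scramble(unsigned long protovsid)
{
	unsigned long x = protovsid * VSID_MULTIPLIER;

	/* add the high and low VSID_BITS halves, as the asm does */
	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	/* fold the possible 2^36 carry, then mask to the low 36 bits */
	return (x + ((x + 1) >> VSID_BITS)) & VSID_MODULUS;
}
#endif /* __ASSEMBLY__ */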
#endif /* _PPC64_MMU_H_ */