/*
 * PowerPC memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
16 #include <linux/config.h>
21 /* Time to allow for more things here */
22 typedef unsigned long mm_context_id_t;
25 #ifdef CONFIG_HUGETLB_PAGE
26 u16 htlb_segs; /* bitmask */
30 #ifdef CONFIG_HUGETLB_PAGE
31 #define KERNEL_LOW_HPAGES .htlb_segs = 0,
33 #define KERNEL_LOW_HPAGES
36 #define KERNEL_CONTEXT(ea) ({ \
37 mm_context_t ctx = { .id = REGION_ID(ea), KERNEL_LOW_HPAGES}; \
41 unsigned long esid: 36; /* Effective segment ID */
42 unsigned long resv0:20; /* Reserved */
43 unsigned long v: 1; /* Entry valid (v=1) or invalid */
44 unsigned long resv1: 1; /* Reserved */
45 unsigned long ks: 1; /* Supervisor (privileged) state storage key */
46 unsigned long kp: 1; /* Problem state storage key */
47 unsigned long n: 1; /* No-execute if n=1 */
48 unsigned long resv2: 3; /* padding to a 64b boundary */
52 unsigned long vsid: 52; /* Virtual segment ID */
53 unsigned long resv0:12; /* Padding to a 64b boundary */
68 /* Hardware Page Table Entry */
70 #define HPTES_PER_GROUP 8
73 unsigned long avpn:57; /* vsid | api == avpn */
74 unsigned long : 2; /* Software use */
75 unsigned long bolted: 1; /* HPTE is "bolted" */
76 unsigned long lock: 1; /* lock on pSeries SMP */
77 unsigned long l: 1; /* Virtual page is large (L=1) or 4 KB (L=0) */
78 unsigned long h: 1; /* Hash function identifier */
79 unsigned long v: 1; /* Valid (v=1) or invalid (v=0) */
83 unsigned long pp0: 1; /* Page protection bit 0 */
84 unsigned long ts: 1; /* Tag set bit */
85 unsigned long rpn: 50; /* Real page number */
86 unsigned long : 2; /* Reserved */
87 unsigned long ac: 1; /* Address compare */
88 unsigned long r: 1; /* Referenced */
89 unsigned long c: 1; /* Changed */
90 unsigned long w: 1; /* Write-thru cache mode */
91 unsigned long i: 1; /* Cache inhibited */
92 unsigned long m: 1; /* Memory coherence required */
93 unsigned long g: 1; /* Guarded */
94 unsigned long n: 1; /* No-execute */
95 unsigned long pp: 2; /* Page protection bits 1:2 */
99 char padding[6]; /* padding */
100 unsigned long : 6; /* padding */
101 unsigned long flags: 10; /* HPTE flags */
106 unsigned long dword0;
111 unsigned long dword1;
113 Hpte_dword1_flags flags;
117 /* Values for PP (assumes Ks=0, Kp=1) */
118 /* pp0 will always be 0 for linux */
119 #define PP_RWXX 0 /* Supervisor read/write, User none */
120 #define PP_RWRX 1 /* Supervisor read/write, User read */
121 #define PP_RWRW 2 /* Supervisor read/write, User read/write */
122 #define PP_RXRX 3 /* Supervisor read, User read */
127 unsigned long htab_num_ptegs;
128 unsigned long htab_hash_mask;
129 unsigned long next_round_robin;
130 unsigned long last_kernel_address;
133 extern HTAB htab_data;
135 void invalidate_hpte( unsigned long slot );
136 long select_hpte_slot( unsigned long vpn );
137 void create_valid_hpte( unsigned long slot, unsigned long vpn,
138 unsigned long prpn, unsigned hash,
139 void * ptep, unsigned hpteflags,
142 #define PD_SHIFT (10+12) /* Page directory */
143 #define PD_MASK 0x02FF
144 #define PT_SHIFT (12) /* Page Table */
145 #define PT_MASK 0x02FF
147 #define LARGE_PAGE_SHIFT 24
149 static inline unsigned long hpt_hash(unsigned long vpn, int large)
162 return (vsid & 0x7fffffffffUL) ^ page;
165 static inline void __tlbie(unsigned long va, int large)
167 /* clear top 16 bits, non SLS segment */
168 va &= ~(0xffffULL << 48);
171 asm volatile("tlbie %0,1" : : "r"(va) : "memory");
173 asm volatile("tlbie %0,0" : : "r"(va) : "memory");
176 static inline void tlbie(unsigned long va, int large)
178 asm volatile("ptesync": : :"memory");
180 asm volatile("eieio; tlbsync; ptesync": : :"memory");
183 static inline void __tlbiel(unsigned long va)
185 /* clear top 16 bits, non SLS segment */
186 va &= ~(0xffffULL << 48);
189 * Thanks to Alan Modra we are now able to use machine specific
190 * assembly instructions (like tlbiel) by using the gas -many flag.
191 * However we have to support older toolchains so for the moment
195 asm volatile("tlbiel %0" : : "r"(va) : "memory");
197 asm volatile(".long 0x7c000224 | (%0 << 11)" : : "r"(va) : "memory");
201 static inline void tlbiel(unsigned long va)
203 asm volatile("ptesync": : :"memory");
205 asm volatile("ptesync": : :"memory");
/*
 * Handle a fault by adding an HPTE. If the address can't be determined
 * to be valid via Linux page tables, return 1. If handled return 0.
 */
212 extern int __hash_page(unsigned long ea, unsigned long access,
213 unsigned long vsid, pte_t *ptep, unsigned long trap,
216 extern void htab_finish_init(void);
218 #endif /* __ASSEMBLY__ */
/* Location of cpu0's segment table */
223 #define STAB0_PAGE 0x9
224 #define STAB0_PHYS_ADDR (STAB0_PAGE<<PAGE_SHIFT)
225 #define STAB0_VIRT_ADDR (KERNELBASE+STAB0_PHYS_ADDR)
227 #define SLB_NUM_BOLTED 2
228 #define SLB_CACHE_ENTRIES 8
230 /* Bits in the SLB ESID word */
231 #define SLB_ESID_V 0x0000000008000000 /* entry is valid */
233 /* Bits in the SLB VSID word */
234 #define SLB_VSID_SHIFT 12
235 #define SLB_VSID_KS 0x0000000000000800
236 #define SLB_VSID_KP 0x0000000000000400
237 #define SLB_VSID_N 0x0000000000000200 /* no-execute */
238 #define SLB_VSID_L 0x0000000000000100 /* largepage (4M) */
239 #define SLB_VSID_C 0x0000000000000080 /* class */
241 #define SLB_VSID_KERNEL (SLB_VSID_KP|SLB_VSID_C)
242 #define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS)
244 #define VSID_RANDOMIZER ASM_CONST(42470972311)
245 #define VSID_MASK 0xfffffffffUL
246 /* Because we never access addresses below KERNELBASE as kernel
247 * addresses, this VSID is never used for anything real, and will
248 * never have pages hashed into it */
249 #define BAD_VSID ASM_CONST(0)
251 /* Block size masks */
252 #define BL_128K 0x000
253 #define BL_256K 0x001
254 #define BL_512K 0x003
262 #define BL_128M 0x3FF
263 #define BL_256M 0x7FF
265 /* Used to set up SDR1 register */
266 #define HASH_TABLE_SIZE_64K 0x00010000
267 #define HASH_TABLE_SIZE_128K 0x00020000
268 #define HASH_TABLE_SIZE_256K 0x00040000
269 #define HASH_TABLE_SIZE_512K 0x00080000
270 #define HASH_TABLE_SIZE_1M 0x00100000
271 #define HASH_TABLE_SIZE_2M 0x00200000
272 #define HASH_TABLE_SIZE_4M 0x00400000
273 #define HASH_TABLE_MASK_64K 0x000
274 #define HASH_TABLE_MASK_128K 0x001
275 #define HASH_TABLE_MASK_256K 0x003
276 #define HASH_TABLE_MASK_512K 0x007
277 #define HASH_TABLE_MASK_1M 0x00F
278 #define HASH_TABLE_MASK_2M 0x01F
279 #define HASH_TABLE_MASK_4M 0x03F
/* These are the Ks and Kp from the PowerPC books. For proper operation,
 * Ks = 0, Kp = 1.
 */
285 #define MI_Ks 0x80000000 /* Should not be set */
286 #define MI_Kp 0x40000000 /* Should always be set */
/* The effective page number register. When read, contains the information
 * about the last instruction TLB miss. When MI_RPN is written, bits in
 * this register are used to create the TLB entry.
 */
293 #define MI_EPNMASK 0xfffff000 /* Effective page number for entry */
294 #define MI_EVALID 0x00000200 /* Entry is valid */
295 #define MI_ASIDMASK 0x0000000f /* ASID match value */
296 /* Reset value is undefined */
/* A "level 1" or "segment" or whatever you want to call it register.
 * For the instruction TLB, it contains bits that get loaded into the
 * TLB entry when the MI_RPN is written.
 */
303 #define MI_APG 0x000001e0 /* Access protection group (0) */
304 #define MI_GUARDED 0x00000010 /* Guarded storage */
305 #define MI_PSMASK 0x0000000c /* Mask of page size bits */
306 #define MI_PS8MEG 0x0000000c /* 8M page size */
307 #define MI_PS512K 0x00000004 /* 512K page size */
308 #define MI_PS4K_16K 0x00000000 /* 4K or 16K page size */
309 #define MI_SVALID 0x00000001 /* Segment entry is valid */
310 /* Reset value is undefined */
/* Real page number. Defined by the pte. Writing this register
 * causes a TLB entry to be created for the instruction TLB, using
 * additional information from the MI_EPN, and MI_TWC registers.
 */
/* Define an RPN value for mapping kernel memory to large virtual
 * pages for boot initialization. This has real page number of 0,
 * large page size, shared page, cache enabled, and valid.
 * Also mark all subpages valid and write access.
 */
323 #define MI_BOOTINIT 0x000001fd
325 #define MD_CTR 792 /* Data TLB control register */
326 #define MD_GPM 0x80000000 /* Set domain manager mode */
327 #define MD_PPM 0x40000000 /* Set subpage protection */
328 #define MD_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */
329 #define MD_WTDEF 0x10000000 /* Set writethrough when MMU dis */
330 #define MD_RSV4I 0x08000000 /* Reserve 4 TLB entries */
331 #define MD_TWAM 0x04000000 /* Use 4K page hardware assist */
332 #define MD_PPCS 0x02000000 /* Use MI_RPN prob/priv state */
333 #define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */
334 #define MD_RESETVAL 0x04000000 /* Value of register at reset */
336 #define M_CASID 793 /* Address space ID (context) to match */
337 #define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */
/* These are the Ks and Kp from the PowerPC books. For proper operation,
 * Ks = 0, Kp = 1.
 */
344 #define MD_Ks 0x80000000 /* Should not be set */
345 #define MD_Kp 0x40000000 /* Should always be set */
/* The effective page number register. When read, contains the information
 * about the last instruction TLB miss. When MD_RPN is written, bits in
 * this register are used to create the TLB entry.
 */
352 #define MD_EPNMASK 0xfffff000 /* Effective page number for entry */
353 #define MD_EVALID 0x00000200 /* Entry is valid */
354 #define MD_ASIDMASK 0x0000000f /* ASID match value */
355 /* Reset value is undefined */
/* The pointer to the base address of the first level page table.
 * During a software tablewalk, reading this register provides the address
 * of the entry associated with MD_EPN.
 */
362 #define M_L1TB 0xfffff000 /* Level 1 table base address */
363 #define M_L1INDX 0x00000ffc /* Level 1 index, when read */
364 /* Reset value is undefined */
/* A "level 1" or "segment" or whatever you want to call it register.
 * For the data TLB, it contains bits that get loaded into the TLB entry
 * when the MD_RPN is written. It also provides the hardware assist
 * for finding the PTE address during software tablewalk.
 */
372 #define MD_L2TB 0xfffff000 /* Level 2 table base address */
373 #define MD_L2INDX 0xfffffe00 /* Level 2 index (*pte), when read */
374 #define MD_APG 0x000001e0 /* Access protection group (0) */
375 #define MD_GUARDED 0x00000010 /* Guarded storage */
376 #define MD_PSMASK 0x0000000c /* Mask of page size bits */
377 #define MD_PS8MEG 0x0000000c /* 8M page size */
378 #define MD_PS512K 0x00000004 /* 512K page size */
379 #define MD_PS4K_16K 0x00000000 /* 4K or 16K page size */
380 #define MD_WT 0x00000002 /* Use writethrough page attribute */
381 #define MD_SVALID 0x00000001 /* Segment entry is valid */
382 /* Reset value is undefined */
/* Real page number. Defined by the pte. Writing this register
 * causes a TLB entry to be created for the data TLB, using
 * additional information from the MD_EPN, and MD_TWC registers.
 */
/* This is a temporary storage register that could be used to save
 * a processor working register during a tablewalk.
 */
396 #endif /* _PPC64_MMU_H_ */