/*
 * arch/ppc64/mm/slb_low.S
 *
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/offsets.h>
#include <asm/cputable.h>
/* void slb_allocate(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 *	r3 = faulting address, r13 = PACA
 *	r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate)
	/*
	 * First find a slot, round robin. Previously we tried to find
	 * a free slot first but that took too long. Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
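	/*
	 * The round-robin pointer wraps back to SLB_NUM_BOLTED rather
	 * than 0, so the bolted entries at the bottom of the SLB are
	 * never chosen as cast-out victims.
	 */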
	ld	r10,PACASTABRR(r13)
3:
	addi	r10,r10,1
	/* use a cpu feature mask if we ever change our slb size */
	cmpldi	r10,SLB_NUM_ENTRIES

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	/*
	 * Never cast out the segment for our kernel stack. Since we
	 * don't invalidate the ERAT we could have a valid translation
	 * for the kernel stack during the first part of exception exit
	 * which gets invalidated due to a tlbie from another cpu at a
	 * non-recoverable point (after setting srr0/1) - Anton
	 */
	slbmfee	r11,r10
	srdi	r11,r11,27
	/*
	 * Use paca->ksave as the value of the kernel stack pointer,
	 * because this is valid at all times.
	 * The >> 27 (rather than >> 28) is so that the LSB is the
	 * valid bit - this way we check valid and ESID in one compare.
	 * In order to completely close the tiny race in the context
	 * switch (between updating r1 and updating paca->ksave),
	 * we check against both r1 and paca->ksave.
	 */
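	/*
	 * (r1 >> 27) leaves (ESID << 1) | (bit 27 of r1) in the low bits;
	 * the ori below forces that LSB to 1, so the result equals the
	 * (slbmfee >> 27) value exactly when the entry is valid and maps
	 * the same ESID, whatever r1's own bit 27 happened to be.
	 */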
	srdi	r9,r1,27
	ori	r9,r9,1			/* mangle SP for later compare */
	cmpd	r9,r11
	beq-	3b
	ld	r9,PACAKSAVE(r13)
	srdi	r9,r9,27
	ori	r9,r9,1
	cmpd	r9,r11
	beq-	3b

	std	r10,PACASTABRR(r13)
	/* r3 = faulting address, r10 = entry */

	srdi	r9,r3,60		/* get region */
	srdi	r3,r3,28		/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp KERNELBASE for later use */
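	/* e.g. EA 0xc000000000400000 -> region 0xc, esid 0xc00000000 */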
	/* r9 = region, r3 = esid, cr7 = <>KERNELBASE */

	rldicr.	r11,r3,32,16
	bne-	8f			/* invalid ea bits set */
	addi	r11,r9,-1
	cmpldi	r11,0xb
	blt-	8f			/* invalid region */
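	/* only region 0x0 (user) and regions 0xc-0xf (kernel) are accepted */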
	/* r9 = region, r3 = esid, r10 = entry, cr7 = <>KERNELBASE */

	blt	cr7,0f			/* user or kernel? */

	/* kernel address */
	li	r11,SLB_VSID_KERNEL
BEGIN_FTR_SECTION
	bne	cr7,9f
	li	r11,(SLB_VSID_KERNEL|SLB_VSID_L)
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
	b	9f

0:	/* user address */
	li	r11,SLB_VSID_USER
#ifdef CONFIG_HUGETLB_PAGE
BEGIN_FTR_SECTION
	/* check against the hugepage ranges */
	cmpldi	r3,(TASK_HPAGE_END>>SID_SHIFT)
	bge	6f			/* >= TASK_HPAGE_END */
	cmpldi	r3,(TASK_HPAGE_BASE>>SID_SHIFT)
	bge	5f			/* TASK_HPAGE_BASE..TASK_HPAGE_END */
	cmpldi	r3,16
	bge	6f			/* 4GB..TASK_HPAGE_BASE */

	lhz	r9,PACAHTLBSEGS(r13)
	srd	r9,r9,r3
	andi.	r9,r9,1
	beq	6f

5:	/* this is a hugepage user address */
	li	r11,(SLB_VSID_USER|SLB_VSID_L)
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
#endif /* CONFIG_HUGETLB_PAGE */
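	/*
	 * Two user hugepage ranges get the large-page (L) bit: the fixed
	 * TASK_HPAGE_BASE..TASK_HPAGE_END window, and any 256MB segment
	 * below 4GB whose bit is set in the PACAHTLBSEGS bitmap.
	 */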
6:	ld	r9,PACACONTEXTID(r13)

9:	/* r9 = "context", r3 = esid, r11 = flags, r10 = entry */

	rldimi	r9,r3,15,0		/* r9 = VSID ordinal */
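	/*
	 * The rldimi above forms ordinal = (esid << 15) | (context & 0x7fff),
	 * a per-segment unique input for the VSID scramble below.
	 */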
7:	rldimi	r10,r3,28,0		/* r10 = ESID<<28 | entry */
	oris	r10,r10,SLB_ESID_V@h	/* r10 |= SLB_ESID_V */
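	/* 7: is also the re-entry point from the invalid-EA path at 8: below */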
	/* r9 = ordinal, r3 = esid, r11 = flags, r10 = esid_data */

	li	r3,VSID_RANDOMIZER@higher
	sldi	r3,r3,32
	oris	r3,r3,VSID_RANDOMIZER@h
	ori	r3,r3,VSID_RANDOMIZER@l
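	/*
	 * The li/sldi/oris/ori sequence above builds the 64-bit
	 * VSID_RANDOMIZER constant piecewise, 16 bits at a time, since
	 * PowerPC has no 64-bit immediate load.
	 */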
	mulld	r9,r3,r9		/* r9 = ordinal * VSID_RANDOMIZER */
	clrldi	r9,r9,28		/* r9 &= VSID_MASK */
	sldi	r9,r9,SLB_VSID_SHIFT	/* r9 <<= SLB_VSID_SHIFT */
	or	r9,r9,r11		/* r9 |= flags */

	/* r9 = vsid_data, r10 = esid_data, cr7 = <>KERNELBASE */
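	/*
	 * These are the two operands slbmte expects:
	 *   vsid_data (RS): VSID << SLB_VSID_SHIFT | access/size flags
	 *   esid_data (RB): ESID << 28 | SLB_ESID_V | entry index
	 */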
	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r9,r10

	bgelr	cr7			/* we're done for kernel addresses */
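	/*
	 * Only user SLB entries are recorded in the cache below; the
	 * context switch path walks it to flush the outgoing task's
	 * user segments cheaply.
	 */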
	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,1		/* r11 = offset * sizeof(u16) */
	rldicl	r10,r10,36,28		/* get low 16 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u16 *)paca + offset */
	sth	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f
1:					/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	blr
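	/*
	 * Invalid EA: re-enter at 7: with ordinal 0, which scrambles to
	 * VSID 0 (BAD_VSID). The entry is still installed so the access
	 * takes a normal page fault rather than endlessly re-entering
	 * the SLB miss handler.
	 */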
8:	/* invalid EA */
	li	r9,0			/* 0 VSID ordinal -> BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	7b