* a free slot first but that took too long. Unfortunately we
 * don't have any LRU information to help us choose a slot.
*/
+#ifdef CONFIG_PPC_ISERIES
+ /*
+ * On iSeries, the "bolted" stack segment can be cast out on
+ * shared processor switch so we need to check for a miss on
+ * it and restore it to the right slot.
+ */
+ ld r9,PACAKSAVE(r13)
+ clrrdi r9,r9,28
+ clrrdi r11,r3,28
+ li r10,SLB_NUM_BOLTED-1 /* Stack goes in last bolted slot */
+ cmpld r9,r11
+ beq 3f
+#endif /* CONFIG_PPC_ISERIES */
+
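+ /* round-robin allocation: PACASTABRR holds the index of the last SLB slot used */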
ld r10,PACASTABRR(r13)
-3:
addi r10,r10,1
/* use a cpu feature mask if we ever change our slb size */
cmpldi r10,SLB_NUM_ENTRIES
blt+ 4f
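+ /* wrapped past the last entry: restart after the bolted slots, which are never cast out */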
li r10,SLB_NUM_BOLTED
- /*
- * Never cast out the segment for our kernel stack. Since we
- * dont invalidate the ERAT we could have a valid translation
- * for the kernel stack during the first part of exception exit
- * which gets invalidated due to a tlbie from another cpu at a
- * non recoverable point (after setting srr0/1) - Anton
- */
-4: slbmfee r11,r10
- srdi r11,r11,27
- /*
- * Use paca->ksave as the value of the kernel stack pointer,
- * because this is valid at all times.
- * The >> 27 (rather than >> 28) is so that the LSB is the
- * valid bit - this way we check valid and ESID in one compare.
- * In order to completely close the tiny race in the context
- * switch (between updating r1 and updating paca->ksave),
- * we check against both r1 and paca->ksave.
- */
- srdi r9,r1,27
- ori r9,r9,1 /* mangle SP for later compare */
- cmpd r11,r9
- beq- 3b
- ld r9,PACAKSAVE(r13)
- srdi r9,r9,27
- ori r9,r9,1
- cmpd r11,r9
- beq- 3b
-
+4:
std r10,PACASTABRR(r13)
-
+3:
/* r3 = faulting address, r10 = entry */
srdi r9,r3,60 /* get region */
srdi r3,r3,28 /* get esid */
cmpldi cr7,r9,0xc /* cmp KERNELBASE for later use */
- /* r9 = region, r3 = esid, cr7 = <>KERNELBASE */
-
- rldicr. r11,r3,32,16
- bne- 8f /* invalid ea bits set */
- addi r11,r9,-1
- cmpldi r11,0xb
- blt- 8f /* invalid region */
+ rldimi r10,r3,28,0 /* r10= ESID<<28 | entry */
+ oris r10,r10,SLB_ESID_V@h /* r10 |= SLB_ESID_V */
- /* r9 = region, r3 = esid, r10 = entry, cr7 = <>KERNELBASE */
+ /* r3 = esid, r10 = esid_data, cr7 = <>KERNELBASE */
blt cr7,0f /* user or kernel? */
- /* kernel address */
+ /* kernel address: proto-VSID = ESID */
+ /* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
+ * this code will generate the proto-VSID 0xfffffffff for the
+ * top segment. That's OK: the scramble below will translate
+ * it to VSID 0, which is reserved as a bad VSID - one which
+ * will never have any pages in it. */
li r11,SLB_VSID_KERNEL
BEGIN_FTR_SECTION
 bne cr7,9f
 li r11,(SLB_VSID_KERNEL|SLB_VSID_L) /* large-page mapping for the kernel linear region */
END_FTR_SECTION_IFSET(CPU_FTR_16M_PAGE)
 b 9f
-0: /* user address */
+0: /* user address: proto-VSID = context<<15 | ESID */
li r11,SLB_VSID_USER
+
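+ /* the user ESID must fit in 13 bits; any higher bits mean the EA is out of range */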
+ srdi. r9,r3,13
+ bne- 8f /* invalid ea bits set */
+
#ifdef CONFIG_HUGETLB_PAGE
BEGIN_FTR_SECTION
/* check against the hugepage ranges */
#endif /* CONFIG_HUGETLB_PAGE */
6: ld r9,PACACONTEXTID(r13)
+ rldimi r3,r9,USER_ESID_BITS,0
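+ /* r3 = proto-VSID = context << USER_ESID_BITS | ESID */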
-9: /* r9 = "context", r3 = esid, r11 = flags, r10 = entry */
-
- rldimi r9,r3,15,0 /* r9= VSID ordinal */
-
-7: rldimi r10,r3,28,0 /* r10= ESID<<28 | entry */
- oris r10,r10,SLB_ESID_V@h /* r10 |= SLB_ESID_V */
-
- /* r9 = ordinal, r3 = esid, r11 = flags, r10 = esid_data */
-
- li r3,VSID_RANDOMIZER@higher
- sldi r3,r3,32
- oris r3,r3,VSID_RANDOMIZER@h
- ori r3,r3,VSID_RANDOMIZER@l
-
- mulld r9,r3,r9 /* r9 = ordinal * VSID_RANDOMIZER */
- clrldi r9,r9,28 /* r9 &= VSID_MASK */
- sldi r9,r9,SLB_VSID_SHIFT /* r9 <<= SLB_VSID_SHIFT */
- or r9,r9,r11 /* r9 |= flags */
+9: /* r3 = proto-VSID, r11 = flags, r10 = esid_data, cr7 = <>KERNELBASE */
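+ /* VSID = (proto-VSID * VSID_MULTIPLIER) mod VSID_MODULUS; r9 is clobbered as scratch */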
+ ASM_VSID_SCRAMBLE(r3,r9)
- /* r9 = vsid_data, r10 = esid_data, cr7 = <>KERNELBASE */
+ rldimi r11,r3,SLB_VSID_SHIFT,16 /* combine VSID and flags */
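+ /* r11 = vsid_data: VSID << SLB_VSID_SHIFT with the flags in the low bits, ready for slbmte */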
/*
* No need for an isync before or after this slbmte. The exception
* we enter with and the rfid we exit with are context synchronizing.
*/
- slbmte r9,r10
+ slbmte r11,r10
bgelr cr7 /* we're done for kernel addresses */
blr
8: /* invalid EA */
- li r9,0 /* 0 VSID ordinal -> BAD_VSID */
+ li r3,0 /* BAD_VSID */
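+ /* proto-VSID 0 scrambles to VSID 0, the reserved bad VSID */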
li r11,SLB_VSID_USER /* flags don't much matter */
- b 7b
+ b 9b