static void make_slbe(unsigned long esid, unsigned long vsid, int large,
int kernel_segment);
+/*
+ * slb_add_bolted - (re)insert the bolted SLB entry for the first
+ * vmalloc segment.  As the in-line comment below notes, modules end
+ * up in this segment, so it is hit very heavily and is kept bolted
+ * rather than subject to round-robin castout.
+ *
+ * Caller must have interrupts disabled (enforced by the WARN_ON).
+ *
+ * next_round_robin is forced to 1 before make_slbe() so the bolted
+ * vmalloc mapping lands in SLB entry 1 (entry 0 presumably holds the
+ * bolted kernel segment -- NOTE(review): confirm against make_slbe()).
+ *
+ * make_slbe(esid, vsid, 0, 1): large=0 (4K pages), kernel_segment=1,
+ * per the prototype above.
+ *
+ * No-op on iSeries, which manages its SLB differently.
+ */
+static inline void slb_add_bolted(void)
+{
+#ifndef CONFIG_PPC_ISERIES
+ unsigned long esid = GET_ESID(VMALLOCBASE);
+ unsigned long vsid = get_kernel_vsid(VMALLOCBASE);
+
+ WARN_ON(!irqs_disabled());
+
+ /*
+ * Bolt in the first vmalloc segment. Since modules end
+ * up there it gets hit very heavily.
+ */
+ get_paca()->xStab_data.next_round_robin = 1;
+ make_slbe(esid, vsid, 0, 1);
+#endif
+}
+
/*
* Build an entry for the base kernel segment and put it into
* the segment table or SLB. All other segment table or SLB
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
+ get_paca()->xStab_data.next_round_robin = 0;
make_slbe(esid, vsid, seg0_largepages, 1);
asm volatile("isync":::"memory");
#endif
+
+ slb_add_bolted();
} else {
asm volatile("isync; slbia; isync":::"memory");
make_ste(stab, esid, vsid);
unsigned long word0;
slb_dword1 data;
} vsid_data;
+ struct paca_struct *lpaca = get_paca();
/*
* We take the next entry, round robin. Previously we tried
* for the kernel stack during the first part of exception exit
* which gets invalidated due to a tlbie from another cpu at a
* non recoverable point (after setting srr0/1) - Anton
+ *
+ * paca Ksave is always valid (even when on the interrupt stack)
+ * so we use that.
*/
- castout_entry = get_paca()->xStab_data.next_round_robin;
+ castout_entry = lpaca->xStab_data.next_round_robin;
do {
entry = castout_entry;
castout_entry++;
- if (castout_entry >= naca->slb_size)
- castout_entry = 1;
+ /*
+ * We bolt in the first kernel segment and the first
+ * vmalloc segment.
+ */
+ if (castout_entry >= SLB_NUM_ENTRIES)
+ castout_entry = 2;
asm volatile("slbmfee %0,%1" : "=r" (esid_data) : "r" (entry));
} while (esid_data.data.v &&
- esid_data.data.esid == GET_ESID(__get_SP()));
+ esid_data.data.esid == GET_ESID(lpaca->xKsave));
- get_paca()->xStab_data.next_round_robin = castout_entry;
+ lpaca->xStab_data.next_round_robin = castout_entry;
/* slbie not needed as the previous mapping is still valid. */
}
esid = GET_ESID(ea);
+#ifndef CONFIG_PPC_ISERIES
+ BUG_ON((esid << SID_SHIFT) == VMALLOCBASE);
+#endif
__slb_allocate(esid, vsid, context);
return 0;
slb_dword0 data;
} esid_data;
-
if (offset <= NR_STAB_CACHE_ENTRIES) {
int i;
asm volatile("isync" : : : "memory");
for (i = 0; i < offset; i++) {
esid_data.word0 = 0;
esid_data.data.esid = __get_cpu_var(stab_cache[i]);
+ BUG_ON(esid_data.data.esid == GET_ESID(VMALLOCBASE));
asm volatile("slbie %0" : : "r" (esid_data));
}
asm volatile("isync" : : : "memory");
} else {
asm volatile("isync; slbia; isync" : : : "memory");
+ slb_add_bolted();
}
/* Workaround POWER5 < DD2.1 issue */