static void make_slbe(unsigned long esid, unsigned long vsid, int large,
int kernel_segment);
+static inline void slb_add_bolted(void)
+{
+#ifndef CONFIG_PPC_ISERIES
+ unsigned long esid = GET_ESID(VMALLOCBASE);
+ unsigned long vsid = get_kernel_vsid(VMALLOCBASE);
+
+ WARN_ON(!irqs_disabled());
+
+ /*
+ * Bolt in the first vmalloc segment. Since modules end
+ * up there, it gets hit very heavily.
+ */
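+ /*
+ * make_slbe() takes its slot from stab_next_rr: slot 0 already
+ * holds the bolted kernel segment, so this entry lands in slot 1.
+ * The castout code below skips both bolted slots.
+ */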
+ get_paca()->stab_next_rr = 1;
+ make_slbe(esid, vsid, 0, 1);
+#endif
+}
+
/*
* Build an entry for the base kernel segment and put it into
* the segment table or SLB. All other segment table or SLB
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
+ get_paca()->stab_next_rr = 0;
make_slbe(esid, vsid, seg0_largepages, 1);
asm volatile("isync":::"memory");
#endif
+
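+ /* With slot 0 in place, bolt the first vmalloc segment into slot 1. */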
+ slb_add_bolted();
} else {
asm volatile("isync; slbia; isync":::"memory");
make_ste(stab, esid, vsid);
* Could not find empty entry, pick one with a round robin selection.
* Search all entries in the two groups.
*/
- castout_entry = get_paca()->xStab_data.next_round_robin;
+ castout_entry = get_paca()->stab_next_rr;
for (i = 0; i < 16; i++) {
if (castout_entry < 8) {
global_entry = (esid & 0x1f) << 3;
castout_entry = (castout_entry + 1) & 0xf;
}
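+ /* Remember where to resume the round robin search next time. */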
- get_paca()->xStab_data.next_round_robin = (castout_entry + 1) & 0xf;
+ get_paca()->stab_next_rr = (castout_entry + 1) & 0xf;
/* Modify the old entry to the new value. */
unsigned long offset;
int region_id = REGION_ID(esid << SID_SHIFT);
- stab_entry = make_ste(get_paca()->xStab_data.virt, esid, vsid);
+ stab_entry = make_ste(get_paca()->stab_addr, esid, vsid);
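+ /* Only user segment entries are cached for flushing; kernel STEs stay put. */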
if (region_id != USER_REGION_ID)
return;
/* Flush all user entries from the segment table of the current processor. */
void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
{
- STE *stab = (STE *) get_paca()->xStab_data.virt;
+ STE *stab = (STE *) get_paca()->stab_addr;
STE *ste;
unsigned long offset = __get_cpu_var(stab_cache_ptr);
unsigned long word0;
slb_dword1 data;
} vsid_data;
+ struct paca_struct *lpaca = get_paca();
/*
* We take the next entry, round robin. Never cast out the
* entry for our kernel stack: we could have a valid translation
* for the kernel stack during the first part of exception exit
* which gets invalidated due to a tlbie from another cpu at a
* non recoverable point (after setting srr0/1) - Anton
+ *
+ * paca Ksave (lpaca->kstack) is always valid, even when we are
+ * on the interrupt stack, so we use it instead of __get_SP().
*/
- castout_entry = get_paca()->xStab_data.next_round_robin;
+ castout_entry = lpaca->stab_next_rr;
do {
entry = castout_entry;
castout_entry++;
- if (castout_entry >= naca->slb_size)
- castout_entry = 1;
+ /*
+ * We bolt in the first kernel segment and the first
+ * vmalloc segment.
+ */
+ if (castout_entry >= SLB_NUM_ENTRIES)
+ castout_entry = 2;
asm volatile("slbmfee %0,%1" : "=r" (esid_data) : "r" (entry));
} while (esid_data.data.v &&
- esid_data.data.esid == GET_ESID(__get_SP()));
+ esid_data.data.esid == GET_ESID(lpaca->kstack));
- get_paca()->xStab_data.next_round_robin = castout_entry;
+ lpaca->stab_next_rr = castout_entry;
/* slbie not needed as the previous mapping is still valid. */
}
esid = GET_ESID(ea);
+#ifndef CONFIG_PPC_ISERIES
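+ /* The first vmalloc segment is bolted; it should never take an SLB miss. */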
+ BUG_ON((esid << SID_SHIFT) == VMALLOCBASE);
+#endif
__slb_allocate(esid, vsid, context);
return 0;
slb_dword0 data;
} esid_data;
-
if (offset <= NR_STAB_CACHE_ENTRIES) {
int i;
asm volatile("isync" : : : "memory");
for (i = 0; i < offset; i++) {
esid_data.word0 = 0;
esid_data.data.esid = __get_cpu_var(stab_cache[i]);
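+ /* The cache holds only user esids; never slbie the bolted vmalloc segment. */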
+ BUG_ON(esid_data.data.esid == GET_ESID(VMALLOCBASE));
asm volatile("slbie %0" : : "r" (esid_data));
}
asm volatile("isync" : : : "memory");
} else {
asm volatile("isync; slbia; isync" : : : "memory");
+ slb_add_bolted();
}
/* Workaround POWER5 < DD2.1 issue */