/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/naca.h>
#include <asm/cputable.h>
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
static void make_slbe(unsigned long esid, unsigned long vsid, int large,
		      int kernel_segment);
/*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB.  All other segment table or SLB
 * entries are faulted in.
 */
void stab_initialize(unsigned long stab)
{
	unsigned long esid, vsid;
	int seg0_largepages = 0;

	esid = GET_ESID(KERNELBASE);
	vsid = get_kernel_vsid(esid << SID_SHIFT);

	if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
		seg0_largepages = 1;

	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
		/* Invalidate the entire SLB & all the ERATs */
#ifdef CONFIG_PPC_ISERIES
		asm volatile("isync; slbia; isync":::"memory");
#else
		asm volatile("isync":::"memory");
		asm volatile("slbmte %0,%0"::"r" (0) : "memory");
		asm volatile("isync; slbia; isync":::"memory");
		make_slbe(esid, vsid, seg0_largepages, 1);
		asm volatile("isync":::"memory");
#endif
	} else {
		asm volatile("isync; slbia; isync":::"memory");
		make_ste(stab, esid, vsid);

		/* Order update */
		asm volatile("sync":::"memory");
	}
}
/* Both the segment table and SLB code use the following cache */
#define NR_STAB_CACHE_ENTRIES 8
DEFINE_PER_CPU(long, stab_cache_ptr);
DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);
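
/*
 * Each newly allocated user segment is recorded in stab_cache, with
 * stab_cache_ptr counting the valid entries.  A value larger than
 * NR_STAB_CACHE_ENTRIES means the cache overflowed, so the next flush
 * must fall back to invalidating every user entry.
 */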

/*
 * Create a segment table entry for the given esid/vsid pair.
 */
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
	unsigned long entry, group, old_esid, castout_entry, i;
	unsigned int global_entry;
	STE *ste, *castout_ste;
	unsigned long kernel_segment = (REGION_ID(esid << SID_SHIFT) !=
					USER_REGION_ID);
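
	/*
	 * The segment table is made up of 128-byte groups of eight
	 * 16-byte STEs.  The low five bits of the ESID select the
	 * primary group; their complement selects the secondary group.
	 */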

	/* Search the primary group first. */
	global_entry = (esid & 0x1f) << 3;
	ste = (STE *)(stab | ((esid & 0x1f) << 7));

	/* Find an empty entry, if one exists. */
	for (group = 0; group < 2; group++) {
		for (entry = 0; entry < 8; entry++, ste++) {
			if (!(ste->dw0.dw0.v)) {
				ste->dw0.dword0 = 0;
				ste->dw1.dword1 = 0;
				ste->dw1.dw1.vsid = vsid;
				ste->dw0.dw0.esid = esid;
				ste->dw0.dw0.kp = 1;
				if (!kernel_segment)
					ste->dw0.dw0.ks = 1;
				asm volatile("eieio":::"memory");
				ste->dw0.dw0.v = 1;
				return (global_entry | entry);
			}
		}
		/* Now search the secondary group. */
		global_entry = ((~esid) & 0x1f) << 3;
		ste = (STE *)(stab | (((~esid) & 0x1f) << 7));
	}

	/*
	 * Could not find an empty entry, pick one with a round robin selection.
	 * Search all entries in the two groups.
	 */
	castout_entry = get_paca()->xStab_data.next_round_robin;
	for (i = 0; i < 16; i++) {
		if (castout_entry < 8) {
			global_entry = (esid & 0x1f) << 3;
			ste = (STE *)(stab | ((esid & 0x1f) << 7));
			castout_ste = ste + castout_entry;
		} else {
			global_entry = ((~esid) & 0x1f) << 3;
			ste = (STE *)(stab | (((~esid) & 0x1f) << 7));
			castout_ste = ste + (castout_entry - 8);
		}

		/* Don't cast out the first kernel segment */
		if (castout_ste->dw0.dw0.esid != GET_ESID(KERNELBASE))
			break;

		castout_entry = (castout_entry + 1) & 0xf;
	}

	get_paca()->xStab_data.next_round_robin = (castout_entry + 1) & 0xf;

	/* Modify the old entry to the new value. */

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	castout_ste->dw0.dw0.v = 0;
	asm volatile("sync" : : : "memory");	/* Order update */

	castout_ste->dw0.dword0 = 0;
	castout_ste->dw1.dword1 = 0;
	castout_ste->dw1.dw1.vsid = vsid;
	old_esid = castout_ste->dw0.dw0.esid;
	castout_ste->dw0.dw0.esid = esid;
	castout_ste->dw0.dw0.kp = 1;
	if (!kernel_segment)
		castout_ste->dw0.dw0.ks = 1;
	asm volatile("eieio" : : : "memory");	/* Order update */
	castout_ste->dw0.dw0.v = 1;
	asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
	/* Ensure completion of slbie */
	asm volatile("sync" : : : "memory");

	return (global_entry | (castout_entry & 0x7));
}
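
/*
 * Allocate a segment table entry for this esid/vsid pair on the current
 * cpu.  User entries are remembered in the per-cpu stab_cache so that
 * flush_stab() only has to invalidate those entries.
 */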
static inline void __ste_allocate(unsigned long esid, unsigned long vsid)
{
	unsigned char stab_entry;
	unsigned long offset;
	int region_id = REGION_ID(esid << SID_SHIFT);

	stab_entry = make_ste(get_paca()->xStab_data.virt, esid, vsid);

	if (region_id != USER_REGION_ID)
		return;

	offset = __get_cpu_var(stab_cache_ptr);
	if (offset < NR_STAB_CACHE_ENTRIES)
		__get_cpu_var(stab_cache[offset++]) = stab_entry;
	else
		offset = NR_STAB_CACHE_ENTRIES+1;
	__get_cpu_var(stab_cache_ptr) = offset;
}

/*
 * Allocate a segment table entry for the given ea.
 */
int ste_allocate(unsigned long ea)
{
	unsigned long vsid, esid;
	mm_context_t context;

	/* Check for invalid effective addresses. */
	if (!IS_VALID_EA(ea))
		return 1;

	/* Kernel or user address? */
	if (REGION_ID(ea) >= KERNEL_REGION_ID) {
		vsid = get_kernel_vsid(ea);
		context = KERNEL_CONTEXT(ea);
	} else {
		if (!current->mm)
			return 1;

		context = current->mm->context;
		vsid = get_vsid(context.id, ea);
	}

	esid = GET_ESID(ea);
	__ste_allocate(esid, vsid);
	/* Order update */
	asm volatile("sync":::"memory");

	return 0;
}

/*
 * preload some userspace segments into the segment table.
 */
static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;
	unsigned long pc_esid = GET_ESID(pc);
	unsigned long stack_esid = GET_ESID(stack);
	unsigned long unmapped_base_esid;
	unsigned long vsid;

	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

	unmapped_base_esid = GET_ESID(unmapped_base);

	if (!IS_VALID_EA(pc) || (REGION_ID(pc) >= KERNEL_REGION_ID))
		return;
	vsid = get_vsid(mm->context.id, pc);
	__ste_allocate(pc_esid, vsid);

	if (pc_esid == stack_esid)
		return;

	if (!IS_VALID_EA(stack) || (REGION_ID(stack) >= KERNEL_REGION_ID))
		return;
	vsid = get_vsid(mm->context.id, stack);
	__ste_allocate(stack_esid, vsid);

	if (pc_esid == unmapped_base_esid || stack_esid == unmapped_base_esid)
		return;

	if (!IS_VALID_EA(unmapped_base) ||
	    (REGION_ID(unmapped_base) >= KERNEL_REGION_ID))
		return;
	vsid = get_vsid(mm->context.id, unmapped_base);
	__ste_allocate(unmapped_base_esid, vsid);

	/* Order update */
	asm volatile("sync" : : : "memory");
}

/* Flush all user entries from the segment table of the current processor. */
void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
{
	STE *stab = (STE *) get_paca()->xStab_data.virt;
	STE *ste;
	unsigned long offset = __get_cpu_var(stab_cache_ptr);
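
	/*
	 * If the per-cpu cache did not overflow, only the entries recorded
	 * in it need to be invalidated; otherwise scan the whole table and
	 * invalidate every user entry.
	 */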

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	if (offset <= NR_STAB_CACHE_ENTRIES) {
		int i;
		for (i = 0; i < offset; i++) {
			ste = stab + __get_cpu_var(stab_cache[i]);
			ste->dw0.dw0.v = 0;
		}
	} else {
		unsigned long entry, ea;
		/* Invalidate all entries. */
		ste = stab;
		/* Never flush the first entry. */
		ste += 1;
		for (entry = 1;
		     entry < (PAGE_SIZE / sizeof(STE));
		     entry++, ste++) {
			ea = ste->dw0.dw0.esid << SID_SHIFT;
			if (ea < KERNELBASE)
				ste->dw0.dw0.v = 0;
		}
	}

	asm volatile("sync; slbia; sync":::"memory");
	__get_cpu_var(stab_cache_ptr) = 0;
	preload_stab(tsk, mm);
}

/*
 * SLB stuff
 */

/*
 * Create a segment buffer entry for the given esid/vsid pair.
 *
 * NOTE: A context synchronising instruction is required before and after
 * this, in the common case we use exception entry and rfid.
 */
static void make_slbe(unsigned long esid, unsigned long vsid, int large,
		      int kernel_segment)
{
	unsigned long entry, castout_entry;
	union { unsigned long word0; slb_dword0 data; } esid_data;
	union { unsigned long word0; slb_dword1 data; } vsid_data;

	/*
	 * We take the next entry, round robin. Previously we tried
	 * to find a free slot first but that took too long. Unfortunately
	 * we don't have any LRU information to help us choose a slot.
	 */

	/*
	 * Never cast out the segment for our kernel stack. Since we
	 * don't invalidate the ERAT we could have a valid translation
	 * for the kernel stack during the first part of exception exit
	 * which gets invalidated due to a tlbie from another cpu at a
	 * non recoverable point (after setting srr0/1) - Anton
	 */
	castout_entry = get_paca()->xStab_data.next_round_robin;
	do {
		entry = castout_entry;
		castout_entry++;
		if (castout_entry >= naca->slb_size)
			castout_entry = 1;
		asm volatile("slbmfee %0,%1" : "=r" (esid_data) : "r" (entry));
	} while (esid_data.data.v &&
		 esid_data.data.esid == GET_ESID(__get_SP()));

	get_paca()->xStab_data.next_round_robin = castout_entry;

	/* slbie not needed as the previous mapping is still valid. */

	/*
	 * Write the new SLB entry.
	 */
	vsid_data.word0 = 0;
	vsid_data.data.vsid = vsid;
	vsid_data.data.kp = 1;
	if (large)
		vsid_data.data.l = 1;
	if (kernel_segment)
		vsid_data.data.c = 1;
	else
		vsid_data.data.ks = 1;

	esid_data.word0 = 0;
	esid_data.data.esid = esid;
	esid_data.data.v = 1;
	esid_data.data.index = entry;

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	asm volatile("slbmte %0,%1" : : "r" (vsid_data), "r" (esid_data));
}
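
/*
 * Allocate an SLB entry for this esid/vsid pair on the current cpu.
 * For user segments the ESID is remembered in the per-cpu stab_cache
 * so that flush_slb() can slbie just those segments later.
 */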
static inline void __slb_allocate(unsigned long esid, unsigned long vsid,
				  mm_context_t context)
{
	int large = 0;
	int region_id = REGION_ID(esid << SID_SHIFT);
	unsigned long offset;

	if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) {
		if (region_id == KERNEL_REGION_ID)
			large = 1;
		else if (region_id == USER_REGION_ID)
			large = in_hugepage_area(context, esid << SID_SHIFT);
	}

	make_slbe(esid, vsid, large, region_id != USER_REGION_ID);

	if (region_id != USER_REGION_ID)
		return;

	offset = __get_cpu_var(stab_cache_ptr);
	if (offset < NR_STAB_CACHE_ENTRIES)
		__get_cpu_var(stab_cache[offset++]) = esid;
	else
		offset = NR_STAB_CACHE_ENTRIES+1;
	__get_cpu_var(stab_cache_ptr) = offset;
}

/*
 * Allocate an SLB entry for the given ea.
 */
int slb_allocate(unsigned long ea)
{
	unsigned long vsid, esid;
	mm_context_t context;

	/* Check for invalid effective addresses. */
	if (unlikely(!IS_VALID_EA(ea)))
		return 1;

	/* Kernel or user address? */
	if (REGION_ID(ea) >= KERNEL_REGION_ID) {
		context = KERNEL_CONTEXT(ea);
		vsid = get_kernel_vsid(ea);
	} else {
		if (unlikely(!current->mm))
			return 1;

		context = current->mm->context;
		vsid = get_vsid(context.id, ea);
	}

	esid = GET_ESID(ea);
	__slb_allocate(esid, vsid, context);

	return 0;
}

/*
 * preload some userspace segments into the SLB.
 */
static void preload_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;
	unsigned long pc_esid = GET_ESID(pc);
	unsigned long stack_esid = GET_ESID(stack);
	unsigned long unmapped_base_esid;
	unsigned long vsid;

	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;

	unmapped_base_esid = GET_ESID(unmapped_base);

	if (!IS_VALID_EA(pc) || (REGION_ID(pc) >= KERNEL_REGION_ID))
		return;
	vsid = get_vsid(mm->context.id, pc);
	__slb_allocate(pc_esid, vsid, mm->context);

	if (pc_esid == stack_esid)
		return;

	if (!IS_VALID_EA(stack) || (REGION_ID(stack) >= KERNEL_REGION_ID))
		return;
	vsid = get_vsid(mm->context.id, stack);
	__slb_allocate(stack_esid, vsid, mm->context);

	if (pc_esid == unmapped_base_esid || stack_esid == unmapped_base_esid)
		return;

	if (!IS_VALID_EA(unmapped_base) ||
	    (REGION_ID(unmapped_base) >= KERNEL_REGION_ID))
		return;
	vsid = get_vsid(mm->context.id, unmapped_base);
	__slb_allocate(unmapped_base_esid, vsid, mm->context);
}

/* Flush all user entries from the SLB of the current processor. */
void flush_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset = __get_cpu_var(stab_cache_ptr);
	union { unsigned long word0; slb_dword0 data; } esid_data;
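
	/*
	 * If the cache overflowed we no longer know which user segments
	 * are in the SLB, so flush it completely with slbia; otherwise
	 * slbie only the cached ESIDs.
	 */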
	if (offset <= NR_STAB_CACHE_ENTRIES) {
		int i;

		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			esid_data.word0 = 0;
			esid_data.data.esid = __get_cpu_var(stab_cache[i]);
			asm volatile("slbie %0" : : "r" (esid_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		asm volatile("isync; slbia; isync" : : : "memory");
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > NR_STAB_CACHE_ENTRIES) {
		/*
		 * flush segment in EEH region, we don't normally access
		 * addresses in this region.
		 */
		esid_data.word0 = 0;
		esid_data.data.esid = EEH_REGION_ID;
		asm volatile("slbie %0" : : "r" (esid_data));
	}

	__get_cpu_var(stab_cache_ptr) = 0;

	preload_slb(tsk, mm);
}