/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
static int make_ste(unsigned long stab, unsigned long esid,
                    unsigned long vsid);

void slb_initialize(void);
/*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB.  All other segment table or SLB
 * entries are faulted in.
 */
void stab_initialize(unsigned long stab)
{
        unsigned long vsid = get_kernel_vsid(KERNELBASE);

        if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
                slb_initialize();
        } else {
                asm volatile("isync; slbia; isync":::"memory");
                make_ste(stab, GET_ESID(KERNELBASE), vsid);
                asm volatile("sync":::"memory");        /* Order update */
        }
}
/* Both the segment table and SLB code use the following cache */
#define NR_STAB_CACHE_ENTRIES 8
DEFINE_PER_CPU(long, stab_cache_ptr);
DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);
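/*
 * __ste_allocate() records the slot of every user segment table entry it
 * creates in this per-CPU cache, so flush_stab() can usually invalidate
 * just those entries instead of scanning the whole table.
 */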
/*
 * Create a segment table entry for the given esid/vsid pair.
 */
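/*
 * The segment table is one 4K page holding 32 groups of eight 16-byte
 * STEs.  The low five bits of the ESID select the primary group; the
 * complement of those bits selects the secondary group that is searched
 * when the primary group is full.
 */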
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
        unsigned long entry, group, old_esid, castout_entry, i;
        unsigned int global_entry;
        STE *ste, *castout_ste;
        unsigned long kernel_segment = (REGION_ID(esid << SID_SHIFT) !=
                                        USER_REGION_ID);

        /* Search the primary group first. */
        global_entry = (esid & 0x1f) << 3;
        ste = (STE *)(stab | ((esid & 0x1f) << 7));
72 /* Find an empty entry, if one exists. */
73 for (group = 0; group < 2; group++) {
74 for (entry = 0; entry < 8; entry++, ste++) {
75 if (!(ste->dw0.dw0.v)) {
78 ste->dw1.dw1.vsid = vsid;
79 ste->dw0.dw0.esid = esid;
83 asm volatile("eieio":::"memory");
85 return (global_entry | entry);
88 /* Now search the secondary group. */
89 global_entry = ((~esid) & 0x1f) << 3;
90 ste = (STE *)(stab | (((~esid) & 0x1f) << 7));
        /*
         * Could not find empty entry, pick one with a round robin selection.
         * Search all entries in the two groups.
         */
        castout_entry = get_paca()->stab_rr;
        for (i = 0; i < 16; i++) {
                if (castout_entry < 8) {
                        global_entry = (esid & 0x1f) << 3;
                        ste = (STE *)(stab | ((esid & 0x1f) << 7));
                        castout_ste = ste + castout_entry;
                } else {
                        global_entry = ((~esid) & 0x1f) << 3;
                        ste = (STE *)(stab | (((~esid) & 0x1f) << 7));
                        castout_ste = ste + (castout_entry - 8);
                }

                /* Don't cast out the first kernel segment */
                if (castout_ste->dw0.dw0.esid != GET_ESID(KERNELBASE))
                        break;

                castout_entry = (castout_entry + 1) & 0xf;
        }

        get_paca()->stab_rr = (castout_entry + 1) & 0xf;
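        /*
         * Replacing a live entry: clear the valid bit and sync so the old
         * translation can no longer be loaded, rewrite both doublewords,
         * then set the valid bit again only after an eieio orders the
         * update.  The slbie below flushes any SLB entry that may still
         * cache the old segment.
         */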
        /* Modify the old entry to the new value. */

        /* Force previous translations to complete. DRENG */
        asm volatile("isync" : : : "memory");

        castout_ste->dw0.dw0.v = 0;
        asm volatile("sync" : : : "memory");    /* Order update */

        castout_ste->dw0.dword0 = 0;
        castout_ste->dw1.dword1 = 0;
        castout_ste->dw1.dw1.vsid = vsid;
        old_esid = castout_ste->dw0.dw0.esid;
        castout_ste->dw0.dw0.esid = esid;
        castout_ste->dw0.dw0.kp = 1;
        if (!kernel_segment)
                castout_ste->dw0.dw0.ks = 1;
        asm volatile("eieio" : : : "memory");   /* Order update */
        castout_ste->dw0.dw0.v = 1;
        asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
        /* Ensure completion of slbie */
        asm volatile("sync" : : : "memory");

        return (global_entry | (castout_entry & 0x7));
}
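/*
 * Create a segment table entry for this esid/vsid pair and, for user
 * segments, remember which slot it landed in via the per-CPU stab_cache
 * so the next flush_stab() can invalidate it cheaply.
 */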
static inline void __ste_allocate(unsigned long esid, unsigned long vsid)
{
        unsigned char stab_entry;
        unsigned long offset;
        int region_id = REGION_ID(esid << SID_SHIFT);

        stab_entry = make_ste(get_paca()->stab_addr, esid, vsid);

        if (region_id != USER_REGION_ID)
                return;

        offset = __get_cpu_var(stab_cache_ptr);
        if (offset < NR_STAB_CACHE_ENTRIES)
                __get_cpu_var(stab_cache[offset++]) = stab_entry;
        else
                /* Cache overflowed; flush_stab() will do a full scan. */
                offset = NR_STAB_CACHE_ENTRIES+1;
        __get_cpu_var(stab_cache_ptr) = offset;
}
/*
 * Allocate a segment table entry for the given ea.
 */
int ste_allocate(unsigned long ea)
{
        unsigned long vsid, esid;
        mm_context_t context;

        /* Check for invalid effective addresses. */
        if (!IS_VALID_EA(ea))
                return 1;

        /* Kernel or user address? */
        if (REGION_ID(ea) >= KERNEL_REGION_ID) {
                vsid = get_kernel_vsid(ea);
                context = KERNEL_CONTEXT(ea);
        } else {
                if (!current->mm)
                        return 1;
                context = current->mm->context;
                vsid = get_vsid(context.id, ea);
        }

        esid = GET_ESID(ea);
        __ste_allocate(esid, vsid);
        asm volatile("sync":::"memory");        /* Order update */

        return 0;
}
/*
 * Preload some userspace segments into the segment table.
 */
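/*
 * The segments chosen are those containing the program counter, the stack
 * pointer and TASK_UNMAPPED_BASE (the default start of mmap mappings), so
 * a task does not fault on its most frequently used addresses straight
 * after a context switch.
 */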
static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;
        unsigned long pc_esid = GET_ESID(pc);
        unsigned long stack_esid = GET_ESID(stack);
        unsigned long unmapped_base_esid;
        unsigned long vsid;

        if (test_tsk_thread_flag(tsk, TIF_32BIT))
                unmapped_base = TASK_UNMAPPED_BASE_USER32;
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;

        unmapped_base_esid = GET_ESID(unmapped_base);

        if (!IS_VALID_EA(pc) || (REGION_ID(pc) >= KERNEL_REGION_ID))
                return;
        vsid = get_vsid(mm->context.id, pc);
        __ste_allocate(pc_esid, vsid);

        if (pc_esid == stack_esid)
                return;

        if (!IS_VALID_EA(stack) || (REGION_ID(stack) >= KERNEL_REGION_ID))
                return;
        vsid = get_vsid(mm->context.id, stack);
        __ste_allocate(stack_esid, vsid);

        if (pc_esid == unmapped_base_esid || stack_esid == unmapped_base_esid)
                return;

        if (!IS_VALID_EA(unmapped_base) ||
            (REGION_ID(unmapped_base) >= KERNEL_REGION_ID))
                return;
        vsid = get_vsid(mm->context.id, unmapped_base);
        __ste_allocate(unmapped_base_esid, vsid);

        asm volatile("sync" : : : "memory");    /* Order update */
}
/* Flush all user entries from the segment table of the current processor. */
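/*
 * When the per-CPU stab_cache has not overflowed, only the entries
 * recorded there are invalidated; otherwise every entry mapping an
 * address below KERNELBASE is scanned and cleared.  Kernel segments,
 * including the one set up by stab_initialize(), are left in place.
 */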
void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
{
        STE *stab = (STE *) get_paca()->stab_addr;
        STE *ste;
        unsigned long offset = __get_cpu_var(stab_cache_ptr);

        /* Force previous translations to complete. DRENG */
        asm volatile("isync" : : : "memory");

        if (offset <= NR_STAB_CACHE_ENTRIES) {
                int i;
                for (i = 0; i < offset; i++) {
                        ste = stab + __get_cpu_var(stab_cache[i]);
                        ste->dw0.dw0.v = 0;
                }
        } else {
                unsigned long entry, ea;
                /* Invalidate all entries, but never flush the first one. */
                ste = stab + 1;
                for (entry = 1;
                     entry < (PAGE_SIZE / sizeof(STE));
                     entry++, ste++) {
                        ea = ste->dw0.dw0.esid << SID_SHIFT;
                        if (ea < KERNELBASE)
                                ste->dw0.dw0.v = 0;
                }
        }

        asm volatile("sync; slbia; sync":::"memory");
        __get_cpu_var(stab_cache_ptr) = 0;
        preload_stab(tsk, mm);
}