/*
 * PowerPC64 Segment Translation Support.
 *
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
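
/*
 * struct stab_entry (a 16 byte ESID/VSID pair: esid_data, vsid_data) and the
 * STE_*, SID_* and ESID_MASK definitions used below are provided by the
 * headers included above.
 */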

/* Both the segment table and SLB code use the following cache */
#define NR_STAB_CACHE_ENTRIES 8
DEFINE_PER_CPU(long, stab_cache_ptr);
DEFINE_PER_CPU(long, stab_cache[NR_STAB_CACHE_ENTRIES]);
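
/*
 * Each STE allocated for a user address is recorded in stab_cache so that
 * switch_stab() can invalidate just those entries on a context switch.  If
 * the cache overflows, stab_cache_ptr is left past NR_STAB_CACHE_ENTRIES
 * and the whole table is scanned instead.
 */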

/*
 * Create a segment table entry for the given esid/vsid pair.
 */
static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
{
	unsigned long esid_data, vsid_data;
	unsigned long entry, group, old_esid, castout_entry, i;
	unsigned int global_entry;
	struct stab_entry *ste, *castout_ste;
	unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE;

	vsid_data = vsid << STE_VSID_SHIFT;
	esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V;
	if (!kernel_segment)
		esid_data |= STE_ESID_KS;
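
	/*
	 * The segment table is one page of 16 byte entries arranged in groups
	 * of 8.  An ESID hashes to a primary group via its low 5 bits and to
	 * a secondary group via the complement of those bits.
	 */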
	/* Search the primary group first. */
	global_entry = (esid & 0x1f) << 3;
	ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));

	/* Find an empty entry, if one exists. */
	for (group = 0; group < 2; group++) {
		for (entry = 0; entry < 8; entry++, ste++) {
			if (!(ste->esid_data & STE_ESID_V)) {
				/* Set the VSID before marking the ESID valid. */
				ste->vsid_data = vsid_data;
				asm volatile("eieio":::"memory");
				ste->esid_data = esid_data;
				return (global_entry | entry);
			}
		}
		/* Now search the secondary group. */
		global_entry = ((~esid) & 0x1f) << 3;
		ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
	}

	/*
	 * Could not find empty entry, pick one with a round robin selection.
	 * Search all entries in the two groups.
	 */
	castout_entry = get_paca()->stab_rr;
	for (i = 0; i < 16; i++) {
		if (castout_entry < 8) {
			global_entry = (esid & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | ((esid & 0x1f) << 7));
			castout_ste = ste + castout_entry;
		} else {
			global_entry = ((~esid) & 0x1f) << 3;
			ste = (struct stab_entry *)(stab | (((~esid) & 0x1f) << 7));
			castout_ste = ste + (castout_entry - 8);
		}

		/* Don't cast out the first kernel segment */
		if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE)
			break;

		castout_entry = (castout_entry + 1) & 0xf;
	}

	get_paca()->stab_rr = (castout_entry + 1) & 0xf;
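
	/*
	 * Cast out the victim: invalidate its ESID first, write the new VSID,
	 * then set the new (valid) ESID, so the hardware never sees a valid
	 * entry with a stale VSID.  The slbie below flushes any translation
	 * the processor may have cached from the old STE.
	 */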
	/* Modify the old entry to the new value. */

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");

	old_esid = castout_ste->esid_data >> SID_SHIFT;
	castout_ste->esid_data = 0;		/* Invalidate old entry */

	asm volatile("sync" : : : "memory");	/* Order update */

	castout_ste->vsid_data = vsid_data;
	asm volatile("eieio" : : : "memory");	/* Order update */
	castout_ste->esid_data = esid_data;

	asm volatile("slbie %0" : : "r" (old_esid << SID_SHIFT));
	/* Ensure completion of slbie */
	asm volatile("sync" : : : "memory");

	return (global_entry | (castout_entry & 0x7));
}

/*
 * Allocate a segment table entry for the given ea and mm
 */
static int __ste_allocate(unsigned long ea, struct mm_struct *mm)
{
	unsigned long vsid;
	unsigned char stab_entry;
	unsigned long offset;

	/* Kernel or user address? */
	if (ea >= KERNELBASE) {
		vsid = get_kernel_vsid(ea);
	} else {
		if ((ea >= TASK_SIZE_USER64) || (!mm))
			return 1;

		vsid = get_vsid(mm->context.id, ea);
	}

	stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid);

	if (ea < KERNELBASE) {
		offset = __get_cpu_var(stab_cache_ptr);
		if (offset < NR_STAB_CACHE_ENTRIES)
			__get_cpu_var(stab_cache[offset++]) = stab_entry;
		else
			offset = NR_STAB_CACHE_ENTRIES+1;
		__get_cpu_var(stab_cache_ptr) = offset;

		/* Order update */
		asm volatile("sync":::"memory");
	}

	return 0;
}
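
/*
 * Fault in a segment table entry for 'ea' in the current task's address
 * space (the entry point used when a segment fault is taken).
 */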
int ste_allocate(unsigned long ea)
{
	return __ste_allocate(ea, current->mm);
}

/*
 * Do the segment table work for a context switch: flush all user
 * entries from the table, then preload some probably useful entries
 * for the new task
 */
void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
{
	struct stab_entry *stab = (struct stab_entry *) get_paca()->stab_addr;
	struct stab_entry *ste;
	unsigned long offset = __get_cpu_var(stab_cache_ptr);
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;

	/* Force previous translations to complete. DRENG */
	asm volatile("isync" : : : "memory");
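
	/*
	 * If the stab_cache did not overflow, only the entries recorded there
	 * need to be invalidated; otherwise walk the whole table and clear
	 * every user entry.
	 */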
	if (offset <= NR_STAB_CACHE_ENTRIES) {
		int i;

		for (i = 0; i < offset; i++) {
			ste = stab + __get_cpu_var(stab_cache[i]);
			ste->esid_data = 0; /* invalidate entry */
		}
	} else {
		unsigned long entry;

		/* Invalidate all entries. */
		ste = stab;

		/* Never flush the first entry. */
		ste += 1;
		for (entry = 1;
		     entry < (PAGE_SIZE / sizeof(struct stab_entry));
		     entry++, ste++) {
			unsigned long ea;

			ea = ste->esid_data & ESID_MASK;
			if (ea < KERNELBASE) {
				ste->esid_data = 0;
			}
		}
	}

	asm volatile("sync; slbia; sync":::"memory");

	__get_cpu_var(stab_cache_ptr) = 0;

	/* Now preload some entries for the new task */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;
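
	/*
	 * Segments are 256MB (1 << SID_SHIFT), so the program counter, stack
	 * and mmap base often share a segment; the GET_ESID() checks below
	 * avoid creating the same entry twice.
	 */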
	__ste_allocate(pc, mm);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	__ste_allocate(stack, mm);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	__ste_allocate(unmapped_base, mm);

	/* Order update */
	asm volatile("sync" : : : "memory");
}

extern void slb_initialize(void);

/*
 * Build an entry for the base kernel segment and put it into
 * the segment table or SLB.  All other segment table or SLB
 * entries are faulted in.
 */
void stab_initialize(unsigned long stab)
{
	unsigned long vsid = get_kernel_vsid(KERNELBASE);
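
	/*
	 * CPUs with CPU_FTR_SLB manage segments purely through the SLB;
	 * others use the in-memory segment table initialized below.
	 */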
	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
		slb_initialize();
	} else {
		asm volatile("isync; slbia; isync":::"memory");
		make_ste(stab, GET_ESID(KERNELBASE), vsid);

		/* Order update */
		asm volatile("sync":::"memory");
	}
}