/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
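
/*
 * The SLB (segment lookaside buffer) is the software-loaded cache of
 * ESID->VSID segment translations used by POWER4-class CPUs in place
 * of the older hardware-walked segment table.  This file manages the
 * bolted kernel entries and the flush/preload work done at context
 * switch; the SLB miss handler itself, slb_allocate(), is implemented
 * separately (see the extern declaration below).
 */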

#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>

extern void slb_allocate(unsigned long ea);

static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
	return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}
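
/*
 * The two helpers above build the register images consumed by slbmte:
 * esid_data forms RB (the ESID, the valid bit and the slot index) and
 * vsid_data forms RS (the VSID plus the SLB_VSID_* protection and
 * page-size flags).
 */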

static inline void create_slbe(unsigned long ea, unsigned long vsid,
			       unsigned long flags, unsigned long entry)
{
	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, flags)),
		       "r" (mk_esid_data(ea, entry))
		     : "memory");
}

static void slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * appropriately too. */
	unsigned long ksp_flags = SLB_VSID_KERNEL;
	unsigned long ksp_esid_data;

	WARN_ON(!irqs_disabled());

	if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
		ksp_flags |= SLB_VSID_L;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
	if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
		ksp_esid_data &= ~SLB_ESID_V;
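
	/* The check above drops the valid bit when the kernel stack
	 * sits in the KERNELBASE segment: slot 0 already maps that
	 * ESID, and two valid SLB entries for one ESID are illegal
	 * and can raise a machine check. */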

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
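	/* slbia leaves SLB entry 0 intact, so the bolted KERNELBASE
	 * entry in slot 0 survives the flush and only slots 1 and 2
	 * have to be re-created below. */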
64 asm volatile("isync\n"
66 /* Slot 1 - first VMALLOC segment */
68 /* Slot 2 - kernel stack */
71 :: "r"(mk_vsid_data(VMALLOCBASE, SLB_VSID_KERNEL)),
72 "r"(mk_esid_data(VMALLOCBASE, 1)),
73 "r"(mk_vsid_data(ksp_esid_data, ksp_flags)),

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset = get_paca()->slb_cache_ptr;
	unsigned long esid_data;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;
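
	/* slb_cache[] in the paca records the ESIDs of user segments
	 * entered into the SLB since the last switch; a slb_cache_ptr
	 * beyond SLB_CACHE_ENTRIES means the cache overflowed and we
	 * must fall back to a full flush and rebolt. */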
	if (offset <= SLB_CACHE_ENTRIES) {
		int i;

		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			esid_data = (unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT;
			asm volatile("slbie %0" : : "r" (esid_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES) {
		/* flush segment in EEH region, we shouldn't ever
		 * access addresses in this region. */
		asm volatile("slbie %0" : : "r"(EEHREGIONBASE));
	}

	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;

	/*
	 * preload some userspace segments into the SLB.
	 */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;
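
	/* Warm the SLB with the segments the task will touch first:
	 * its PC, its stack and its mmap base.  Kernel addresses are
	 * never preloaded, and a segment that is already covered by
	 * an earlier preload ends the sequence early. */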
	if (pc >= KERNELBASE)
		return;
	slb_allocate(pc);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	if (stack >= KERNELBASE)
		return;
	slb_allocate(stack);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	if (unmapped_base >= KERNELBASE)
		return;
	slb_allocate(unmapped_base);
}

void slb_initialize(void)
{
	/* On iSeries the bolted entries have already been set up by
	 * the hypervisor from the lparMap data in head.S */
#ifndef CONFIG_PPC_ISERIES
	unsigned long flags = SLB_VSID_KERNEL;

	if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
		flags |= SLB_VSID_L;

	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_slbe(KERNELBASE, get_kernel_vsid(KERNELBASE), flags, 0);
	create_slbe(VMALLOCBASE, get_kernel_vsid(KERNELBASE),
		    SLB_VSID_KERNEL, 1);
	/* We don't bolt the stack for the time being - we're in boot,
	 * so the stack is in the bolted segment.  By the time it goes
	 * elsewhere, we'll call _switch() which will bolt in the new
	 * one. */
	asm volatile("isync":::"memory");
#endif
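
	/* stab_rr is the round-robin pointer used by the SLB miss
	 * handler to pick a victim slot; starting it past the bolted
	 * entries keeps them from ever being cast out. */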
	get_paca()->stab_rr = SLB_NUM_BOLTED;
}