#ifndef __PPC64_MMU_CONTEXT_H
#define __PPC64_MMU_CONTEXT_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/mmu.h>
#include <asm/cputable.h>

/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 64;
	return __ffs(b[2]) + 128;
}
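
/*
 * Worked example (editor's illustration, not from the original
 * source): if only priority bit 130 of the 140-bit bitmap is set,
 * then b[0] == 0 and b[1] == 0, so the result comes from the third
 * word:
 *
 *	__ffs(b[2]) + 128 == __ffs(1UL << 2) + 128 == 2 + 128 == 130
 */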

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#define MAX_CONTEXT	(0x100000-1)

extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);

/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched.c
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
#ifdef CONFIG_ALTIVEC
	/* Stop any outstanding AltiVec data streams before switching */
	asm volatile (
 BEGIN_FTR_SECTION
	"dssall;\n"
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
	 : : );
#endif /* CONFIG_ALTIVEC */

	if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
		cpu_set(smp_processor_id(), next->cpu_vm_mask);

	/* No need to flush userspace segments if the mm doesn't change */
	if (prev == next)
		return;

	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
		switch_slb(tsk, next);
	else
		switch_stab(tsk, next);
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}

/*
 * We first generate a 36-bit "proto-VSID".  For kernel addresses this
 * is equal to the ESID, for user addresses it is:
 *	(context << 15) | (esid & 0x7fff)
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *	where	VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
 *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved.  VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 *	- We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 *	- We allow for 15 significant bits of ESID and 20 bits of
 * context for user addresses, i.e. 8T (43 bits) of address space for
 * up to 1M contexts (although the page table structure and context
 * allocation will need changes to take advantage of this).
 *
 *	- The scramble function gives robust scattering in the hash
 * table (at least based on some initial results).  The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */
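
/*
 * Worked example (editor's illustration, assuming the usual 28-bit
 * segment shift): a user access at ea 0x12345678 in context 0x42 has
 * esid = ea >> 28 = 0x1, so
 *
 *	proto-VSID = (0x42 << 15) | (0x1 & 0x7fff) = 0x210001
 *
 * whose top bit is 0 (user).  A kernel access at ea
 * 0xC000000000000000 has proto-VSID = esid = 0xC00000000, whose top
 * two bits (of 36) are 0b11 (kernel).
 */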

/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate(), do_stab_bolted and mmu.h
 * (ASM_VSID_SCRAMBLE macro) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */

static inline unsigned long vsid_scramble(unsigned long protovsid)
{
#if 0
	/* The code below is equivalent to this function for arguments
	 * < 2^VSID_BITS, which is all this should ever be called
	 * with.  However gcc is not clever enough to compute the
	 * modulus (2^n-1) without a second multiply. */
	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
#else /* 1 */
	unsigned long x;

	x = protovsid * VSID_MULTIPLIER;
	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
#endif /* 1 */
}
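
/*
 * Sanity-check sketch (editor's illustration, not part of the
 * original header): the folding works because 2^VSID_BITS is
 * congruent to 1 modulo VSID_MODULUS = 2^VSID_BITS - 1, so adding
 * the high and low 36-bit halves of the product preserves the
 * residue.  A userspace check of the identity could look like:
 *
 *	unsigned long p, x, direct, folded;
 *	for (p = 1; p < VSID_MODULUS; p += 0x87654321UL) {
 *		direct = (p * VSID_MULTIPLIER) % VSID_MODULUS;
 *		x = p * VSID_MULTIPLIER;
 *		x = (x >> VSID_BITS) + (x & VSID_MODULUS);
 *		folded = (x + ((x + 1) >> VSID_BITS)) & VSID_MODULUS;
 *		assert(direct == folded);
 *	}
 */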

/* This is only valid for addresses >= KERNELBASE */
static inline unsigned long get_kernel_vsid(unsigned long ea)
{
	return vsid_scramble(ea >> SID_SHIFT);
}

/* This is only valid for user addresses (which are below 2^41) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
{
	return vsid_scramble((context << USER_ESID_BITS)
			     | (ea >> SID_SHIFT));
}
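
/*
 * Usage sketch (editor's illustration with hypothetical values):
 * looking up the VSID for a user effective address, assuming context
 * 0x42 was handed out by init_new_context():
 *
 *	unsigned long vsid = get_vsid(0x42, 0x12345678UL);
 *
 * Only the ESID part (ea >> SID_SHIFT) participates; the offset
 * within the 256MB segment does not affect the VSID.
 */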

#endif /* __PPC64_MMU_CONTEXT_H */