#ifndef __PPC64_MMU_CONTEXT_H
#define __PPC64_MMU_CONTEXT_H

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <asm/ppcdebug.h>
#include <asm/cputable.h>
/*
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
        if (unlikely(b[0]))
                return __ffs(b[0]);
        if (unlikely(b[1]))
                return __ffs(b[1]) + 64;
        return __ffs(b[2]) + 128;
}
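/*
 * (The 140 bits are the scheduler's 140 priority levels; kernel/sched.c
 * calls this on the runqueue priority bitmap to find the highest runnable
 * priority.)
 */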
#define FIRST_USER_CONTEXT      0x10    /* First 16 reserved for kernel */
#define LAST_USER_CONTEXT       0x8000  /* Same as PID_MAX for now... */
#define NUM_USER_CONTEXT        (LAST_USER_CONTEXT-FIRST_USER_CONTEXT)
/* Choose whether we want to implement our context
 * number allocator as a LIFO or FIFO queue.
 */
#if 1
#define MMU_CONTEXT_LIFO
#else
#define MMU_CONTEXT_FIFO
#endif
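/*
 * Free context numbers live in a circular buffer guarded by a spinlock:
 * init_new_context() pops the entry at head, and destroy_context() pushes
 * a freed number back just before head (LIFO) or after tail (FIFO).
 */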
struct mmu_context_queue_t {
        spinlock_t lock;
        long head;
        long tail;
        long size;
        mm_context_id_t elements[LAST_USER_CONTEXT];
};

extern struct mmu_context_queue_t mmu_context_queue;
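/* Nothing to do for a lazy TLB switch on ppc64. */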
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/*
 * The context number queue has underflowed.
 * Meaning: we tried to push a context number that was freed
 * back onto the context queue and the queue was already full.
 */
static inline void
mmu_context_underflow(void)
{
        printk(KERN_DEBUG "mmu_context_underflow\n");
        panic("mmu_context_underflow");
}
/*
 * Set up the context for a new address space.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        long head;
        unsigned long flags;

        /* This does the right thing across a fork (I hope) */
        spin_lock_irqsave(&mmu_context_queue.lock, flags);

        if (mmu_context_queue.size <= 0) {
                spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
                return -ENOMEM;
        }

        head = mmu_context_queue.head;
        mm->context.id = mmu_context_queue.elements[head];
        head = (head < LAST_USER_CONTEXT-1) ? head+1 : 0;
        mmu_context_queue.head = head;
        mmu_context_queue.size--;

        spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
        return 0;
}
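/*
 * The id allocated above is the value later passed as the "context"
 * argument to get_vsid() below when user segment VSIDs are computed.
 */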
/*
 * We're finished using the context for an address space.
 */
static inline void
destroy_context(struct mm_struct *mm)
{
        long index;
        unsigned long flags;

        spin_lock_irqsave(&mmu_context_queue.lock, flags);

        if (mmu_context_queue.size >= NUM_USER_CONTEXT) {
                spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
                mmu_context_underflow();
        }

#ifdef MMU_CONTEXT_LIFO
        index = mmu_context_queue.head;
        index = (index > 0) ? index-1 : LAST_USER_CONTEXT-1;
        mmu_context_queue.head = index;
#else
        index = mmu_context_queue.tail;
        index = (index < LAST_USER_CONTEXT-1) ? index+1 : 0;
        mmu_context_queue.tail = index;
#endif

        mmu_context_queue.size++;
        mmu_context_queue.elements[index] = mm->context.id;

        spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
}
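/*
 * With MMU_CONTEXT_LIFO the number freed here is the next one handed out;
 * the FIFO variant recycles it only after every other free number has
 * been used.
 */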
extern void flush_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void flush_slb(struct task_struct *tsk, struct mm_struct *mm);
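/*
 * flush_slb() is for processors with a segment lookaside buffer
 * (CPU_FTR_SLB); flush_stab() is for processors that use a segment
 * table instead. switch_mm() below picks the right one.
 */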
/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched.c
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
#ifdef CONFIG_ALTIVEC
        asm volatile (
 BEGIN_FTR_SECTION
        "dssall;\n"
 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
         : : );
#endif /* CONFIG_ALTIVEC */
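        /* Note that this CPU is now running with the new mm. */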
        if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
                cpu_set(smp_processor_id(), next->cpu_vm_mask);

        /* No need to flush userspace segments if the mm doesn't change */
        if (prev == next)
                return;

        if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
                flush_slb(tsk, next);
        else
                flush_stab(tsk, next);
}
#define deactivate_mm(tsk,mm)   do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm(active_mm, mm) \
        switch_mm(active_mm, mm, current);
#define VSID_RANDOMIZER 42470972311
#define VSID_MASK       0xfffffffff
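/*
 * A VSID is built in two steps: pack the effective segment id
 * ((ea >> 28) & 0x1fff) together with the context number (or, for kernel
 * addresses, the region id in ea >> 60) into an ordinal, then multiply by
 * the odd constant VSID_RANDOMIZER and keep the low 36 bits (VSID_MASK)
 * so that adjacent segments scatter across the hash table.
 */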
/* This is only valid for kernel (including vmalloc, imalloc and bolted) EAs
 */
static inline unsigned long
get_kernel_vsid( unsigned long ea )
{
        unsigned long ordinal, vsid;

        ordinal = (((ea >> 28) & 0x1fff) * LAST_USER_CONTEXT) | (ea >> 60);
        vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;

        ifppcdebug(PPCDBG_HTABSTRESS) {
                /* For debug, this path creates a very poor vsid distribution.
                 * A user program can access virtual addresses in the form
                 * 0x0yyyyxxxx000 where yyyy = xxxx to cause multiple mappings
                 * to hash to the same page table group.
                 */
                ordinal = ((ea >> 28) & 0x1fff) | (ea >> 44);
                vsid = ordinal & VSID_MASK;
        }

        return vsid;
}
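/*
 * Illustrative example: for the kernel linear-mapping address
 * 0xc000000000000000, (ea >> 28) & 0x1fff is 0 and ea >> 60 is 0xc, so the
 * ordinal is just 0xc before it is randomized and masked.
 */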
/* This is only valid for user EAs (user EAs do not exceed 2^41 (EADDR_SIZE))
 */
static inline unsigned long
get_vsid( unsigned long context, unsigned long ea )
{
        unsigned long ordinal, vsid;

        ordinal = (((ea >> 28) & 0x1fff) * LAST_USER_CONTEXT) | context;
        vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;

        ifppcdebug(PPCDBG_HTABSTRESS) {
                /* See comment above. */
                ordinal = ((ea >> 28) & 0x1fff) | (context << 16);
                vsid = ordinal & VSID_MASK;
        }

        return vsid;
}
#endif /* __PPC64_MMU_CONTEXT_H */