#ifndef _ASM_IA64_MMU_CONTEXT_H
#define _ASM_IA64_MMU_CONTEXT_H
/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

/*
 * Routines to manage the allocation of task context numbers.  Task context numbers are
 * used to reduce or eliminate the need to perform TLB flushes due to context switches.
 * Context numbers are implemented using ia-64 region ids.  Since the IA-64 TLB does not
 * consider the region number when performing a TLB lookup, we need to assign a unique
 * region id to each region in a process.  We use the least significant three bits in a
 * region id for this purpose.
 */
#define IA64_REGION_ID_KERNEL	0 /* the kernel's region id (tlb.c depends on this being 0) */

#define ia64_rid(ctx,addr)	(((ctx) << 3) | ((addr) >> 61))
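/*
 * Worked example (illustrative): a task running with context number 5 that touches
 * an address in region 2 (top three address bits 010, e.g. 0x4000000000000000)
 * ends up with region id ia64_rid(5, 0x4000000000000000) == (5 << 3) | 2 == 0x2a,
 * so each of the eight regions of a task gets its own, unique region id.
 */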
# ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <asm/processor.h>
#define MMU_CONTEXT_DEBUG	0
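/*
 * When MMU_CONTEXT_DEBUG is set to 1, the MMU_TRACE() macro below logs every
 * context operation ('N'ew, 'A'ctivate, 'D'estroy, 'd'eactivate, ...) into the
 * mmu_tbuf ring buffer, so that context-switch problems can be reconstructed
 * after the fact.
 */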
#if MMU_CONTEXT_DEBUG

#include <ia64intrin.h>
extern struct mmu_trace_entry {
	char op;
	u8 cpu;
	u32 context;
	void *mm;
} mmu_tbuf[1024];	/* field names and buffer size inferred from MMU_TRACE() below */

extern volatile int mmu_tbuf_index;
# define MMU_TRACE(_op,_cpu,_mm,_ctx)						\
do {										\
	int i = __sync_fetch_and_add(&mmu_tbuf_index, 1) % ARRAY_SIZE(mmu_tbuf);	\
	struct mmu_trace_entry e;						\
	e.op = (_op); e.cpu = (_cpu); e.mm = (_mm); e.context = (_ctx);		\
	mmu_tbuf[i] = e;							\
} while (0)

#else

# define MMU_TRACE(op,cpu,mm,ctx)	do { ; } while (0)

#endif /* MMU_CONTEXT_DEBUG */
struct ia64_ctx {
	spinlock_t lock;
	unsigned int next;	/* next context number to use */
	unsigned int limit;	/* next >= limit => must call wrap_mmu_context() */
	unsigned int max_ctx;	/* max. context value supported by all CPUs */
};
extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
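/*
 * Slow path for context allocation, presumably implemented in arch/ia64/mm/tlb.c
 * (the comment on IA64_REGION_ID_KERNEL above points at tlb.c): roughly, it picks
 * a fresh range of context numbers once ia64_ctx.next reaches ia64_ctx.limit and
 * sets ia64_need_tlb_flush on the other CPUs so that stale translations are
 * flushed before an old region id can be reused.
 */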
extern void wrap_mmu_context (struct mm_struct *mm);
static inline void
enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
{
	/* nothing to do */
}
/*
 * When the context counter wraps around all TLBs need to be flushed because an old
 * context number might have been reused.  This is signalled by the ia64_need_tlb_flush
 * per-CPU variable, which is checked in the routine below.  Called by activate_mm().
 */
static inline void
delayed_tlb_flush (void)
{
	extern void local_flush_tlb_all (void);

	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
		local_flush_tlb_all();
		__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
	}
}
static inline mm_context_t
get_mmu_context (struct mm_struct *mm)
{
	unsigned long flags;
	mm_context_t context = mm->context;

	if (context)
		return context;

	spin_lock_irqsave(&ia64_ctx.lock, flags);
	/* re-check, now that we've got the lock: */
	context = mm->context;
	if (context == 0) {
		cpus_clear(mm->cpu_vm_mask);
		if (ia64_ctx.next >= ia64_ctx.limit)
			wrap_mmu_context(mm);
		mm->context = context = ia64_ctx.next++;
	}
	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
	return context;
}
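/*
 * Note (assumption): the unlocked fast path above relies on mm->context only ever
 * going from 0 to a non-zero value and then staying put; once a context number has
 * been assigned to an mm, nothing in this header changes it again.
 */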
/*
 * Initialize context number to some sane value.  MM is guaranteed to be a brand-new
 * address-space, so no TLB flushing is needed, ever.
 */
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
	MMU_TRACE('N', smp_processor_id(), mm, 0);
	mm->context = 0;
	return 0;
}
static inline void
destroy_context (struct mm_struct *mm)
{
	/* Nothing to do: context numbers are not freed, only traced. */
	MMU_TRACE('D', smp_processor_id(), mm, mm->context);
}
static inline void
reload_context (mm_context_t context)
{
	unsigned long rid;
	unsigned long rid_incr = 0;
	unsigned long rr0, rr1, rr2, rr3, rr4, old_rr4;

	old_rr4 = ia64_get_rr(0x8000000000000000);
	rid = context << 3;	/* make space for encoding the region number */
	rid_incr = 1 << 8;
	/* encode the region id, preferred page size, and VHPT enable bit: */
	rr0 = (rid << 8) | (PAGE_SHIFT << 2) | 1;
	rr1 = rr0 + 1*rid_incr;
	rr2 = rr0 + 2*rid_incr;
	rr3 = rr0 + 3*rid_incr;
	rr4 = rr0 + 4*rid_incr;
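	/*
	 * Illustrative note: in the IA-64 region register layout the VHPT-enable
	 * bit is bit 0, the preferred page size occupies bits 2-7, and the region
	 * id starts at bit 8.  With rid_incr == (1 << 8), regions 0..4 therefore
	 * receive region ids rid, rid+1, ..., rid+4 -- the same values ia64_rid()
	 * computes for addresses falling in those regions.
	 */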
#ifdef CONFIG_HUGETLB_PAGE
	rr4 = (rr4 & (~(0xfcUL))) | (old_rr4 & 0xfc);
#endif
	ia64_set_rr(0x0000000000000000, rr0);
	ia64_set_rr(0x2000000000000000, rr1);
	ia64_set_rr(0x4000000000000000, rr2);
	ia64_set_rr(0x6000000000000000, rr3);
	ia64_set_rr(0x8000000000000000, rr4);
	ia64_srlz_i();			/* srlz.i implies srlz.d */
}
static inline void
activate_context (struct mm_struct *mm)
{
	mm_context_t context;

	do {
		context = get_mmu_context(mm);
		MMU_TRACE('A', smp_processor_id(), mm, context);
		if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
			cpu_set(smp_processor_id(), mm->cpu_vm_mask);
		reload_context(context);
		MMU_TRACE('a', smp_processor_id(), mm, context);
		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
	} while (unlikely(context != mm->context));
}
#define deactivate_mm(tsk,mm)						\
do {									\
	MMU_TRACE('d', smp_processor_id(), mm, mm->context);		\
} while (0)
/*
 * Switch from address space PREV to address space NEXT.
 */
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
	delayed_tlb_flush();
	/*
	 * We may get interrupts here, but that's OK because interrupt handlers cannot
	 * touch user-space.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
	activate_context(next);
}
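/*
 * Note: the previous address space needs no special treatment here; activate_mm()
 * reloads the page-table base and the region registers entirely from 'next', so
 * switch_mm() below simply forwards to activate_mm() and ignores the task argument.
 */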
#define switch_mm(prev_mm,next_mm,next_task)	activate_mm(prev_mm, next_mm)

# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */