#include <linux/config.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
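
/*
 * Lazy TLB: when the scheduler switches to a kernel thread, the thread has
 * no mm of its own and keeps running on the previous task's page tables.
 * Marking this CPU TLBSTATE_LAZY lets the TLB-flush IPI path (leave_mm())
 * drop the CPU from the borrowed mm instead of flushing it repeatedly.
 */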
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	unsigned cpu = smp_processor_id();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}

#define prepare_arch_switch(rq, next)	__prepare_arch_switch()
#define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
#define task_running(rq, p)		((rq)->curr == (p))
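
/*
 * The context switch runs with the runqueue lock held; finish_arch_switch()
 * drops it once the new task is in place.  prepare_arch_switch() is hooked
 * here so that the outgoing task's segment registers are saved before the
 * hypercall in switch_mm() replaces the page-table base and LDT.
 */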
static inline void __prepare_arch_switch(void)
{
	/*
	 * Save away %fs and %gs. No need to save %es and %ds, as those
	 * are always kernel segments while inside the kernel. Must
	 * happen before reload of cr3/ldt (i.e., not in __switch_to).
	 */
	__asm__ __volatile__ ( "movl %%fs,%0 ; movl %%gs,%1"
		: "=m" (*(int *)&current->thread.fs),
		  "=m" (*(int *)&current->thread.gs));
	__asm__ __volatile__ ( "movl %0,%%fs ; movl %0,%%gs"
		: : "r" (0) );
}
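
/*
 * %fs and %gs are loaded with the null selector above so nothing stale is
 * left behind: a null selector never references the descriptor tables, so
 * it cannot fault once the next task's LDT has been installed.
 *
 * switch_mm() below cannot write %cr3 or lldt directly the way native i386
 * does; a paravirtualized guest must ask Xen.  It queues up to two mmuext
 * ops in _op[] (new page-table base, plus a new LDT when it differs) and
 * issues them in a single HYPERVISOR_mmuext_op() hypercall.
 */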
static inline void switch_mm(struct mm_struct *prev,
			     struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();
	struct mmuext_op _op[2], *op = _op;

	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
		cpu_set(cpu, next->cpu_vm_mask);
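
		/*
		 * Xen note: the hypervisor addresses page-table frames by
		 * machine frame number, so the pseudo-physical frame of
		 * next->pgd is translated with pfn_to_mfn() before being
		 * queued.
		 */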
		/* Re-load page tables: load_cr3(next->pgd) */
		per_cpu(cur_pgd, cpu) = next->pgd;
		op->cmd = MMUEXT_NEW_BASEPTR;
		op->mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
		op++;

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt)) {
			/* load_LDT_nolock(&next->context, cpu) */
			op->cmd = MMUEXT_SET_LDT;
			op->linear_addr = (unsigned long)next->context.ldt;
			op->nr_ents     = next->context.size;
			op++;
		}
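
		/*
		 * Issue the queued ops (one or two, hence op - _op) in a
		 * single batched hypercall.  BUG_ON() is deliberate: if Xen
		 * rejects the new base pointer there is no old context worth
		 * returning to.
		 */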
		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
	}
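	/*
	 * prev == next can happen after lazy TLB mode: leave_mm() may have
	 * cleared this CPU from next->cpu_vm_mask while a kernel thread ran,
	 * in which case our page tables have gone stale.
	 */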
#ifdef CONFIG_SMP
	else {
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload %cr3.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context, cpu);
		}
	}
#endif
}

#define deactivate_mm(tsk, mm) \
	asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
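
/*
 * deactivate_mm() clears the user segment registers for the same reason as
 * __prepare_arch_switch() above: %fs/%gs must not keep referencing LDT
 * entries of an mm that is going away.
 */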

#define activate_mm(prev, next) do {		\
	switch_mm((prev),(next),NULL);		\
} while (0)
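
/*
 * Caller's view, as a rough sketch (simplified from the 2.6 scheduler; not
 * part of this header): context_switch() picks between the entry points
 * above depending on whether the incoming task owns an mm.
 *
 *	if (!next->mm) {				// kernel thread
 *		next->active_mm = prev->active_mm;	// borrow prev's mm
 *		atomic_inc(&prev->active_mm->mm_count);
 *		enter_lazy_tlb(prev->active_mm, next);
 *	} else
 *		switch_mm(prev->active_mm, next->mm, next);
 *
 * activate_mm() is used on the exec path, where the new mm must be switched
 * to unconditionally, so it simply calls switch_mm() with no task context.
 */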