#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
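
/*
 * Note: the native lazy-TLB bookkeeping below is compiled out
 * ("XEN: no lazy tlb"); mm switches are always performed eagerly
 * via the hypercall issued in switch_mm().
 */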
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#if 0 /* XEN: no lazy tlb */
	unsigned cpu = smp_processor_id();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}

#define prepare_arch_switch(next)	__prepare_arch_switch()

static inline void __prepare_arch_switch(void)
{
	/*
	 * Save away %fs. No need to save %es and %ds, as those
	 * are always kernel segments while inside the kernel. Must
	 * happen before reload of cr3/ldt (i.e., not in __switch_to).
	 */
	asm volatile ( "mov %%fs,%0"
		: "=m" (current->thread.fs));
	asm volatile ( "movl %0,%%fs"
		: : "r" (0) );
}
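
/*
 * Pinning registers a pagetable tree with the hypervisor, which
 * validates it and write-protects it in the guest; switch_mm()
 * below BUG_ON()s if asked to install a pgd that is neither pinned
 * nor covered by XENFEAT_writable_page_tables.
 */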
extern void mm_pin(struct mm_struct *mm);
extern void mm_unpin(struct mm_struct *mm);
void mm_pin_all(void);
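
/*
 * switch_mm() batches the pagetable base switch and, when needed,
 * the LDT switch into a single HYPERVISOR_mmuext_op() hypercall:
 * _op[] holds up to two requests, and op - _op is the request count.
 */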
static inline void switch_mm(struct mm_struct *prev,
			     struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();
	struct mmuext_op _op[2], *op = _op;

	if (likely(prev != next)) {
		BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
		       !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags));

		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#if 0 /* XEN: no lazy tlb */
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
		cpu_set(cpu, next->cpu_vm_mask);

		/* Re-load page tables: load_cr3(next->pgd) */
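		/*
		 * MMUEXT_NEW_BASEPTR takes a machine frame number,
		 * hence the pfn_to_mfn() translation of the pgd's
		 * physical frame below.
		 */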
		op->cmd = MMUEXT_NEW_BASEPTR;
		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
		op++;

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt)) {
			/* load_LDT_nolock(&next->context) */
			op->cmd = MMUEXT_SET_LDT;
			op->arg1.linear_addr = (unsigned long)next->context.ldt;
			op->arg2.nr_ents = next->context.size;
			op++;
		}

		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
	}
#if 0 /* XEN: no lazy tlb */
	else {
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload %cr3.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
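
/*
 * deactivate_mm() loads a null selector into %fs so that no stale
 * user segment register is left referencing the outgoing mm's LDT.
 */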
#define deactivate_mm(tsk, mm) \
	asm("movl %0,%%fs": :"r" (0));
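
/*
 * A pagetable must be pinned before the hypervisor will install it:
 * activate_mm() pins next->pgd on first use, which satisfies the
 * BUG_ON() check at the top of switch_mm().
 */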
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
		mm_pin(next);
	switch_mm(prev, next, NULL);
}