[linux-2.6.git] include/asm-xen/asm-i386/mmu_context.h
#ifndef __I386_SCHED_H
#define __I386_SCHED_H

#include <linux/config.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

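/*
 * enter_lazy_tlb() marks the CPU as running in lazy TLB mode. In this
 * Xen port the per-cpu cpu_tlbstate tracking is compiled out, so the
 * function is effectively a no-op.
 */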
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#if 0 /* XEN */
        unsigned cpu = smp_processor_id();
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}

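/*
 * Context-switch hooks used by the scheduler:
 *  - prepare_arch_switch() saves the user segment registers before the
 *    page tables and LDT are switched,
 *  - finish_arch_switch() drops the runqueue lock once the switch is done,
 *  - task_running() tests whether @p is the task currently on @rq.
 */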
#define prepare_arch_switch(rq, next)   __prepare_arch_switch()
#define finish_arch_switch(rq, next)    spin_unlock_irq(&(rq)->lock)
#define task_running(rq, p)             ((rq)->curr == (p))

static inline void __prepare_arch_switch(void)
{
        /*
         * Save away %fs and %gs. No need to save %es and %ds, as those
         * are always kernel segments while inside the kernel. Must
         * happen before reload of cr3/ldt (i.e., not in __switch_to).
         */
        __asm__ __volatile__ ( "movl %%fs,%0 ; movl %%gs,%1"
                : "=m" (*(int *)&current->thread.fs),
                  "=m" (*(int *)&current->thread.gs));
        __asm__ __volatile__ ( "movl %0,%%fs ; movl %0,%%gs"
                : : "r" (0) );
}

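/*
 * switch_mm() batches the address-space switch for Xen: instead of
 * writing cr3 and reloading the LDT directly, it queues up to two
 * mmuext_op requests (MMUEXT_NEW_BASEPTR and, if the LDT changed,
 * MMUEXT_SET_LDT) and issues them with a single hypercall.
 */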
static inline void switch_mm(struct mm_struct *prev,
                             struct mm_struct *next,
                             struct task_struct *tsk)
{
        int cpu = smp_processor_id();
        struct mmuext_op _op[2], *op = _op;

        if (likely(prev != next)) {
                /* stop flush ipis for the previous mm */
                cpu_clear(cpu, prev->cpu_vm_mask);
#if 0 /* XEN */
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
                per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
                cpu_set(cpu, next->cpu_vm_mask);

                /* Re-load page tables: load_cr3(next->pgd) */
                per_cpu(cur_pgd, cpu) = next->pgd;
                op->cmd = MMUEXT_NEW_BASEPTR;
                op->mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
                op++;

                /*
                 * load the LDT, if the LDT is different:
                 */
                if (unlikely(prev->context.ldt != next->context.ldt)) {
                        /* load_LDT_nolock(&next->context, cpu) */
                        op->cmd = MMUEXT_SET_LDT;
                        op->linear_addr = (unsigned long)next->context.ldt;
                        op->nr_ents     = next->context.size;
                        op++;
                }

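                /* Flush the queued operations in one hypercall; the
                 * switch must not fail, so any error is fatal. */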
                BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
        }
#if 0 /* XEN */
        else {
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
                BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

                if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
                        /* We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery. We must reload %cr3.
                         */
                        load_cr3(next->pgd);
                        load_LDT_nolock(&next->context, cpu);
                }
        }
#endif
}

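/*
 * deactivate_mm() clears %fs and %gs so that no user segment register
 * keeps referencing descriptors of the mm being dropped; activate_mm()
 * simply switches to the new mm with no previous task.
 */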
#define deactivate_mm(tsk, mm) \
        asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))

#define activate_mm(prev, next) do {            \
        switch_mm((prev), (next), NULL);        \
} while (0)

#endif /* __I386_SCHED_H */