fedora core 6 1.2949 + vserver 2.2.0
linux-2.6.git: include/asm-i386/mach-xen/asm/mmu_context.h
#ifndef __I386_SCHED_H
#define __I386_SCHED_H

#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Used for LDT copy/destruction: init_new_context() copies the parent's
 * LDT at fork time (when the parent mm has one) and destroy_context()
 * frees it when the mm is torn down.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);


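/*
 * Lazy TLB switching is compiled out in the Xen build (see the "#if 0"
 * blocks marked "XEN: no lazy tlb" below), so enter_lazy_tlb() is a no-op
 * here; the native tlbstate bookkeeping is kept only for reference.
 */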
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#if 0 /* XEN: no lazy tlb */
        unsigned cpu = smp_processor_id();
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}

#define prepare_arch_switch(next)       __prepare_arch_switch()

static inline void __prepare_arch_switch(void)
{
        /*
         * Save away %fs, then clear it. No need to save %es and %ds, as
         * those are always kernel segments while inside the kernel. Must
         * happen before the reload of cr3/ldt (i.e., not in __switch_to),
         * presumably so that no user selector still referencing the
         * outgoing mm's LDT is loaded when the LDT is replaced.
         */
        asm volatile ( "mov %%fs,%0"
                : "=m" (current->thread.fs));
        asm volatile ( "movl %0,%%fs"
                : : "r" (0) );
}

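/*
 * Page-table pinning: a pinned pgd (and the tables beneath it) has been
 * handed to Xen as a read-only, validated page-table tree, which is a
 * precondition for installing it as a new base pointer.  mm_pin_all()
 * pins every mm in the system.
 */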
extern void mm_pin(struct mm_struct *mm);
extern void mm_unpin(struct mm_struct *mm);
extern void mm_pin_all(void);

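/*
 * switch_mm() batches the address-space switch into a single
 * HYPERVISOR_mmuext_op() hypercall: one MMUEXT_NEW_BASEPTR op for the
 * cr3 change plus, when the LDT differs, one MMUEXT_SET_LDT op, instead
 * of the native load_cr3()/load_LDT_nolock() pair.  Batching keeps the
 * switch to one guest/hypervisor transition.
 */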
static inline void switch_mm(struct mm_struct *prev,
                             struct mm_struct *next,
                             struct task_struct *tsk)
{
        int cpu = smp_processor_id();
        struct mmuext_op _op[2], *op = _op;

        if (likely(prev != next)) {
                BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
                       !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags));

                /* stop flush ipis for the previous mm */
                cpu_clear(cpu, prev->cpu_vm_mask);
#if 0 /* XEN: no lazy tlb */
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
                per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
                cpu_set(cpu, next->cpu_vm_mask);

                /* Re-load page tables: load_cr3(next->pgd) */
                op->cmd = MMUEXT_NEW_BASEPTR;
                op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
                op++;

                /*
                 * load the LDT, if the LDT is different:
                 */
                if (unlikely(prev->context.ldt != next->context.ldt)) {
                        /* load_LDT_nolock(&next->context) */
                        op->cmd = MMUEXT_SET_LDT;
                        op->arg1.linear_addr = (unsigned long)next->context.ldt;
                        op->arg2.nr_ents     = next->context.size;
                        op++;
                }

                BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
        }
#if 0 /* XEN: no lazy tlb */
        else {
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
                BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

                if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
                        /* We were in lazy tlb mode and leave_mm disabled
                         * tlb flush IPI delivery. We must reload %cr3.
                         */
                        load_cr3(next->pgd);
                        load_LDT_nolock(&next->context);
                }
        }
#endif
}
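
/*
 * A minimal sketch of the same batched mmuext interface reduced to a
 * single operation (a flush of the local TLB).  The helper name below is
 * made up for illustration and is not part of this header; only the
 * HYPERVISOR_mmuext_op() hypercall and the MMUEXT_TLB_FLUSH_LOCAL opcode
 * from the Xen interface, both already referenced above, are assumed.
 */
static inline void example_xen_local_tlb_flush(void)
{
        struct mmuext_op op;

        op.cmd = MMUEXT_TLB_FLUSH_LOCAL;        /* flush entire local TLB */
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF));
}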
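/*
 * deactivate_mm() runs on exec (before activate_mm()); clearing %fs drops
 * any user selector that might still reference the old mm's LDT.
 */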
#define deactivate_mm(tsk, mm)                  \
        asm("movl %0,%%fs": :"r" (0));

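/*
 * activate_mm() pins the new pgd first when necessary: under Xen a
 * page-table tree must be pinned (validated read-only) before it may be
 * installed via MMUEXT_NEW_BASEPTR, which switch_mm() does next.
 */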
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
                mm_pin(next);
        switch_mm(prev, next, NULL);
}

#endif /* __I386_SCHED_H */