/* linux-2.6.git: include/asm-i386/mach-xen/asm/mmu_context.h */
#ifndef __I386_SCHED_H
#define __I386_SCHED_H

#include <linux/config.h>
#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
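
/*
 * A minimal sketch (illustration only, not part of this header) of what
 * the out-of-line init_new_context() typically does on fork: start with
 * an empty LDT and copy the parent's if it has one.  The copy_ldt()
 * helper and the exact mm_context_t field names are assumptions here.
 */
static inline int init_new_context_sketch(struct task_struct *tsk,
                                          struct mm_struct *mm)
{
        struct mm_struct *old_mm = current->mm;

        init_MUTEX(&mm->context.sem);
        mm->context.size = 0;           /* no LDT entries yet */
        if (old_mm && old_mm->context.size > 0)
                return copy_ldt(&mm->context, &old_mm->context);
        return 0;
}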

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#if 0 /* XEN: no lazy tlb */
        unsigned cpu = smp_processor_id();
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}

#define prepare_arch_switch(next)       __prepare_arch_switch()

static inline void __prepare_arch_switch(void)
{
        /*
         * Save away %fs and %gs. No need to save %es and %ds, as those
         * are always kernel segments while inside the kernel. Must
         * happen before reload of cr3/ldt (i.e., not in __switch_to).
         */
        asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
                : "=m" (current->thread.fs),
                  "=m" (current->thread.gs));

        /*
         * Clear %fs and %gs so that no stale selector can still
         * reference the outgoing LDT once switch_mm() replaces it.
         */
        asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
                : : "r" (0) );
}

extern void mm_pin(struct mm_struct *mm);
extern void mm_unpin(struct mm_struct *mm);
extern void mm_pin_all(void);

static inline void switch_mm(struct mm_struct *prev,
                             struct mm_struct *next,
                             struct task_struct *tsk)
{
        int cpu = smp_processor_id();
        struct mmuext_op _op[2], *op = _op;

        if (likely(prev != next)) {
                BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
                       !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags));

                /* Stop flush IPIs for the previous mm. */
                cpu_clear(cpu, prev->cpu_vm_mask);
#if 0 /* XEN: no lazy tlb */
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
                per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
                cpu_set(cpu, next->cpu_vm_mask);

                /* Re-load page tables: load_cr3(next->pgd) */
                op->cmd = MMUEXT_NEW_BASEPTR;
                op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
                op++;

                /* Load the LDT, if it differs from the previous one: */
                if (unlikely(prev->context.ldt != next->context.ldt)) {
                        /* load_LDT_nolock(&next->context, cpu) */
                        op->cmd = MMUEXT_SET_LDT;
                        op->arg1.linear_addr = (unsigned long)next->context.ldt;
                        op->arg2.nr_ents     = next->context.size;
                        op++;
                }

                /* Issue the batched operations in a single hypercall. */
                BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
        }
#if 0 /* XEN: no lazy tlb */
        else {
                per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
                BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

                if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
                        /*
                         * We were in lazy TLB mode and leave_mm disabled
                         * TLB flush IPI delivery. We must reload %cr3.
                         */
                        load_cr3(next->pgd);
                        load_LDT_nolock(&next->context, cpu);
                }
        }
#endif
}
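
/*
 * For reference, the same hypercall interface with a batch of one: a
 * minimal sketch of a local TLB flush.  MMUEXT_TLB_FLUSH_LOCAL is the
 * standard mmuext command for flushing the calling CPU's TLB; the
 * function name is ours, not part of this header.
 */
static inline void xen_tlb_flush_local_sketch(void)
{
        struct mmuext_op op;

        op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF));
}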

/*
 * Clear %fs and %gs when an mm is dropped, mirroring
 * __prepare_arch_switch(): no stale selector may outlive its LDT.
 */
#define deactivate_mm(tsk, mm) \
        asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))

static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        /* Pin the new page tables on first use; switch_mm() expects this. */
        if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
                mm_pin(next);
        switch_mm(prev, next, NULL);
}
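
/*
 * A rough sketch (illustration only) of the pinning step behind the
 * out-of-line mm_pin(): write-protect the page-table pages, then
 * register the top level with the hypervisor.  The write-protect walk
 * and locking are elided, and MMUEXT_PIN_L2_TABLE assumes the non-PAE
 * 2-level layout (PAE would pin with MMUEXT_PIN_L3_TABLE).
 */
static inline void mm_pin_sketch(struct mm_struct *mm)
{
        struct mmuext_op op;

        /* ... make every page-table page read-only first ... */

        op.cmd = MMUEXT_PIN_L2_TABLE;
        op.arg1.mfn = pfn_to_mfn(__pa(mm->pgd) >> PAGE_SHIFT);
        BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF));
        set_bit(PG_pinned, &virt_to_page(mm->pgd)->flags);
}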

#endif /* __I386_SCHED_H */