This commit was manufactured by cvs2svn to create branch 'vserver'.
[linux-2.6.git] / arch / i386 / mach-xen / irqflags.c
1 #include <linux/module.h>
2 #include <linux/smp.h>
3 #include <asm/irqflags.h>
4 #include <asm/hypervisor.h>
5
6 /* interrupt control.. */
7
/*
 * The use of 'barrier' below reflects these operations' role as local
 * locks. Reentrancy must be prevented (e.g., by __cli()) /before/ the
 * critical operations that follow are executed, and all critical
 * operations must complete /before/ reentrancy is permitted again
 * (e.g., by __sti()). The Alpha architecture, for example, also
 * includes such barriers.
 */
15
16 unsigned long __raw_local_save_flags(void)
17 {
18         struct vcpu_info *_vcpu;
19         unsigned long flags;
20
21         preempt_disable();
22         _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
23         flags = _vcpu->evtchn_upcall_mask;
24         preempt_enable();
25
26         return flags;
27 }
28 EXPORT_SYMBOL(__raw_local_save_flags);
29
/*
 * Restore the event-mask state previously captured by
 * __raw_local_save_flags()/__raw_local_irq_save().  If the restored
 * state is "unmasked", any event that arrived while masked must be
 * delivered now via force_evtchn_callback().
 */
void raw_local_irq_restore(unsigned long flags)
{
	struct vcpu_info *_vcpu;

	/* Pin to this VCPU: __vcpu_id / the vcpu_info slot are only
	 * meaningful while non-preemptible. */
	preempt_disable();
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
	/* Note: tests the value of the assignment expression (the stored
	 * mask field) rather than 'flags' itself — keep it that way. */
	if ((_vcpu->evtchn_upcall_mask = flags) == 0) {
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(_vcpu->evtchn_upcall_pending))
			force_evtchn_callback();
		preempt_enable();
	} else
		/* NOTE(review): presumably _no_resched is used here to avoid
		 * rescheduling while events remain masked — confirm against
		 * the comment above raw_irqs_disabled(). */
		preempt_enable_no_resched();

}
EXPORT_SYMBOL(raw_local_irq_restore);
46
47 void raw_local_irq_disable(void)
48 {
49         struct vcpu_info *_vcpu;
50
51         preempt_disable();
52         _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
53         _vcpu->evtchn_upcall_mask = 1;
54         preempt_enable_no_resched();
55 }
56 EXPORT_SYMBOL(raw_local_irq_disable);
57
/*
 * Unmask event delivery on this VCPU — the PV equivalent of "sti".
 * Any event that was raised while masked must be delivered explicitly
 * via force_evtchn_callback(), since the hypervisor will not re-send it.
 */
void raw_local_irq_enable(void)
{
	struct vcpu_info *_vcpu;

	/* Pin to this VCPU while touching its shared-info slot. */
	preempt_disable();
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
	_vcpu->evtchn_upcall_mask = 0;
	/* Compiler barrier: the store clearing the mask must be emitted
	 * before the pending-flag load below. */
	barrier(); /* unmask then check (avoid races) */
	if (unlikely(_vcpu->evtchn_upcall_pending))
		force_evtchn_callback();
	preempt_enable();
}
EXPORT_SYMBOL(raw_local_irq_enable);
71
72 /* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
73 int raw_irqs_disabled(void)
74 {
75         struct vcpu_info *_vcpu;
76         int disabled;
77
78         preempt_disable();
79         _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
80         disabled = (_vcpu->evtchn_upcall_mask != 0);
81         preempt_enable_no_resched();
82         return disabled;
83 }
84 EXPORT_SYMBOL(raw_irqs_disabled);
85
86 unsigned long __raw_local_irq_save(void)
87 {
88         struct vcpu_info *_vcpu;
89         unsigned long flags;
90
91         preempt_disable();
92         _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
93         flags = _vcpu->evtchn_upcall_mask;
94         _vcpu->evtchn_upcall_mask = 1;
95         preempt_enable_no_resched();
96
97         return flags;
98 }
99 EXPORT_SYMBOL(__raw_local_irq_save);