fedora core 6 1.2949 + vserver 2.2.0
[linux-2.6.git] / arch / i386 / mach-xen / irqflags.c
1 #include <linux/module.h>
2 #include <linux/smp.h>
3 #include <asm/irqflags.h>
4 #include <asm/hypervisor.h>
5
6 /* interrupt control.. */
7
/*
 * The use of 'barrier' below reflects its use as a local-lock
 * operation.  Reentrancy must be prevented (e.g., __cli()) /before/ the
 * following critical operations are executed.  All critical operations
 * must complete /before/ reentrancy is permitted (e.g., __sti()).  The
 * Alpha architecture also includes these barriers, for example.
 */
15
16 unsigned long __raw_local_save_flags(void)
17 {
18         struct vcpu_info *_vcpu;
19         unsigned long flags;
20
21         preempt_disable();
22         _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
23         flags = _vcpu->evtchn_upcall_mask;
24         preempt_enable();
25
26         return flags;
27 }
28 EXPORT_SYMBOL(__raw_local_save_flags);
29
/*
 * Restore the interrupt state previously captured by
 * __raw_local_save_flags()/__raw_local_irq_save().  'flags' is the
 * saved evtchn_upcall_mask value: 0 re-enables event delivery,
 * non-zero leaves it masked.
 */
void raw_local_irq_restore(unsigned long flags)
{
        struct vcpu_info *_vcpu;

        preempt_disable();
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
        /* Write the mask and test the value in one step. */
        if ((_vcpu->evtchn_upcall_mask = flags) == 0) {
                barrier(); /* unmask then check (avoid races) */
                /* An event may have arrived while we were masked; deliver it. */
                if (unlikely(_vcpu->evtchn_upcall_pending))
                        force_evtchn_callback();
                preempt_enable();
        } else
                /* Events stay masked: skip the reschedule check. */
                preempt_enable_no_resched();
}
EXPORT_SYMBOL(raw_local_irq_restore);
45
46 void raw_local_irq_disable(void)
47 {
48         struct vcpu_info *_vcpu;
49
50         preempt_disable();
51         _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
52         _vcpu->evtchn_upcall_mask = 1;
53         preempt_enable_no_resched();
54 }
55 EXPORT_SYMBOL(raw_local_irq_disable);
56
57 void raw_local_irq_enable(void)
58 {
59         struct vcpu_info *_vcpu;
60
61         preempt_disable();
62         _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
63         _vcpu->evtchn_upcall_mask = 0;
64         barrier(); /* unmask then check (avoid races) */
65         if (unlikely(_vcpu->evtchn_upcall_pending))
66                 force_evtchn_callback();
67         preempt_enable();
68 }
69 EXPORT_SYMBOL(raw_local_irq_enable);
70
71 /*
72  * For spinlocks, etc.:
73  */
74
75 unsigned long __raw_local_irq_save(void)
76 {
77         struct vcpu_info *_vcpu;
78         unsigned long flags;
79
80         preempt_disable();
81         _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
82         flags = _vcpu->evtchn_upcall_mask;
83         _vcpu->evtchn_upcall_mask = 1;
84         preempt_enable_no_resched();
85
86         return flags;
87 }
88 EXPORT_SYMBOL(__raw_local_irq_save);
89
90 /* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
91 int raw_irqs_disabled(void)
92 {
93         struct vcpu_info *_vcpu;
94         int disabled;
95
96         preempt_disable();
97         _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
98         disabled = (_vcpu->evtchn_upcall_mask != 0);
99         preempt_enable_no_resched();
100
101         return disabled;
102 }
103 EXPORT_SYMBOL(raw_irqs_disabled);