/*
 * arch/x86_64/kernel/irqflags-xen.c
 */

#include <linux/module.h>
#include <linux/smp.h>
#include <asm/irqflags.h>
#include <asm/hypervisor.h>

/*
 * The use of barrier() below reflects its role in these local-lock
 * style operations: reentrancy must be prevented (e.g., by __cli())
 * /before/ the critical operations that follow are executed, and all
 * critical operations must complete /before/ reentrancy is permitted
 * (e.g., by __sti()). The Alpha architecture, for example, requires
 * similar barriers.
 */

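/*
 * These are the Xen paravirtual counterparts of the native
 * local_irq_*() primitives: instead of toggling the IF bit in EFLAGS,
 * they manipulate evtchn_upcall_mask in this vCPU's shared-info area,
 * which masks event-channel upcalls (Xen's virtual interrupts). Each
 * access is bracketed by preempt_disable()/preempt_enable() so that
 * the task cannot migrate away from the vCPU whose mask it touches.
 */

/*
 * __raw_local_save_flags(): return the current upcall mask as the
 * "flags" value, analogous to reading EFLAGS with pushf.
 */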
unsigned long __raw_local_save_flags(void)
{
	struct vcpu_info *_vcpu;
	unsigned long flags;

	preempt_disable();
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
	flags = _vcpu->evtchn_upcall_mask;
	preempt_enable();

	return flags;
}
EXPORT_SYMBOL(__raw_local_save_flags);

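/*
 * raw_local_irq_restore(): rewrite the upcall mask from a previously
 * saved flags value. When this unmasks events, an upcall may have
 * become pending while they were masked, so the pending flag is
 * re-checked after the unmask and delivery is forced if needed.
 */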
void raw_local_irq_restore(unsigned long flags)
{
	struct vcpu_info *_vcpu;

	preempt_disable();
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
	if ((_vcpu->evtchn_upcall_mask = flags) == 0) {
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(_vcpu->evtchn_upcall_pending))
			force_evtchn_callback();
		preempt_enable();
	} else
		preempt_enable_no_resched();
}
EXPORT_SYMBOL(raw_local_irq_restore);

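/*
 * raw_local_irq_disable(): mask upcalls for this vCPU, the
 * paravirtual equivalent of cli. No hypercall is required; setting
 * the mask in the shared page is sufficient.
 */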
void raw_local_irq_disable(void)
{
	struct vcpu_info *_vcpu;

	preempt_disable();
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
	_vcpu->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
EXPORT_SYMBOL(raw_local_irq_disable);

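/*
 * raw_local_irq_enable(): unmask upcalls, the paravirtual equivalent
 * of sti. Events that became pending while masked must be delivered
 * now, hence the check of evtchn_upcall_pending after the unmask.
 */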
void raw_local_irq_enable(void)
{
	struct vcpu_info *_vcpu;

	preempt_disable();
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
	_vcpu->evtchn_upcall_mask = 0;
	barrier(); /* unmask then check (avoid races) */
	if (unlikely(_vcpu->evtchn_upcall_pending))
		force_evtchn_callback();
	preempt_enable();
}
EXPORT_SYMBOL(raw_local_irq_enable);

/*
 * For spinlocks, etc.:
 */

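/*
 * __raw_local_irq_save(): save the current upcall mask and mask
 * events in one step, returning the old state for a later
 * raw_local_irq_restore(). Illustrative usage, mirroring the native
 * irqsave pattern:
 *
 *	unsigned long flags = __raw_local_irq_save();
 *	...critical section, upcalls masked...
 *	raw_local_irq_restore(flags);
 */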
unsigned long __raw_local_irq_save(void)
{
	struct vcpu_info *_vcpu;
	unsigned long flags;

	preempt_disable();
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
	flags = _vcpu->evtchn_upcall_mask;
	_vcpu->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();

	return flags;
}
EXPORT_SYMBOL(__raw_local_irq_save);

/*
 * Cannot use preempt_enable() here: raw_irqs_disabled() is itself used
 * on the preemption path, so doing so would recurse into
 * preempt_schedule().
 */
int raw_irqs_disabled(void)
{
	struct vcpu_info *_vcpu;
	int disabled;

	preempt_disable();
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
	disabled = (_vcpu->evtchn_upcall_mask != 0);
	preempt_enable_no_resched();

	return disabled;
}
EXPORT_SYMBOL(raw_irqs_disabled);
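
/*
 * Note (illustrative): a flags value obtained from
 * __raw_local_save_flags() can be tested directly, since non-zero
 * means upcalls were masked at save time, e.g.:
 *
 *	unsigned long flags = __raw_local_save_flags();
 *	if (flags != 0)
 *		...virtual interrupts were disabled when saved...
 */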