X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fx86_64%2Fkernel%2Firqflags-xen.c;fp=arch%2Fx86_64%2Fkernel%2Firqflags-xen.c;h=e3b7ab5e6cf2e60e6540caeb4590245596957f5d;hb=f05f9504c50ed069377d37f02f22e7a16b5921de;hp=0000000000000000000000000000000000000000;hpb=16c70f8c1b54b61c3b951b6fb220df250fe09b32;p=linux-2.6.git

diff --git a/arch/x86_64/kernel/irqflags-xen.c b/arch/x86_64/kernel/irqflags-xen.c
new file mode 100644
index 000000000..e3b7ab5e6
--- /dev/null
+++ b/arch/x86_64/kernel/irqflags-xen.c
@@ -0,0 +1,100 @@
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <asm/irqflags.h>
+#include <asm/hypervisor.h>
+
+/*
+ * The use of 'barrier' in the following reflects their use as local-lock
+ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
+ * critical operations are executed. All critical operations must complete
+ * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
+ * includes these barriers, for example.
+ */
+
+unsigned long __raw_local_save_flags(void)
+{
+	struct vcpu_info *_vcpu;
+	unsigned long flags;
+
+	preempt_disable();
+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
+	flags = _vcpu->evtchn_upcall_mask;
+	preempt_enable();
+
+	return flags;
+}
+EXPORT_SYMBOL(__raw_local_save_flags);
+
+void raw_local_irq_restore(unsigned long flags)
+{
+	struct vcpu_info *_vcpu;
+	preempt_disable();
+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
+	if ((_vcpu->evtchn_upcall_mask = flags) == 0) {
+		barrier(); /* unmask then check (avoid races) */
+		if ( unlikely(_vcpu->evtchn_upcall_pending) )
+			force_evtchn_callback();
+		preempt_enable();
+	} else
+		preempt_enable_no_resched();
+}
+EXPORT_SYMBOL(raw_local_irq_restore);
+
+void raw_local_irq_disable(void)
+{
+	struct vcpu_info *_vcpu;
+
+	preempt_disable();
+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
+	_vcpu->evtchn_upcall_mask = 1;
+	preempt_enable_no_resched();
+}
+EXPORT_SYMBOL(raw_local_irq_disable);
+
+void raw_local_irq_enable(void)
+{
+	struct vcpu_info *_vcpu;
+
+	preempt_disable();
+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
+	_vcpu->evtchn_upcall_mask = 0;
+	barrier(); /* unmask then check (avoid races) */
+	if ( unlikely(_vcpu->evtchn_upcall_pending) )
+		force_evtchn_callback();
+	preempt_enable();
+}
+EXPORT_SYMBOL(raw_local_irq_enable);
+
+/*
+ * For spinlocks, etc.:
+ */
+
+unsigned long __raw_local_irq_save(void)
+{
+	struct vcpu_info *_vcpu;
+	unsigned long flags;
+
+	preempt_disable();
+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
+	flags = _vcpu->evtchn_upcall_mask;
+	_vcpu->evtchn_upcall_mask = 1;
+	preempt_enable_no_resched();
+
+	return flags;
+}
+EXPORT_SYMBOL(__raw_local_irq_save);
+
+/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
+int raw_irqs_disabled(void)
+{
+	struct vcpu_info *_vcpu;
+	int disabled;
+
+	preempt_disable();
+	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
+	disabled = (_vcpu->evtchn_upcall_mask != 0);
+	preempt_enable_no_resched();
+
+	return disabled;
+}
+EXPORT_SYMBOL(raw_irqs_disabled);
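
Usage sketch (illustrative only, not part of the commit above): these functions are the Xen paravirtual replacements for the raw IRQ-flag primitives, masking event-channel upcalls through the per-VCPU evtchn_upcall_mask rather than touching the real EFLAGS. A caller-side critical section pairs __raw_local_irq_save() with raw_local_irq_restore(); the helper name below is hypothetical.

/*
 * Illustrative sketch: pairs __raw_local_irq_save() with
 * raw_local_irq_restore(). Upcalls stay masked across the critical
 * section; the pending check inside raw_local_irq_restore() forces the
 * event-channel callback for anything that arrived while masked.
 */
static void example_event_critical_section(void)	/* hypothetical helper */
{
	unsigned long flags;

	flags = __raw_local_irq_save();	/* mask upcalls, return previous mask */

	/* ... code that must not be interrupted by event upcalls ... */

	raw_local_irq_restore(flags);	/* unmask only if previously unmasked */
}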