#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H
-#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#define switch_to(prev,next,last) do { \
unsigned long esi,edi; \
- asm volatile("pushl %%ebp\n\t" \
+ asm volatile("pushfl\n\t" /* Save flags */ \
+ "pushl %%ebp\n\t" \
"movl %%esp,%0\n\t" /* save ESP */ \
"movl %5,%%esp\n\t" /* restore ESP */ \
"movl $1f,%1\n\t" /* save EIP */ \
"jmp __switch_to\n" \
"1:\t" \
"popl %%ebp\n\t" \
+ "popfl" \
:"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
"=a" (last),"=S" (esi),"=D" (edi) \
:"m" (next->thread.esp),"m" (next->thread.eip), \
#define savesegment(seg, value) \
asm volatile("mov %%" #seg ",%0":"=rm" (value))
-/*
- * Clear and set 'TS' bit respectively
- */
-#define clts() (HYPERVISOR_fpu_taskswitch(0))
#define read_cr0() ({ \
unsigned int __dummy; \
__asm__ __volatile__( \
"movl %%cr0,%0\n\t" \
:"=r" (__dummy)); \
__dummy; \
})
#define write_cr0(x) \
- __asm__ __volatile__("movl %0,%%cr0": :"r" (x));
+ __asm__ __volatile__("movl %0,%%cr0": :"r" (x))
#define read_cr2() \
(HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
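/*
 * A paravirtualised guest cannot read the real %cr2 register, so the
 * hypervisor delivers the faulting address through the per-vcpu
 * shared-info area instead; read_cr2() simply fetches it from there.
 */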
#define write_cr2(x) \
- __asm__ __volatile__("movl %0,%%cr2": :"r" (x));
+ __asm__ __volatile__("movl %0,%%cr2": :"r" (x))
#define read_cr3() ({ \
unsigned int __dummy; \
__asm__ __volatile__( \
"movl %%cr3,%0\n\t" \
:"=r" (__dummy)); \
__dummy; \
})
-
#define read_cr4_safe() ({ \
unsigned int __dummy; \
/* This could fault if %cr4 does not exist */ \
__asm__("1: movl %%cr4, %0\n" \
"2:\n" \
".section __ex_table,\"a\"\n" \
"3: .long 1b,2b\n" \
".previous\n" \
: "=r" (__dummy): "0" (0)); \
__dummy; \
})
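/*
 * The exception-table entry above maps a fault at label 1 to a resume
 * point at label 2, so on CPUs without %cr4 the mov faults harmlessly
 * and the macro yields the 0 preloaded via the "0" (0) constraint.
 */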
#define write_cr4(x) \
- __asm__ __volatile__("movl %0,%%cr4": :"r" (x));
+ __asm__ __volatile__("movl %0,%%cr4": :"r" (x))
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() (HYPERVISOR_fpu_taskswitch(0))
#define stts() (HYPERVISOR_fpu_taskswitch(1))
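/*
 * Illustrative sketch (not part of this header): TS drives lazy FPU
 * context switching. A simplified kernel_fpu_begin()-style sequence
 * might look like:
 *
 *	preempt_disable();
 *	clts();		// clear TS: FPU usable without a device-not-available trap
 *	... use FPU/SSE ...
 *	stts();		// set TS: next FPU use traps so state is restored lazily
 *	preempt_enable();
 *
 * Under Xen both helpers become HYPERVISOR_fpu_taskswitch hypercalls,
 * since a paravirtualised guest cannot toggle CR0.TS directly.
 */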
#endif /* __KERNEL__ */
#define wbinvd() \
- __asm__ __volatile__ ("wbinvd": : :"memory");
+ __asm__ __volatile__ ("wbinvd": : :"memory")
static inline unsigned long get_limit(unsigned long segment)
{
unsigned long __limit;
__asm__("lsll %1,%0"
:"=r" (__limit):"r" (segment));
return __limit+1;
}
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on. Consider this example, with identical initial values for
 * "a" and "b" on two CPUs:
 *
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 *
 * This does not enforce ordering, since there is no data dependency between
* the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
**/
#define read_barrier_depends() do { } while(0)
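/*
 * Illustrative sketch (not part of this header): the data-dependent
 * case where read_barrier_depends() alone suffices. CPU 0 publishes a
 * value through pointer 'p'; CPU 1 dereferences whatever it reads:
 *
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 *
 * Here the read of *q depends on the read of p, so 'd' is guaranteed
 * to observe b == 2. On x86 the barrier expands to nothing, as above.
 */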
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-/* interrupt control.. */
-
-/*
- * The use of 'barrier' in the following reflects their use as local-lock
- * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
- * critical operations are executed. All critical operations must complete
- * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
- * includes these barriers, for example.
- */
-
-#define __cli() \
-do { \
- vcpu_info_t *_vcpu; \
- preempt_disable(); \
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
- _vcpu->evtchn_upcall_mask = 1; \
- preempt_enable_no_resched(); \
- barrier(); \
-} while (0)
-
-#define __sti() \
-do { \
- vcpu_info_t *_vcpu; \
- barrier(); \
- preempt_disable(); \
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
- _vcpu->evtchn_upcall_mask = 0; \
- barrier(); /* unmask then check (avoid races) */ \
- if (unlikely(_vcpu->evtchn_upcall_pending)) \
- force_evtchn_callback(); \
- preempt_enable(); \
-} while (0)
-
-#define __save_flags(x) \
-do { \
- vcpu_info_t *_vcpu; \
- preempt_disable(); \
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
- (x) = _vcpu->evtchn_upcall_mask; \
- preempt_enable(); \
-} while (0)
-
-#define __restore_flags(x) \
-do { \
- vcpu_info_t *_vcpu; \
- barrier(); \
- preempt_disable(); \
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
- if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
- barrier(); /* unmask then check (avoid races) */ \
- if (unlikely(_vcpu->evtchn_upcall_pending)) \
- force_evtchn_callback(); \
- preempt_enable(); \
- } else \
- preempt_enable_no_resched(); \
-} while (0)
-
-void safe_halt(void);
-void halt(void);
-
-#define __save_and_cli(x) \
-do { \
- vcpu_info_t *_vcpu; \
- preempt_disable(); \
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
- (x) = _vcpu->evtchn_upcall_mask; \
- _vcpu->evtchn_upcall_mask = 1; \
- preempt_enable_no_resched(); \
- barrier(); \
-} while (0)
-
-#define local_irq_save(x) __save_and_cli(x)
-#define local_irq_restore(x) __restore_flags(x)
-#define local_save_flags(x) __save_flags(x)
-#define local_irq_disable() __cli()
-#define local_irq_enable() __sti()
-
-/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
-#define irqs_disabled() \
-({ int ___x; \
- vcpu_info_t *_vcpu; \
- preempt_disable(); \
- _vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
- ___x = (_vcpu->evtchn_upcall_mask != 0); \
- preempt_enable_no_resched(); \
- ___x; })
+#include <linux/irqflags.h>
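/*
 * Illustrative sketch (not part of this header): callers are unchanged
 * by the switch to <linux/irqflags.h>; the generic entry points keep
 * the same names and semantics as the macros removed above:
 *
 *	unsigned long flags;
 *	local_irq_save(flags);		// mask event-channel upcalls
 *	... critical section ...
 *	local_irq_restore(flags);
 */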
/*
* disable hlt during certain critical i/o operations