diff --git a/include/asm-i386/mach-xen/asm/system.h b/include/asm-i386/mach-xen/asm/system.h
index 812e7bce4..d8ca39352 100644
--- a/include/asm-i386/mach-xen/asm/system.h
+++ b/include/asm-i386/mach-xen/asm/system.h
@@ -1,7 +1,6 @@
 #ifndef __ASM_SYSTEM_H
 #define __ASM_SYSTEM_H
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <asm/segment.h>
 #include <asm/cpufeature.h>
@@ -22,7 +21,8 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 #define switch_to(prev,next,last) do { \
 	unsigned long esi,edi; \
-	asm volatile("pushl %%ebp\n\t" \
+	asm volatile("pushfl\n\t" /* Save flags */ \
+		     "pushl %%ebp\n\t" \
 		     "movl %%esp,%0\n\t" /* save ESP */ \
 		     "movl %5,%%esp\n\t" /* restore ESP */ \
 		     "movl $1f,%1\n\t" /* save EIP */ \
@@ -30,6 +30,7 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 		     "jmp __switch_to\n" \
 		     "1:\t" \
 		     "popl %%ebp\n\t" \
+		     "popfl" \
 		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
 		      "=a" (last),"=S" (esi),"=D" (edi) \
 		     :"m" (next->thread.esp),"m" (next->thread.eip), \
@@ -91,10 +92,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define savesegment(seg, value) \
 	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
-/*
- * Clear and set 'TS' bit respectively
- */
-#define clts() (HYPERVISOR_fpu_taskswitch(0))
 #define read_cr0() ({ \
 	unsigned int __dummy; \
 	__asm__ __volatile__( \
@@ -103,12 +100,12 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	__dummy; \
 })
 #define write_cr0(x) \
-	__asm__ __volatile__("movl %0,%%cr0": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr0": :"r" (x))
 
 #define read_cr2() \
 	(HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
 #define write_cr2(x) \
-	__asm__ __volatile__("movl %0,%%cr2": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr2": :"r" (x))
 
 #define read_cr3() ({ \
 	unsigned int __dummy; \
@@ -131,7 +128,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 		:"=r" (__dummy)); \
 	__dummy; \
 })
-
 #define read_cr4_safe() ({ \
 	unsigned int __dummy; \
 	/* This could fault if %cr4 does not exist */ \
@@ -145,13 +141,17 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 })
 
 #define write_cr4(x) \
-	__asm__ __volatile__("movl %0,%%cr4": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() (HYPERVISOR_fpu_taskswitch(0))
 #define stts() (HYPERVISOR_fpu_taskswitch(1))
 
 #endif /* __KERNEL__ */
 
 #define wbinvd() \
-	__asm__ __volatile__ ("wbinvd": : :"memory");
+	__asm__ __volatile__ ("wbinvd": : :"memory")
 
 static inline unsigned long get_limit(unsigned long segment)
 {
@@ -435,7 +435,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
  * does not enforce ordering, since there is no data dependency between
  * the read of "a" and the read of "b".  Therefore, on some CPUs, such
  * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
  **/
 
 #define read_barrier_depends()	do { } while(0)
@@ -462,94 +462,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif
 
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-/* interrupt control.. */
-
-/*
- * The use of 'barrier' in the following reflects their use as local-lock
- * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
- * critical operations are executed. All critical operations must complete
- * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
- * includes these barriers, for example.
- */
-
-#define __cli() \
-do { \
-	vcpu_info_t *_vcpu; \
-	preempt_disable(); \
-	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-	_vcpu->evtchn_upcall_mask = 1; \
-	preempt_enable_no_resched(); \
-	barrier(); \
-} while (0)
-
-#define __sti() \
-do { \
-	vcpu_info_t *_vcpu; \
-	barrier(); \
-	preempt_disable(); \
-	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-	_vcpu->evtchn_upcall_mask = 0; \
-	barrier(); /* unmask then check (avoid races) */ \
-	if (unlikely(_vcpu->evtchn_upcall_pending)) \
-		force_evtchn_callback(); \
-	preempt_enable(); \
-} while (0)
-
-#define __save_flags(x) \
-do { \
-	vcpu_info_t *_vcpu; \
-	preempt_disable(); \
-	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-	(x) = _vcpu->evtchn_upcall_mask; \
-	preempt_enable(); \
-} while (0)
-
-#define __restore_flags(x) \
-do { \
-	vcpu_info_t *_vcpu; \
-	barrier(); \
-	preempt_disable(); \
-	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
-		barrier(); /* unmask then check (avoid races) */ \
-		if (unlikely(_vcpu->evtchn_upcall_pending)) \
-			force_evtchn_callback(); \
-		preempt_enable(); \
-	} else \
-		preempt_enable_no_resched(); \
-} while (0)
-
-void safe_halt(void);
-void halt(void);
-
-#define __save_and_cli(x) \
-do { \
-	vcpu_info_t *_vcpu; \
-	preempt_disable(); \
-	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-	(x) = _vcpu->evtchn_upcall_mask; \
-	_vcpu->evtchn_upcall_mask = 1; \
-	preempt_enable_no_resched(); \
-	barrier(); \
-} while (0)
-
-#define local_irq_save(x)	__save_and_cli(x)
-#define local_irq_restore(x)	__restore_flags(x)
-#define local_save_flags(x)	__save_flags(x)
-#define local_irq_disable()	__cli()
-#define local_irq_enable()	__sti()
-
-/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
-#define irqs_disabled() \
-({	int ___x; \
-	vcpu_info_t *_vcpu; \
-	preempt_disable(); \
-	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-	___x = (_vcpu->evtchn_upcall_mask != 0); \
-	preempt_enable_no_resched(); \
-	___x; })
+#include <linux/irqflags.h>
 
 /*
  * disable hlt during certain critical i/o operations
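Note on the large removal at the end: the __cli()/__sti()/__save_flags()/__restore_flags() family is not deleted outright; with 2.6.18 the interrupt-flag primitives move into the mach-xen <asm/irqflags.h>, pulled in through the added #include <linux/irqflags.h>. The sketch below is a minimal user-space paraphrase of the protocol those macros implement, compilable with plain gcc; the struct layout, the single static vcpu, and the force_evtchn_callback() stub are simplified stand-ins rather than the real hypervisor ABI, and the kernel's preempt_disable()/preempt_enable() bracketing is omitted. The point it demonstrates: a paravirtualized guest cannot execute cli/sti, so it toggles a per-vCPU mask byte in memory shared with Xen, and after clearing the mask it must re-check the pending byte so that an event which arrived while masked is not lost (the "unmask then check (avoid races)" comment in the removed macros).

/* evtchn_sketch.c - illustrative only; compile with: gcc -o evtchn_sketch evtchn_sketch.c */
#include <stdio.h>
#include <stdint.h>

/* Compiler barrier, like the kernel's barrier(): orders the surrounding
 * memory accesses in the emitted code without generating a fence. */
#define barrier() __asm__ __volatile__("" : : : "memory")

/* Simplified stand-in for the per-vCPU slot in the Xen shared-info page. */
struct vcpu_info {
	uint8_t evtchn_upcall_pending;	/* set by the hypervisor when an event arrives */
	uint8_t evtchn_upcall_mask;	/* set by the guest to block upcalls ("cli") */
};

static struct vcpu_info vcpu;

/* Stand-in for force_evtchn_callback(): in the kernel this re-enters the
 * hypervisor so an event held back while masked is delivered now. */
static void force_evtchn_callback(void)
{
	printf("delivering event that arrived while masked\n");
	vcpu.evtchn_upcall_pending = 0;
}

static void xen_irq_disable(void)	/* shape of the removed __cli() */
{
	vcpu.evtchn_upcall_mask = 1;
	barrier();	/* mask set before the critical section begins */
}

static void xen_irq_enable(void)	/* shape of the removed __sti() */
{
	barrier();	/* critical section complete before unmasking */
	vcpu.evtchn_upcall_mask = 0;
	barrier();	/* unmask then check (avoid races) */
	if (vcpu.evtchn_upcall_pending)
		force_evtchn_callback();
}

int main(void)
{
	xen_irq_disable();
	vcpu.evtchn_upcall_pending = 1;	/* an event arrives while we are masked */
	xen_irq_enable();		/* the re-check notices and delivers it */
	return 0;
}

The barrier() placement mirrors the removed macros, and the removed header comment makes the same point: these barriers act as local-lock operations for the vCPU's own mask byte, not cross-CPU fences, which is why a compiler barrier suffices there.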