X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-x86_64%2Fmach-xen%2Fasm%2Fsystem.h;h=7d8b8822029ebeec8e0b53cc5f1acef9bb054343;hb=16c70f8c1b54b61c3b951b6fb220df250fe09b32;hp=2088f6122b9ac272508938ce8cc714faf8e6d6d0;hpb=1db395853d4f30d6120458bd279ede1f882a8525;p=linux-2.6.git

diff --git a/include/asm-x86_64/mach-xen/asm/system.h b/include/asm-x86_64/mach-xen/asm/system.h
index 2088f6122..7d8b88220 100644
--- a/include/asm-x86_64/mach-xen/asm/system.h
+++ b/include/asm-x86_64/mach-xen/asm/system.h
@@ -1,7 +1,6 @@
 #ifndef __ASM_SYSTEM_H
 #define __ASM_SYSTEM_H
 
-#include
 #include
 #include
 #include
@@ -16,12 +15,6 @@
 #define __vcpu_id 0
 #endif
 
-#ifdef CONFIG_SMP
-#define LOCK_PREFIX "lock ; "
-#else
-#define LOCK_PREFIX ""
-#endif
-
 #define __STR(x) #x
 #define STR(x) __STR(x)
 
@@ -44,7 +37,7 @@
 	"thread_return:\n\t" \
 	"movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
 	"movq %P[thread_info](%%rsi),%%r8\n\t" \
-	LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
+	LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
 	"movq %%rax,%%rdi\n\t" \
 	"jc ret_from_fork\n\t" \
 	RESTORE_CONTEXT \
@@ -80,82 +73,6 @@ extern void load_gs_index(unsigned);
 	".previous" \
 	: :"r" (value), "r" (0))
 
-#ifdef __KERNEL__
-struct alt_instr {
-	__u8 *instr;		/* original instruction */
-	__u8 *replacement;
-	__u8 cpuid;		/* cpuid bit set for replacement */
-	__u8 instrlen;		/* length of original instruction */
-	__u8 replacementlen;	/* length of new instruction, <= instrlen */
-	__u8 pad[5];
-};
-#endif
-
-/*
- * Alternative instructions for different CPU types or capabilities.
- *
- * This allows to use optimized instructions even on generic binary
- * kernels.
- *
- * length of oldinstr must be longer or equal the length of newinstr
- * It can be padded with nops as needed.
- *
- * For non barrier like inlines please define new variants
- * without volatile and memory clobber.
- */
-#define alternative(oldinstr, newinstr, feature) \
-	asm volatile ("661:\n\t" oldinstr "\n662:\n" \
-		".section .altinstructions,\"a\"\n" \
-		" .align 8\n" \
-		" .quad 661b\n"			/* label */ \
-		" .quad 663f\n"			/* new instruction */ \
-		" .byte %c0\n"			/* feature bit */ \
-		" .byte 662b-661b\n"		/* sourcelen */ \
-		" .byte 664f-663f\n"		/* replacementlen */ \
-		".previous\n" \
-		".section .altinstr_replacement,\"ax\"\n" \
-		"663:\n\t" newinstr "\n664:\n"	/* replacement */ \
-		".previous" :: "i" (feature) : "memory")
-
-/*
- * Alternative inline assembly with input.
- *
- * Peculiarities:
- *   No memory clobber here.
- *   Argument numbers start with 1.
- *   Best is to use constraints that are fixed size (like (%1) ... "r")
- *   If you use variable sized constraints like "m" or "g" in the
- *   replacement make sure to pad to the worst case length.
- */
-#define alternative_input(oldinstr, newinstr, feature, input...) \
-	asm volatile ("661:\n\t" oldinstr "\n662:\n" \
-		".section .altinstructions,\"a\"\n" \
-		" .align 8\n" \
-		" .quad 661b\n"			/* label */ \
-		" .quad 663f\n"			/* new instruction */ \
-		" .byte %c0\n"			/* feature bit */ \
-		" .byte 662b-661b\n"		/* sourcelen */ \
-		" .byte 664f-663f\n"		/* replacementlen */ \
-		".previous\n" \
-		".section .altinstr_replacement,\"ax\"\n" \
-		"663:\n\t" newinstr "\n664:\n"	/* replacement */ \
-		".previous" :: "i" (feature), ##input)
-
-/* Like alternative_input, but with a single output argument */
-#define alternative_io(oldinstr, newinstr, feature, output, input...) \
-	asm volatile ("661:\n\t" oldinstr "\n662:\n" \
-		".section .altinstructions,\"a\"\n" \
-		" .align 8\n" \
-		" .quad 661b\n"			/* label */ \
-		" .quad 663f\n"			/* new instruction */ \
-		" .byte %c[feat]\n"		/* feature bit */ \
-		" .byte 662b-661b\n"		/* sourcelen */ \
-		" .byte 664f-663f\n"		/* replacementlen */ \
-		".previous\n" \
-		".section .altinstr_replacement,\"ax\"\n" \
-		"663:\n\t" newinstr "\n664:\n"	/* replacement */ \
-		".previous" : output : [feat] "i" (feature), ##input)
-
 /*
  * Clear and set 'TS' bit respectively
  */
@@ -331,98 +248,17 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 #endif
 
 #define read_barrier_depends()	do {} while(0)
 #define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
 #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
-
-/*
- * The use of 'barrier' in the following reflects their use as local-lock
- * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
- * critical operations are executed. All critical operations must complete
- * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
- * includes these barriers, for example.
- */
-
-#define __cli() \
-do { \
-	vcpu_info_t *_vcpu; \
-	preempt_disable(); \
-	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-	_vcpu->evtchn_upcall_mask = 1; \
-	preempt_enable_no_resched(); \
-	barrier(); \
-} while (0)
-
-#define __sti() \
-do { \
-	vcpu_info_t *_vcpu; \
-	barrier(); \
-	preempt_disable(); \
-	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-	_vcpu->evtchn_upcall_mask = 0; \
-	barrier(); /* unmask then check (avoid races) */ \
-	if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
-		force_evtchn_callback(); \
-	preempt_enable(); \
-} while (0)
-
-#define __save_flags(x) \
-do { \
-	vcpu_info_t *_vcpu; \
-	preempt_disable(); \
-	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-	(x) = _vcpu->evtchn_upcall_mask; \
-	preempt_enable(); \
-} while (0)
-
-#define __restore_flags(x) \
-do { \
-	vcpu_info_t *_vcpu; \
-	barrier(); \
-	preempt_disable(); \
-	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
-		barrier(); /* unmask then check (avoid races) */ \
-		if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
-			force_evtchn_callback(); \
-		preempt_enable(); \
-	} else \
-		preempt_enable_no_resched(); \
-} while (0)
-
-#define __save_and_cli(x) \
-do { \
-	vcpu_info_t *_vcpu; \
-	preempt_disable(); \
-	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-	(x) = _vcpu->evtchn_upcall_mask; \
-	_vcpu->evtchn_upcall_mask = 1; \
-	preempt_enable_no_resched(); \
-	barrier(); \
-} while (0)
-
-#define local_irq_save(x)	__save_and_cli(x)
-#define local_irq_restore(x)	__restore_flags(x)
-#define local_save_flags(x)	__save_flags(x)
-#define local_irq_disable()	__cli()
-#define local_irq_enable()	__sti()
-
-/* Cannot use preempt_enable() here as we would recurse in preempt_sched(). */
-#define irqs_disabled() \
-({	int ___x; \
-	vcpu_info_t *_vcpu; \
-	preempt_disable(); \
-	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id]; \
-	___x = (_vcpu->evtchn_upcall_mask != 0); \
-	preempt_enable_no_resched(); \
-	___x; })
-
 void safe_halt(void);
 void halt(void);
 
+#include
+
 void cpu_idle_wait(void);
 
 extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 #endif
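The last hunk above drops this header's open-coded Xen event-channel masking macros (__cli, __sti, __save_flags, __restore_flags, __save_and_cli, irqs_disabled). Purely as a reading aid, the sketch below re-expresses the upcall-mask logic of the removed __cli()/__sti() macros as standalone C functions. This is not the kernel's replacement code: the function names xen_irq_disable/xen_irq_enable are illustrative only, and the sketch assumes the usual Xen guest definitions (vcpu_info_t, HYPERVISOR_shared_info, force_evtchn_callback, __vcpu_id) provided by the mach-xen headers.

/*
 * Reading aid only: the logic of the removed __cli()/__sti() macros,
 * rewritten as plain C inline functions.  Names are illustrative; the
 * types and globals are assumed to come from the mach-xen headers.
 */
static inline void xen_irq_disable(void)
{
	vcpu_info_t *vcpu;

	preempt_disable();
	vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
	vcpu->evtchn_upcall_mask = 1;	/* mask event-channel upcalls */
	preempt_enable_no_resched();
	barrier();			/* mask before entering the critical section */
}

static inline void xen_irq_enable(void)
{
	vcpu_info_t *vcpu;

	barrier();			/* finish the critical section before unmasking */
	preempt_disable();
	vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];
	vcpu->evtchn_upcall_mask = 0;
	barrier();			/* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		force_evtchn_callback();	/* deliver events that arrived while masked */
	preempt_enable();
}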