X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-x86_64%2Fsystem.h;fp=include%2Fasm-x86_64%2Fsystem.h;h=3975989802280ff1ad2ffc7322a68611ad2d5793;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=76165736e43a8c1b913c11ab224c907efe351577;hpb=cee37fe97739d85991964371c1f3a745c00dd236;p=linux-2.6.git

diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index 76165736e..397598980 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -20,8 +20,8 @@
 #define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
 
 /* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT "pushfq ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popfq\n\t"
+#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
+#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"
 
 #define __EXTRA_CLOBBER \
 	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
@@ -70,12 +70,6 @@ extern void load_gs_index(unsigned);
 	".previous" \
 	: :"r" (value), "r" (0))
 
-#define set_debug(value,register) \
-	__asm__("movq %0,%%db" #register \
-	: /* no output */ \
-	:"r" ((unsigned long) value))
-
-
 #ifdef __KERNEL__
 struct alt_instr {
 	__u8 *instr; /* original instruction */
@@ -116,12 +110,12 @@ struct alt_instr {
 /*
  * Alternative inline assembly with input.
  *
- * Pecularities:
+ * Peculiarities:
  * No memory clobber here.
  * Argument numbers start with 1.
  * Best is to use constraints that are fixed size (like (%1) ... "r")
  * If you use variable sized constraints like "m" or "g" in the
- * replacement maake sure to pad to the worst case length.
+ * replacement make sure to pad to the worst case length.
  */
 #define alternative_input(oldinstr, newinstr, feature, input...) \
 	asm volatile ("661:\n\t" oldinstr "\n662:\n" \
@@ -137,6 +131,21 @@ struct alt_instr {
 	"663:\n\t" newinstr "\n664:\n" /* replacement */ \
 	".previous" :: "i" (feature), ##input)
 
+/* Like alternative_input, but with a single output argument */
+#define alternative_io(oldinstr, newinstr, feature, output, input...) \
+	asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+		".section .altinstructions,\"a\"\n" \
+		" .align 8\n" \
+		" .quad 661b\n" /* label */ \
+		" .quad 663f\n" /* new instruction */ \
+		" .byte %c[feat]\n" /* feature bit */ \
+		" .byte 662b-661b\n" /* sourcelen */ \
+		" .byte 664f-663f\n" /* replacementlen */ \
+		".previous\n" \
+		".section .altinstr_replacement,\"ax\"\n" \
+		"663:\n\t" newinstr "\n664:\n" /* replacement */ \
+		".previous" : output : [feat] "i" (feature), ##input)
+
 /*
  * Clear and set 'TS' bit respectively
  */
@@ -178,6 +187,15 @@ static inline void write_cr4(unsigned long val)
 #define wbinvd() \
 	__asm__ __volatile__ ("wbinvd": : :"memory");
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+static inline void sched_cacheflush(void)
+{
+	wbinvd();
+}
+
 #endif /* __KERNEL__ */
 
 #define nop() __asm__ __volatile__ ("nop")
@@ -188,7 +206,7 @@ static inline void write_cr4(unsigned long val)
 
 #define __xg(x) ((volatile long *)(x))
 
-extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
+static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
 {
 	*ptr = val;
 }
@@ -253,19 +271,19 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	case 2:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
 			: "=a"(prev)
-			: "q"(new), "m"(*__xg(ptr)), "0"(old)
+			: "r"(new), "m"(*__xg(ptr)), "0"(old)
 			: "memory");
 		return prev;
 	case 4:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
 			: "=a"(prev)
-			: "q"(new), "m"(*__xg(ptr)), "0"(old)
+			: "r"(new), "m"(*__xg(ptr)), "0"(old)
 			: "memory");
 		return prev;
 	case 8:
 		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
 			: "=a"(prev)
-			: "q"(new), "m"(*__xg(ptr)), "0"(old)
+			: "r"(new), "m"(*__xg(ptr)), "0"(old)
 			: "memory");
 		return prev;
 	}
@@ -303,7 +321,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 #define wmb() asm volatile("" ::: "memory")
 #endif
 #define read_barrier_depends() do {} while(0)
-#define set_mb(var, value) do { xchg(&var, value); } while (0)
+#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
 #define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
@@ -311,10 +329,24 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 /* interrupt control.. */
 #define local_save_flags(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
 #define local_irq_restore(x) __asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
+
+#ifdef CONFIG_X86_VSMP
+/* Interrupt control for VSMP architecture */
+#define local_irq_disable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags & ~(1 << 9)) | (1 << 18)); } while (0)
+#define local_irq_enable() do { unsigned long flags; local_save_flags(flags); local_irq_restore((flags | (1 << 9)) & ~(1 << 18)); } while (0)
+
+#define irqs_disabled() \
+({ \
+	unsigned long flags; \
+	local_save_flags(flags); \
+	(flags & (1<<18)) || !(flags & (1<<9)); \
+})
+
+/* For spinlocks etc */
+#define local_irq_save(x) do { local_save_flags(x); local_irq_restore((x & ~(1 << 9)) | (1 << 18)); } while (0)
+#else /* CONFIG_X86_VSMP */
 #define local_irq_disable() __asm__ __volatile__("cli": : :"memory")
 #define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
 
 #define irqs_disabled() \
 ({ \
@@ -325,18 +357,14 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 /* For spinlocks etc */
 #define local_irq_save(x) do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
+#endif
 
-void cpu_idle_wait(void);
-
-/*
- * disable hlt during certain critical i/o operations
- */
-#define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
+/* used in the idle loop; sti takes one instruction cycle to complete */
+#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
+/* used when interrupts are already enabled or to shutdown the processor */
+#define halt() __asm__ __volatile__("hlt": : :"memory")
 
-#define HAVE_EAT_KEY
-void eat_key(void);
+void cpu_idle_wait(void);
 
 extern unsigned long arch_align_stack(unsigned long sp);
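
The least obvious part of the change above is the CONFIG_X86_VSMP interrupt-control block, which encodes "interrupts disabled" in two RFLAGS bits, IF (bit 9) and AC (bit 18), and then applies the computed flags word through local_irq_restore's pushq ; popfq sequence. The following standalone user-space C sketch mirrors only that bit arithmetic so the resulting masks can be inspected. It is an illustration, not kernel code, and the helper names (vsmp_irq_disable_flags and friends) are invented for this example.

#include <stdio.h>

/* Bit positions used by the VSMP macros in the hunk above. */
#define IF_BIT (1UL << 9)   /* RFLAGS.IF, the normal interrupt enable flag */
#define AC_BIT (1UL << 18)  /* RFLAGS.AC, reused by VSMP to mean "disabled" */

/* Mirrors local_irq_disable() under CONFIG_X86_VSMP: clear IF, set AC. */
static unsigned long vsmp_irq_disable_flags(unsigned long flags)
{
	return (flags & ~IF_BIT) | AC_BIT;
}

/* Mirrors local_irq_enable() under CONFIG_X86_VSMP: set IF, clear AC. */
static unsigned long vsmp_irq_enable_flags(unsigned long flags)
{
	return (flags | IF_BIT) & ~AC_BIT;
}

/* Mirrors the VSMP irqs_disabled() test: AC set, or IF clear. */
static int vsmp_irqs_disabled(unsigned long flags)
{
	return (flags & AC_BIT) || !(flags & IF_BIT);
}

int main(void)
{
	unsigned long flags = 0x202; /* a typical RFLAGS value with IF set */

	printf("start:   %#lx  disabled=%d\n", flags, vsmp_irqs_disabled(flags));
	flags = vsmp_irq_disable_flags(flags);
	printf("disable: %#lx  disabled=%d\n", flags, vsmp_irqs_disabled(flags));
	flags = vsmp_irq_enable_flags(flags);
	printf("enable:  %#lx  disabled=%d\n", flags, vsmp_irqs_disabled(flags));
	return 0;
}

Built with a plain gcc invocation, this prints 0x202, then 0x40002, then 0x202 again, and the disabled predicate tracks the irqs_disabled() expression added in the patch; in the kernel the same computed value would be written back to RFLAGS by local_irq_restore.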