X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-i386%2Fsystem.h;h=a6d20d9a1a307b0f6bbf55351e91fb26f48ad6b0;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=ec36513239bd89a9f3309f251c80a2a62fd7407e;hpb=76828883507a47dae78837ab5dec5a5b4513c667;p=linux-2.6.git

diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index ec3651323..a6d20d9a1 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -1,7 +1,6 @@
 #ifndef __ASM_SYSTEM_H
 #define __ASM_SYSTEM_H
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <asm/segment.h>
 #include <asm/cpufeature.h>
@@ -89,10 +88,9 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 #define savesegment(seg, value) \
 	asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
-/*
- * Clear and set 'TS' bit respectively
- */
-#define clts() __asm__ __volatile__ ("clts")
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
 #define read_cr0() ({ \
 	unsigned int __dummy; \
 	__asm__ __volatile__( \
@@ -101,7 +99,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	__dummy; \
 })
 #define write_cr0(x) \
-	__asm__ __volatile__("movl %0,%%cr0": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr0": :"r" (x))
 
 #define read_cr2() ({ \
 	unsigned int __dummy; \
@@ -111,7 +109,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	__dummy; \
 })
 #define write_cr2(x) \
-	__asm__ __volatile__("movl %0,%%cr2": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr2": :"r" (x))
 
 #define read_cr3() ({ \
 	unsigned int __dummy; \
@@ -121,7 +119,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	__dummy; \
 })
 #define write_cr3(x) \
-	__asm__ __volatile__("movl %0,%%cr3": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr3": :"r" (x))
 
 #define read_cr4() ({ \
 	unsigned int __dummy; \
@@ -130,7 +128,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 		:"=r" (__dummy)); \
 	__dummy; \
 })
-
 #define read_cr4_safe() ({ \
 	unsigned int __dummy; \
 	/* This could fault if %cr4 does not exist */ \
@@ -142,16 +139,21 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	: "=r" (__dummy): "0" (0)); \
 	__dummy; \
 })
-
 #define write_cr4(x) \
-	__asm__ __volatile__("movl %0,%%cr4": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
+
+#define wbinvd() \
+	__asm__ __volatile__ ("wbinvd": : :"memory")
+
+/* Clear the 'TS' bit */
+#define clts() __asm__ __volatile__ ("clts")
+#endif/* CONFIG_PARAVIRT */
+
+/* Set the 'TS' bit */
 #define stts() write_cr0(8 | read_cr0())
 
 #endif	/* __KERNEL__ */
 
-#define wbinvd() \
-	__asm__ __volatile__ ("wbinvd": : :"memory");
-
 static inline unsigned long get_limit(unsigned long segment)
 {
 	unsigned long __limit;
@@ -269,6 +271,9 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 #define cmpxchg(ptr,o,n)\
 	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
 					(unsigned long)(n),sizeof(*(ptr))))
+#define sync_cmpxchg(ptr,o,n)\
+	((__typeof__(*(ptr)))__sync_cmpxchg((ptr),(unsigned long)(o),\
+					(unsigned long)(n),sizeof(*(ptr))))
 #endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
@@ -298,6 +303,39 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	return old;
 }
 
+/*
+ * Always use locked operations when touching memory shared with a
+ * hypervisor, since the system may be SMP even if the guest kernel
+ * isn't.
+ */
+static inline unsigned long __sync_cmpxchg(volatile void *ptr,
+					    unsigned long old,
+					    unsigned long new, int size)
+{
+	unsigned long prev;
+	switch (size) {
+	case 1:
+		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
+				     : "=a"(prev)
+				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 2:
+		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	case 4:
+		__asm__ __volatile__("lock; cmpxchgl %1,%2"
+				     : "=a"(prev)
+				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+				     : "memory");
+		return prev;
+	}
+	return old;
+}
+
 #ifndef CONFIG_X86_CMPXCHG
 /*
  * Building a kernel capable running on 80386. It may be necessary to
@@ -358,67 +396,6 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l
 
 #endif
 
-#ifdef __KERNEL__
-struct alt_instr {
-	__u8 *instr;		/* original instruction */
-	__u8 *replacement;
-	__u8 cpuid;		/* cpuid bit set for replacement */
-	__u8 instrlen;		/* length of original instruction */
-	__u8 replacementlen;	/* length of new instruction, <= instrlen */
-	__u8 pad;
-};
-#endif
-
-/*
- * Alternative instructions for different CPU types or capabilities.
- *
- * This allows to use optimized instructions even on generic binary
- * kernels.
- *
- * length of oldinstr must be longer or equal the length of newinstr
- * It can be padded with nops as needed.
- *
- * For non barrier like inlines please define new variants
- * without volatile and memory clobber.
- */
-#define alternative(oldinstr, newinstr, feature)	\
-	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
-		      ".section .altinstructions,\"a\"\n"	\
-		      " .align 4\n"				\
-		      " .long 661b\n"		/* label */		\
-		      " .long 663f\n"		/* new instruction */	\
-		      " .byte %c0\n"		/* feature bit */	\
-		      " .byte 662b-661b\n"	/* sourcelen */		\
-		      " .byte 664f-663f\n"	/* replacementlen */	\
-		      ".previous\n"					\
-		      ".section .altinstr_replacement,\"ax\"\n"		\
-		      "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
-		      ".previous" :: "i" (feature) : "memory")
-
-/*
- * Alternative inline assembly with input.
- *
- * Pecularities:
- * No memory clobber here.
- * Argument numbers start with 1.
- * Best is to use constraints that are fixed size (like (%1) ... "r")
- * If you use variable sized constraints like "m" or "g" in the
- * replacement maake sure to pad to the worst case length.
- */
-#define alternative_input(oldinstr, newinstr, feature, input...)	\
-	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
-		      ".section .altinstructions,\"a\"\n"	\
-		      " .align 4\n"				\
-		      " .long 661b\n"		/* label */		\
-		      " .long 663f\n"		/* new instruction */	\
-		      " .byte %c0\n"		/* feature bit */	\
-		      " .byte 662b-661b\n"	/* sourcelen */		\
-		      " .byte 664f-663f\n"	/* replacementlen */	\
-		      ".previous\n"					\
-		      ".section .altinstr_replacement,\"ax\"\n"		\
-		      "663:\n\t" newinstr "\n664:\n"	/* replacement */ \
-		      ".previous" :: "i" (feature), ##input)
-
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
@@ -495,7 +472,7 @@ struct alt_instr {
  * does not enforce ordering, since there is no data dependency between
  * the read of "a" and the read of "b". Therefore, on some CPUs, such
  * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
- * in cases like thiswhere there are no data dependencies.
+ * in cases like this where there are no data dependencies.
**/
 
 #define read_barrier_depends()	do { } while(0)
 
@@ -522,27 +499,7 @@ struct alt_instr {
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif
 
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-
-/* interrupt control.. */
-#define local_save_flags(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
-#define local_irq_restore(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
-#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
-#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
-/* used in the idle loop; sti takes one instruction cycle to complete */
-#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
-/* used when interrupts are already enabled or to shutdown the processor */
-#define halt()			__asm__ __volatile__("hlt": : :"memory")
-
-#define irqs_disabled()			\
-({					\
-	unsigned long flags;		\
-	local_save_flags(flags);	\
-	!(flags & (1<<9));		\
-})
-
-/* For spinlocks etc */
-#define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
+#include <linux/irqflags.h>
 
 /*
  * disable hlt during certain critical i/o operations
@@ -564,5 +521,8 @@ static inline void sched_cacheflush(void)
 }
 
 extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+void default_idle(void);
 
 #endif
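
A usage sketch for the sync_cmpxchg() added by this patch, assuming a 2.6.20-era i386 kernel built with CONFIG_X86_CMPXCHG (the wrapper macro is only defined under that option). The shared_counter pointer and the claim_token() helper are hypothetical names for illustration, not part of the patch:

#include <asm/system.h>	/* sync_cmpxchg() */

/* Hypothetical: a counter living in a page shared with the hypervisor. */
static volatile unsigned long *shared_counter;

/*
 * Atomically take the current value and advance the counter.
 * sync_cmpxchg() always emits "lock; cmpxchgl", so the update stays
 * atomic against the hypervisor even in a CONFIG_SMP=n guest, where
 * plain cmpxchg()'s LOCK_PREFIX assembles to nothing.
 */
static unsigned long claim_token(void)
{
	unsigned long old, prev;

	do {
		old = *shared_counter;
		prev = sync_cmpxchg(shared_counter, old, old + 1);
	} while (prev != old);	/* the other side raced us; retry */

	return old;
}

This is the design point of the new primitive: cmpxchg() drops the lock prefix on uniprocessor builds as an optimization, but a paravirtualized guest can share memory with a hypervisor running concurrently on another physical CPU, so code touching such memory must use the always-locked variant, as the comment introduced above explains.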