X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-i386%2Fsystem.h;fp=include%2Fasm-i386%2Fsystem.h;h=ec36513239bd89a9f3309f251c80a2a62fd7407e;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=098bcee94e389a615b3b2327fa2be03f117c28c3;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h index 098bcee94..ec3651323 100644 --- a/include/asm-i386/system.h +++ b/include/asm-i386/system.h @@ -1,6 +1,7 @@ #ifndef __ASM_SYSTEM_H #define __ASM_SYSTEM_H +#include #include #include #include @@ -88,6 +89,10 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ #define savesegment(seg, value) \ asm volatile("mov %%" #seg ",%0":"=rm" (value)) +/* + * Clear and set 'TS' bit respectively + */ +#define clts() __asm__ __volatile__ ("clts") #define read_cr0() ({ \ unsigned int __dummy; \ __asm__ __volatile__( \ @@ -96,7 +101,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ __dummy; \ }) #define write_cr0(x) \ - __asm__ __volatile__("movl %0,%%cr0": :"r" (x)) + __asm__ __volatile__("movl %0,%%cr0": :"r" (x)); #define read_cr2() ({ \ unsigned int __dummy; \ @@ -106,7 +111,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ __dummy; \ }) #define write_cr2(x) \ - __asm__ __volatile__("movl %0,%%cr2": :"r" (x)) + __asm__ __volatile__("movl %0,%%cr2": :"r" (x)); #define read_cr3() ({ \ unsigned int __dummy; \ @@ -116,7 +121,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ __dummy; \ }) #define write_cr3(x) \ - __asm__ __volatile__("movl %0,%%cr3": :"r" (x)) + __asm__ __volatile__("movl %0,%%cr3": :"r" (x)); #define read_cr4() ({ \ unsigned int __dummy; \ @@ -125,6 +130,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ :"=r" (__dummy)); \ __dummy; \ }) + #define read_cr4_safe() ({ \ unsigned int __dummy; \ /* This could fault if %cr4 does not exist */ \ @@ -136,19 +142,15 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ : "=r" (__dummy): "0" (0)); \ __dummy; \ }) -#define write_cr4(x) \ - __asm__ __volatile__("movl %0,%%cr4": :"r" (x)) -/* - * Clear and set 'TS' bit respectively - */ -#define clts() __asm__ __volatile__ ("clts") +#define write_cr4(x) \ + __asm__ __volatile__("movl %0,%%cr4": :"r" (x)); #define stts() write_cr0(8 | read_cr0()) #endif /* __KERNEL__ */ #define wbinvd() \ - __asm__ __volatile__ ("wbinvd": : :"memory") + __asm__ __volatile__ ("wbinvd": : :"memory"); static inline unsigned long get_limit(unsigned long segment) { @@ -356,6 +358,67 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l #endif +#ifdef __KERNEL__ +struct alt_instr { + __u8 *instr; /* original instruction */ + __u8 *replacement; + __u8 cpuid; /* cpuid bit set for replacement */ + __u8 instrlen; /* length of original instruction */ + __u8 replacementlen; /* length of new instruction, <= instrlen */ + __u8 pad; +}; +#endif + +/* + * Alternative instructions for different CPU types or capabilities. + * + * This allows to use optimized instructions even on generic binary + * kernels. + * + * length of oldinstr must be longer or equal the length of newinstr + * It can be padded with nops as needed. + * + * For non barrier like inlines please define new variants + * without volatile and memory clobber. 
+ */ +#define alternative(oldinstr, newinstr, feature) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 4\n" \ + " .long 661b\n" /* label */ \ + " .long 663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" :: "i" (feature) : "memory") + +/* + * Alternative inline assembly with input. + * + * Pecularities: + * No memory clobber here. + * Argument numbers start with 1. + * Best is to use constraints that are fixed size (like (%1) ... "r") + * If you use variable sized constraints like "m" or "g" in the + * replacement maake sure to pad to the worst case length. + */ +#define alternative_input(oldinstr, newinstr, feature, input...) \ + asm volatile ("661:\n\t" oldinstr "\n662:\n" \ + ".section .altinstructions,\"a\"\n" \ + " .align 4\n" \ + " .long 661b\n" /* label */ \ + " .long 663f\n" /* new instruction */ \ + " .byte %c0\n" /* feature bit */ \ + " .byte 662b-661b\n" /* sourcelen */ \ + " .byte 664f-663f\n" /* replacementlen */ \ + ".previous\n" \ + ".section .altinstr_replacement,\"ax\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" :: "i" (feature), ##input) + /* * Force strict CPU ordering. * And yes, this is required on UP too when we're talking @@ -432,7 +495,7 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l * does not enforce ordering, since there is no data dependency between * the read of "a" and the read of "b". Therefore, on some CPUs, such * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb() - * in cases like this where there are no data dependencies. + * in cases like thiswhere there are no data dependencies. **/ #define read_barrier_depends() do { } while(0) @@ -459,7 +522,27 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long l #define set_mb(var, value) do { var = value; barrier(); } while (0) #endif -#include +#define set_wmb(var, value) do { var = value; wmb(); } while (0) + +/* interrupt control.. */ +#define local_save_flags(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0) +#define local_irq_restore(x) do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0) +#define local_irq_disable() __asm__ __volatile__("cli": : :"memory") +#define local_irq_enable() __asm__ __volatile__("sti": : :"memory") +/* used in the idle loop; sti takes one instruction cycle to complete */ +#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") +/* used when interrupts are already enabled or to shutdown the processor */ +#define halt() __asm__ __volatile__("hlt": : :"memory") + +#define irqs_disabled() \ +({ \ + unsigned long flags; \ + local_save_flags(flags); \ + !(flags & (1<<9)); \ +}) + +/* For spinlocks etc */ +#define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory") /* * disable hlt during certain critical i/o operations @@ -481,8 +564,5 @@ static inline void sched_cacheflush(void) } extern unsigned long arch_align_stack(unsigned long sp); -extern void free_init_pages(char *what, unsigned long begin, unsigned long end); - -void default_idle(void); #endif
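
Usage note (not part of this diff): the alternative()/alternative_input() macros restored by this patch only emit a struct alt_instr record into the .altinstructions section; at boot the alternatives-patching code overwrites the original instruction in place when the CPU advertises the named feature bit. The sketch below shows roughly how the i386 memory barriers and prefetch() are built on these macros in kernels of this era; it is a hedged illustration, and the exact definitions vary between kernel versions. X86_FEATURE_XMM, X86_FEATURE_XMM2 and ASM_NOP4 are assumed to come from <asm/cpufeature.h> and <asm/processor.h>.

/* Use mfence/lfence on SSE2-capable CPUs, fall back to a locked add. */
#define mb()	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb()	alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

/* alternative_input(): input operands are numbered from %1, per the comment above. */
static inline void prefetch(const void *x)
{
	alternative_input(ASM_NOP4,		/* original: 4 bytes of nop padding */
			  "prefetchnta (%1)",	/* replacement when SSE is present */
			  X86_FEATURE_XMM,
			  "r" (x));
}

The interrupt-control macros brought back by this patch follow the usual save/restore pattern; flags caches EFLAGS, whose IF flag is bit 9 (the bit irqs_disabled() tests):

	unsigned long flags;

	local_irq_save(flags);		/* pushfl ; popl %0 ; cli */
	/* ... critical section with interrupts off ... */
	local_irq_restore(flags);	/* pushl %0 ; popfl restores the saved IF state */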