#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev,next,last) do {					\
	unsigned long esi,edi;						\
	asm volatile("pushfl\n\t"		/* Save flags */	\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popfl"						\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)			\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "2" (prev), "d" (next));				\
} while (0)
#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) :"m" (*((addr)+2)), "m" (*((addr)+4)), "m" (*((addr)+7)), "0" (base)); } while(0)
#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) :"m" (*(addr)), "m" (*((addr)+6)), "0" (limit)); } while(0)
#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)					\
	asm volatile("\n1:\t"					\
		"mov %0,%%" #seg "\n2:\n"			\
		".section .fixup,\"ax\"\n"			\
		"3:\tpushl $0\n\t"				\
		"popl %%" #seg "\n\t"				\
		"jmp 2b\n.previous\n"				\
		".section __ex_table,\"a\"\n\t"			\
		".align 4\n\t.long 1b,3b\n.previous"		\
		: :"rm" (value))
/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
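/*
 * Usage sketch (illustrative; the selector value is a made-up example):
 * stash a segment register with savesegment() and put it back with
 * loadsegment(), which falls back to the null selector on a bad value:
 *
 *	unsigned int old_fs;
 *
 *	savesegment(fs, old_fs);	// remember current %fs
 *	loadsegment(fs, __USER_DS);	// hypothetical temporary selector
 *	...
 *	loadsegment(fs, old_fs);	// restore the saved value
 */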
/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr0,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr0(x) \
	__asm__ __volatile__("movl %0,%%cr0": :"r" (x));
#define read_cr2() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr2,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr2(x) \
	__asm__ __volatile__("movl %0,%%cr2": :"r" (x));
#define read_cr3() ({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr3,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr3(x) \
	__asm__ __volatile__("movl %0,%%cr3": :"r" (x));
#define read_cr4() ({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr4,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})

#define read_cr4_safe() ({			      \
	unsigned int __dummy;			      \
	/* This could fault if %cr4 does not exist */ \
	__asm__("1: movl %%cr4, %0		\n"   \
		"2:				\n"   \
		".section __ex_table,\"a\"	\n"   \
		".long 1b,2b			\n"   \
		".previous			\n"   \
		: "=r" (__dummy): "0" (0));	      \
	__dummy;				      \
})
#define write_cr4(x) \
	__asm__ __volatile__("movl %0,%%cr4": :"r" (x));
#define stts() write_cr0(8 | read_cr0())
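/*
 * Illustrative note: clts()/stts() are the lazy-FPU switching hooks.
 * The context-switch path sets CR0.TS via stts() so the next FPU
 * instruction of the new task faults; the device-not-available handler
 * then clears TS and reloads the FPU state, roughly:
 *
 *	stts();				// at context switch: arm the #NM trap
 *	...
 *	clts();				// in the #NM handler: permit FPU use
 *	// ...then restore current's saved FPU registers
 */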
#endif	/* __KERNEL__ */
#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory");
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0" :"=r" (__limit):"r" (segment));
	return __limit+1;
}
#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
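/*
 * Usage sketch (illustrative): xchg() is atomic even without a lock
 * prefix, so tas() can serve as a primitive test-and-set lock:
 *
 *	static volatile int my_lock;		// hypothetical; 0 == free
 *
 *	while (tas(&my_lock))			// returns the old value
 *		cpu_relax();			// spin until it was 0
 *	...					// critical section
 *	my_lock = 0;				// release (plain store is enough on x86)
 */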
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
#ifdef CONFIG_X86_CMPXCHG64

/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
		unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		"lock cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		: "D"(ptr), "b"(low), "c"(high)
		: "ax","dx","memory");
}
static inline void __set_64bit_constant (unsigned long long *ptr,
						 unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)
static inline void __set_64bit_var (unsigned long long *ptr,
			 unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}
#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )

#endif
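/*
 * Usage sketch (illustrative): set_64bit() performs one atomic 64-bit
 * store on a 32-bit CPU, so concurrent readers can never observe a torn
 * half-updated value; the PAE page-table code uses it for in-place
 * pte updates, e.g.:
 *
 *	unsigned long long *slot = ...;		// hypothetical shared 64-bit word
 *	set_64bit(slot, new_val);		// readers see old or new, never a mix
 */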
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	  but generally the primitive is invalid: *ptr is an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1: __asm__ __volatile__("xchgb %b0,%1" :"=q" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 2: __asm__ __volatile__("xchgw %w0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 4: __asm__ __volatile__("xchgl %0,%1" :"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	}
	return x;
}
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#endif
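/*
 * Usage sketch (illustrative): cmpxchg() returns the value that was in
 * memory, so an update succeeded only if that equals the "old" value we
 * passed in; lock-free updates are therefore written as retry loops:
 *
 *	static unsigned long counter;		// hypothetical
 *	unsigned long old;
 *
 *	do {
 *		old = counter;
 *	} while (cmpxchg(&counter, old, old + 1) != old);
 */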
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1: __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2" : "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 2: __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 4: __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2" : "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	}
	return old;
}
#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386. It may be necessary
 * to simulate the cmpxchg on the 80386 CPU. For that purpose we define
 * a function for each of the sizes we support.
 */
extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}
#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})
#endif
#ifdef CONFIG_X86_CMPXCHG64

static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
				      unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)), "0"(old)
			     : "memory");
	return prev;
}

#define cmpxchg64(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
					(unsigned long long)(n)))
#endif
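/*
 * Usage sketch (illustrative): the same retry idiom works for 64-bit
 * values where CONFIG_X86_CMPXCHG64 is available.  A torn plain read of
 * the 64-bit word merely makes the compare fail and the loop retry:
 *
 *	static unsigned long long seq;		// hypothetical
 *	unsigned long long old;
 *
 *	do {
 *		old = seq;
 *	} while (cmpxchg64(&seq, old, old + 1) != old);
 */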
struct alt_instr {
	__u8 *instr;		/* original instruction */
	__u8 *replacement;
	__u8  cpuid;		/* cpuid bit set for replacement */
	__u8  instrlen;		/* length of original instruction */
	__u8  replacementlen;	/* length of new instruction, <= instrlen */
	__u8  pad;
};
/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even in generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length
 * of newinstr. It can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)			\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      "  .align 4\n"					\
		      "  .long 661b\n"            /* label */		\
		      "  .long 663f\n"		  /* new instruction */	\
		      "  .byte %c0\n"             /* feature bit */	\
		      "  .byte 662b-661b\n"       /* sourcelen */	\
		      "  .byte 664f-663f\n"       /* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"		\
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" :: "i" (feature) : "memory")
/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r")
 * If you use variable sized constraints like "m" or "g" in the
 * replacement make sure to pad to the worst case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"			\
		      ".section .altinstructions,\"a\"\n"		\
		      "  .align 4\n"					\
		      "  .long 661b\n"            /* label */		\
		      "  .long 663f\n"		  /* new instruction */	\
		      "  .byte %c0\n"             /* feature bit */	\
		      "  .byte 662b-661b\n"       /* sourcelen */	\
		      "  .byte 664f-663f\n"       /* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"		\
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" :: "i" (feature), ##input)
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores; wmb() ceases to be a
 * nop for these.
 */
/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should be already ordered. But keep a full barrier for now.
 */
#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is rmb().
 *
 * These ordering constraints are respected by both the local CPU and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/
#define read_barrier_depends()	do { } while(0)
#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO store capable CPUs for now that do SSE,
   but make it already a possibility. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define set_wmb(var, value) do { var = value; wmb(); } while (0)
/* interrupt control.. */
#define local_save_flags(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
#define local_irq_restore(x)	do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
/* used when interrupts are already enabled or to shut down the processor */
#define halt()			__asm__ __volatile__("hlt": : :"memory")
#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	!(flags & (1<<9));		\
})
/* For spinlocks etc */
#define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
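/*
 * Usage sketch (illustrative): the canonical pattern for a short
 * critical section that must not race with interrupts on this CPU:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// cli + remember previous IF state
 *	...				// touch data shared with interrupt context
 *	local_irq_restore(flags);	// put IF back exactly as it was
 */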
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
extern int es7000_plat;
void cpu_idle_wait(void);
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 */
static inline void sched_cacheflush(void) { wbinvd(); }

extern unsigned long arch_align_stack(unsigned long sp);