#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */
#include <asm/synch_bitops.h>
#include <asm/hypervisor.h>

#ifdef __KERNEL__

#ifdef CONFIG_SMP
#define __vcpu_id smp_processor_id()
#else
#define __vcpu_id 0
#endif
struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
/*
 * Saving eflags is important. It switches not only IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev,next,last) do {					\
	unsigned long esi,edi;						\
	asm volatile("pushfl\n\t"		/* Save flags */	\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %5,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %6\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popfl"						\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=a" (last),"=S" (esi),"=D" (edi)			\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "2" (prev), "d" (next));				\
} while (0)
#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"mov %0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"pushl $0\n\t"			\
		"popl %%" #seg "\n\t"		\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 4\n\t"			\
		".long 1b,3b\n"			\
		".previous"			\
		: :"rm" (value))
/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
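/*
 * Usage sketch (editor's addition, illustrative only): pairing the two,
 * e.g. stashing %fs before a region that may clobber it; the .fixup path
 * above falls back to the null selector if the restored value is stale:
 *
 *	unsigned int saved_fs;
 *
 *	savesegment(fs, saved_fs);	   stash the selector
 *	loadsegment(fs, saved_fs);	   later: restore, with fault fixup
 */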
#define read_cr0() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr0,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr0(x) \
	__asm__ __volatile__("movl %0,%%cr0": :"r" (x))
#define read_cr2() \
	(HYPERVISOR_shared_info->vcpu_info[smp_processor_id()].arch.cr2)
#define write_cr2(x) \
	__asm__ __volatile__("movl %0,%%cr2": :"r" (x))
#define read_cr3() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr3,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy = xen_cr3_to_pfn(__dummy); \
	mfn_to_pfn(__dummy) << PAGE_SHIFT; \
})
#define write_cr3(x) ({ \
	unsigned int __dummy = pfn_to_mfn((x) >> PAGE_SHIFT); \
	__dummy = xen_pfn_to_cr3(__dummy); \
	__asm__ __volatile__("movl %0,%%cr3": :"r" (__dummy)); \
})
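/*
 * Editor's note (not in the original header): under Xen the guest works
 * with pseudo-physical frame numbers (pfns) while %cr3 holds a machine
 * frame number (mfn), so the two accessors translate in opposite
 * directions and round-trip cleanly:
 *
 *	unsigned long pa = read_cr3();	   cr3 mfn -> pfn -> physical address
 *	write_cr3(pa);			   pfn -> mfn, then reload %cr3
 */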
#define read_cr4() ({ \
	unsigned int __dummy; \
	__asm__ __volatile__( \
		"movl %%cr4,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define read_cr4_safe() ({			      \
	unsigned int __dummy;			      \
	/* This could fault if %cr4 does not exist */ \
	__asm__("1: movl %%cr4, %0		\n"   \
		"2:				\n"   \
		".section __ex_table,\"a\"	\n"   \
		".long 1b,2b			\n"   \
		".previous			\n"   \
		: "=r" (__dummy): "0" (0));	      \
	__dummy;				      \
})
#define write_cr4(x) \
	__asm__ __volatile__("movl %0,%%cr4": :"r" (x))
/*
 * Clear and set 'TS' bit respectively
 */
#define clts() (HYPERVISOR_fpu_taskswitch(0))
#define stts() (HYPERVISOR_fpu_taskswitch(1))
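/*
 * Editor's note with a sketch (illustrative only): these are hypercall
 * replacements for the native clts/stts instructions, used for lazy FPU
 * context switching; roughly:
 *
 *	stts();		   set TS when switching away from an FPU user
 *	clts();		   clear TS in the device-not-available trap,
 *			   just before restoring the FPU state
 */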
#endif	/* __KERNEL__ */

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")
static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}
#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
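/*
 * Usage sketch (editor's addition, illustrative only): tas() returns the
 * previous value, so a crude test-and-set spin lock looks like
 *
 *	static int lock;
 *
 *	while (tas(&lock))
 *		cpu_relax();	   spin until our store of 1 wins
 *	lock = 0;		   unlock (a real lock would add barriers)
 */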
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
#ifdef CONFIG_X86_CMPXCHG64
/*
 * The semantics of XCHGCMP8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
		unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		"lock cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		:	"D"(ptr),
			"b"(low),
			"c"(high)
		:	"ax","dx","memory");
}
static inline void __set_64bit_constant (unsigned long long *ptr,
					 unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)
static inline void __set_64bit_var (unsigned long long *ptr,
				    unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}
#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )
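/*
 * Usage sketch (editor's addition, illustrative only): set_64bit() gives
 * a tearing-free 64-bit store on a 32-bit CPU, e.g. publishing a PAE-style
 * page-table entry where readers must never observe a half-written value:
 *
 *	unsigned long long new_pte = make_pte(pfn, flags);   hypothetical helper
 *	set_64bit(&pgtable[i], new_pte);     readers see old or new, never a mix
 */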
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	  but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__ __volatile__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__ __volatile__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__ __volatile__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#endif
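/*
 * Usage sketch (editor's addition, illustrative only): the canonical
 * cmpxchg() retry loop for a lock-free read-modify-write:
 *
 *	unsigned int old, new;
 *
 *	do {
 *		old = counter;		   snapshot the current value
 *		new = old + 1;		   compute the update from it
 *	} while (cmpxchg(&counter, old, new) != old);
 */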
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}
#ifndef CONFIG_X86_CMPXCHG
/*
 * When building a kernel capable of running on an 80386, it may be
 * necessary to simulate cmpxchg on the 80386 CPU. For that purpose we
 * define a function for each of the sizes we support.
 */
extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
					unsigned long new, int size)
{
	switch (size) {
	case 1:
		return cmpxchg_386_u8(ptr, old, new);
	case 2:
		return cmpxchg_386_u16(ptr, old, new);
	case 4:
		return cmpxchg_386_u32(ptr, old, new);
	}
	return old;
}
#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) __ret;					\
	if (likely(boot_cpu_data.x86 > 3))				\
		__ret = __cmpxchg((ptr), (unsigned long)(o),		\
					(unsigned long)(n), sizeof(*(ptr))); \
	else								\
		__ret = cmpxchg_386((ptr), (unsigned long)(o),		\
					(unsigned long)(n), sizeof(*(ptr))); \
	__ret;								\
})
#endif
#ifdef CONFIG_X86_CMPXCHG64

static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
					     unsigned long long new)
{
	unsigned long long prev;
	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
			     : "=A"(prev)
			     : "b"((unsigned long)new),
			       "c"((unsigned long)(new >> 32)),
			       "m"(*__xg(ptr)),
			       "0"(old)
			     : "memory");
	return prev;
}
#define cmpxchg64(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
					(unsigned long long)(n)))

#endif
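/*
 * Usage sketch (editor's addition, illustrative only): the same retry
 * loop as for cmpxchg(), but on a 64-bit quantity via cmpxchg8b:
 *
 *	unsigned long long old, new;
 *
 *	do {
 *		old = stamp;		   64-bit snapshot
 *		new = old + 1;
 *	} while (cmpxchg64(&stamp, old, new) != old);
 */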
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPU's follow what Intel calls a "Processor Order",
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPU's to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */

/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should be already ordered. But keep a full barrier for now.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/
#define read_barrier_depends()	do { } while(0)
#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO store capable CPUs for now that do SSE,
   but make it already a possibility. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
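/*
 * Usage sketch (editor's addition, illustrative only): the classic
 * producer/consumer pairing of smp_wmb() with smp_rmb():
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *
 *	data = 42;			while (!ready)
 *	smp_wmb();				cpu_relax();
 *	ready = 1;			smp_rmb();
 *					use(data);	   sees 42
 */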
#include <linux/irqflags.h>

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
extern int es7000_plat;
void cpu_idle_wait(void);
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}
extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);