#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/synch_bitops.h>
#include <asm/hypervisor.h>
#include <xen/interface/arch-x86_64.h>
#define __vcpu_id smp_processor_id()
#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif
#define __STR(x) #x
#define STR(x) __STR(x)
#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\n\t"

#define __EXTRA_CLOBBER \
	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT					\
		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	\
		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	\
		     "call __switch_to\n\t"				\
		     ".globl thread_return\n"				\
		     "thread_return:\n\t"				\
		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"		\
		     "movq %P[thread_info](%%rsi),%%r8\n\t"		\
		     LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"	\
		     "movq %%rax,%%rdi\n\t"				\
		     "jc ret_from_fork\n\t"				\
		     RESTORE_CONTEXT					\
		     : "=a" (last)					\
		     : [next] "S" (next), [prev] "D" (prev),		\
		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
		       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
		       [tif_fork] "i" (TIF_FORK),			\
		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
		     : "memory", "cc" __EXTRA_CLOBBER)
extern void load_gs_index(unsigned);
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t movl %k0,%%" #seg "\n"	\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t movl %1,%%" #seg "\n\t jmp 2b\n"	\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 8\n\t"			\
		".quad 1b,3b\n"			\
		".previous"			\
		: :"r" (value), "r" (0))
struct alt_instr {
	__u8 *instr;		/* original instruction */
	__u8 *replacement;
	__u8  cpuid;		/* cpuid bit set for replacement */
	__u8  instrlen;		/* length of original instruction */
	__u8  replacementlen;	/* length of new instruction, <= instrlen */
	__u8  pad[5];
};
/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the use of optimized instructions even on generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length of
 * newinstr; it can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
		      ".section .altinstructions,\"a\"\n"	\
		      " .align 8\n"				\
		      " .quad 661b\n"        /* label */		\
		      " .quad 663f\n"        /* new instruction */	\
		      " .byte %c0\n"         /* feature bit */		\
		      " .byte 662b-661b\n"   /* sourcelen */		\
		      " .byte 664f-663f\n"   /* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"	\
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" :: "i" (feature) : "memory")
/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like (%1) ... "r")
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement, make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
		      ".section .altinstructions,\"a\"\n"	\
		      " .align 8\n"				\
		      " .quad 661b\n"        /* label */		\
		      " .quad 663f\n"        /* new instruction */	\
		      " .byte %c0\n"         /* feature bit */		\
		      " .byte 662b-661b\n"   /* sourcelen */		\
		      " .byte 664f-663f\n"   /* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"	\
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" :: "i" (feature), ##input)
/* Like alternative_input, but with a single output argument */
#define alternative_io(oldinstr, newinstr, feature, output, input...)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
		      ".section .altinstructions,\"a\"\n"	\
		      " .align 8\n"				\
		      " .quad 661b\n"        /* label */		\
		      " .quad 663f\n"        /* new instruction */	\
		      " .byte %c[feat]\n"    /* feature bit */		\
		      " .byte 662b-661b\n"   /* sourcelen */		\
		      " .byte 664f-663f\n"   /* replacementlen */	\
		      ".previous\n"					\
		      ".section .altinstr_replacement,\"ax\"\n"	\
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" : output : [feat] "i" (feature), ##input)
/*
 * Clear and set the CR0 'TS' bit, respectively (via the hypervisor).
 */
#define clts() (HYPERVISOR_fpu_taskswitch(0))
static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}
#define read_cr3() ({ \
	unsigned long __dummy; \
	asm("movq %%cr3,%0" : "=r" (__dummy)); \
	machine_to_phys(__dummy); \
})
static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" :: "r" (val));
}
#define stts() (HYPERVISOR_fpu_taskswitch(1))
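/*
 * Illustrative use only (a sketch, not from this file): kernel_fpu_begin()
 * and kernel_fpu_end() bracket in-kernel FPU/SSE use roughly as
 *
 *	clts();		- TS cleared, FPU instructions will not trap
 *	... use FPU ...
 *	stts();		- TS set again, so the next FPU use faults and the
 *			  kernel can lazily restore user FPU state
 *
 * Here both go through HYPERVISOR_fpu_taskswitch() instead of writing CR0.
 */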
197 __asm__ __volatile__ ("wbinvd": : :"memory");
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

#endif	/* __KERNEL__ */
#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

#define __xg(x) ((volatile long *)(x))
static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;
}

#define _set_64bit set_64bit
226 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
227 * Note 2: xchg has side effect, so that attribute volatile is necessary,
228 * but generally the primitive is invalid, *ptr is output argument. --ANK
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %k0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 8:
		__asm__ __volatile__("xchgq %0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	}
	return x;
}
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
			: "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
			: "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 8:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
			: "=a"(prev) : "r"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	}
	return old;
}
#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),	\
				       (unsigned long)(n),sizeof(*(ptr))))
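/*
 * Illustrative use only (a sketch, not from this file): a lock-free
 * increment of a hypothetical shared counter built on cmpxchg():
 *
 *	unsigned long old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * The loop retries whenever another CPU updated 'counter' between the
 * read and the compare-and-exchange.
 */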
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do {} while(0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#endif
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()	asm volatile("sfence" ::: "memory")
#else
#define wmb()	asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()	do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
/*
 * The use of 'barrier' in the following reflects their use as local-lock
 * operations. Reentrancy must be prevented (e.g., __cli()) /before/ the
 * following critical operations are executed. All critical operations must
 * complete /before/ reentrancy is permitted (e.g., __sti()). The Alpha
 * architecture also includes these barriers, for example.
 */
#define __cli()								\
do {									\
	vcpu_info_t *_vcpu;						\
	preempt_disable();						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
	_vcpu->evtchn_upcall_mask = 1;					\
	preempt_enable_no_resched();					\
	barrier();							\
} while (0)
#define __sti()								\
do {									\
	vcpu_info_t *_vcpu;						\
	barrier();							\
	preempt_disable();						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
	_vcpu->evtchn_upcall_mask = 0;					\
	barrier(); /* unmask then check (avoid races) */		\
	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
		force_evtchn_callback();				\
	preempt_enable();						\
} while (0)
#define __save_flags(x)							\
do {									\
	vcpu_info_t *_vcpu;						\
	preempt_disable();						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
	(x) = _vcpu->evtchn_upcall_mask;				\
	preempt_enable();						\
} while (0)
#define __restore_flags(x)						\
do {									\
	vcpu_info_t *_vcpu;						\
	barrier();							\
	preempt_disable();						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
		barrier(); /* unmask then check (avoid races) */	\
		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
			force_evtchn_callback();			\
		preempt_enable();					\
	} else								\
		preempt_enable_no_resched();				\
} while (0)
#define __save_and_cli(x)						\
do {									\
	vcpu_info_t *_vcpu;						\
	preempt_disable();						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
	(x) = _vcpu->evtchn_upcall_mask;				\
	_vcpu->evtchn_upcall_mask = 1;					\
	preempt_enable_no_resched();					\
	barrier();							\
} while (0)
#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_save_flags(x)	__save_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
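/*
 * Illustrative use only (a sketch, not from this file): the usual pattern
 * around a short critical section.  On Xen these macros toggle the
 * per-VCPU event-channel upcall mask instead of executing cli/sti:
 *
 *	unsigned long flags;
 *	local_irq_save(flags);
 *	... touch data that an event/interrupt handler also touches ...
 *	local_irq_restore(flags);
 */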
/* Cannot use preempt_enable() here as we would recurse in preempt_schedule(). */
#define irqs_disabled()							\
({	int ___x;							\
	vcpu_info_t *_vcpu;						\
	preempt_disable();						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[__vcpu_id];		\
	___x = (_vcpu->evtchn_upcall_mask != 0);			\
	preempt_enable_no_resched();					\
	___x; })
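/*
 * Illustrative use only: code that is about to sleep commonly asserts
 *
 *	WARN_ON(irqs_disabled());
 *
 * which on Xen reduces to the upcall-mask check sampled above.
 */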
void safe_halt(void);

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);