#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/synch_bitops.h>
#include <asm-xen/hypervisor.h>
#include <asm-xen/xen-public/arch-x86_64.h>

#ifdef __KERNEL__

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushfq ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popfq\n\t"

#define __EXTRA_CLOBBER \
	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT                                         \
		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */   \
		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */\
		     "call __switch_to\n\t"                               \
		     ".globl thread_return\n"                             \
		     "thread_return:\n\t"                                 \
		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"               \
		     "movq %P[thread_info](%%rsi),%%r8\n\t"               \
		     LOCK "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"        \
		     "movq %%rax,%%rdi\n\t"                               \
		     "jc ret_from_fork\n\t"                               \
		     RESTORE_CONTEXT                                      \
		     : "=a" (last)                                        \
		     : [next] "S" (next), [prev] "D" (prev),              \
		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
		       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
		       [tif_fork] "i" (TIF_FORK),                         \
		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
		     : "memory", "cc" __EXTRA_CLOBBER)

extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg,value) \
	asm volatile("\n"                       \
		"1:\t"                          \
		"movl %k0,%%" #seg "\n"         \
		"2:\n"                          \
		".section .fixup,\"ax\"\n"      \
		"3:\t"                          \
		"movl %1,%%" #seg "\n\t"        \
		"jmp 2b\n"                      \
		".previous\n"                   \
		".section __ex_table,\"a\"\n\t" \
		".align 8\n\t"                  \
		".quad 1b,3b\n"                 \
		".previous"                     \
		: :"r" (value), "r" (0))

#define set_debug(value,register) \
	__asm__("movq %0,%%db" #register \
		: /* no output */ \
		:"r" ((unsigned long) value))

struct alt_instr {
	__u8 *instr;		/* original instruction */
	__u8 *replacement;	/* replacement instruction */
	__u8  cpuid;		/* cpuid bit set for replacement */
	__u8  instrlen;		/* length of original instruction */
	__u8  replacementlen;	/* length of new instruction, <= instrlen */
	__u8  pad[5];
};

/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows the kernel to use optimized instructions even on
 * generic binary kernels.
 *
 * The length of oldinstr must be greater than or equal to the length
 * of newinstr; oldinstr can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and memory clobber.
 */
#define alternative(oldinstr, newinstr, feature) \
	asm volatile ("661:\n\t" oldinstr "\n662:\n"                    \
		      ".section .altinstructions,\"a\"\n"               \
		      "  .align 8\n"                                    \
		      "  .quad 661b\n"            /* label */           \
		      "  .quad 663f\n"            /* new instruction */ \
		      "  .byte %c0\n"             /* feature bit */     \
		      "  .byte 662b-661b\n"       /* sourcelen */       \
		      "  .byte 664f-663f\n"       /* replacementlen */  \
		      ".previous\n"                                     \
		      ".section .altinstr_replacement,\"ax\"\n"         \
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" :: "i" (feature) : "memory")

/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like "r").
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement, make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input...) \
	asm volatile ("661:\n\t" oldinstr "\n662:\n"                    \
		      ".section .altinstructions,\"a\"\n"               \
		      "  .align 8\n"                                    \
		      "  .quad 661b\n"            /* label */           \
		      "  .quad 663f\n"            /* new instruction */ \
		      "  .byte %c0\n"             /* feature bit */     \
		      "  .byte 662b-661b\n"       /* sourcelen */       \
		      "  .byte 664f-663f\n"       /* replacementlen */  \
		      ".previous\n"                                     \
		      ".section .altinstr_replacement,\"ax\"\n"         \
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" :: "i" (feature), ##input)

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() (HYPERVISOR_fpu_taskswitch(0))

static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
}

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" :: "r" (val));
}

#define stts() (HYPERVISOR_fpu_taskswitch(1))
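
/*
 * Illustrative sketch (not part of the original header): clts()/stts()
 * support lazy FPU context switching; under Xen the TS bit is owned by
 * the hypervisor, hence the HYPERVISOR_fpu_taskswitch() calls:
 *
 *	stts();		... on switch-out: next FPU insn will fault
 *	...
 *	clts();		... in the fault path: permit FPU use, then
 *			    restore the task's FPU state
 */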

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory")

#endif	/* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

#define __xg(x) ((volatile long *)(x))

extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;	/* aligned 64-bit stores are atomic on x86-64 */
}

#define _set_64bit set_64bit

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *	   but generally the primitive is invalid; *ptr is an output
 *	   argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %k0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 8:
		__asm__ __volatile__("xchgq %0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	}
	return x;
}

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 8:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
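
/*
 * Illustrative sketch (not part of the original header): the usual
 * retry loop built on cmpxchg(), here bumping a hypothetical counter;
 * success is detected by comparing the returned value with 'old':
 *
 *	unsigned long old, new;
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */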

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do {} while(0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()	asm volatile("sfence" ::: "memory")
#else
#define wmb()	asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()	do {} while(0)
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
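
/*
 * Illustrative sketch (not part of the original header): publishing
 * data before a ready flag, so a reader who observes the flag also
 * observes the data; 'data' and 'ready' are hypothetical:
 *
 *	data = compute();
 *	wmb();		... order the data store before the flag store
 *	ready = 1;
 */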

#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

/*
 * The use of 'barrier' in the following reflects their use as local-lock
 * operations. Reentrancy must be prevented (e.g., __cli()) /before/ following
 * critical operations are executed. All critical operations must complete
 * /before/ reentrancy is permitted (e.g., __sti()). Alpha architecture also
 * includes these barriers, for example.
 */

#define __cli()								\
do {									\
	vcpu_info_t *_vcpu;						\
	preempt_disable();						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];\
	_vcpu->evtchn_upcall_mask = 1;					\
	preempt_enable_no_resched();					\
	barrier();							\
} while (0)

#define __sti()								\
do {									\
	vcpu_info_t *_vcpu;						\
	barrier();							\
	preempt_disable();						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];\
	_vcpu->evtchn_upcall_mask = 0;					\
	barrier(); /* unmask then check (avoid races) */		\
	if ( unlikely(_vcpu->evtchn_upcall_pending) )			\
		force_evtchn_callback();				\
	preempt_enable();						\
} while (0)

#define __save_flags(x)							\
do {									\
	vcpu_info_t *_vcpu;						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];\
	(x) = _vcpu->evtchn_upcall_mask;				\
} while (0)

#define __restore_flags(x)						\
do {									\
	vcpu_info_t *_vcpu;						\
	barrier();							\
	preempt_disable();						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];\
	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
		barrier(); /* unmask then check (avoid races) */	\
		if ( unlikely(_vcpu->evtchn_upcall_pending) )		\
			force_evtchn_callback();			\
		preempt_enable();					\
	} else								\
		preempt_enable_no_resched();				\
} while (0)

#define safe_halt()		((void)0)

#define __save_and_cli(x)						\
do {									\
	vcpu_info_t *_vcpu;						\
	preempt_disable();						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_data[smp_processor_id()];\
	(x) = _vcpu->evtchn_upcall_mask;				\
	_vcpu->evtchn_upcall_mask = 1;					\
	preempt_enable_no_resched();					\
	barrier();							\
} while (0)

void cpu_idle_wait(void);

#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_save_flags(x)	__save_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
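
/*
 * Illustrative sketch (not part of the original header): the standard
 * save/disable/restore pattern these wrappers exist for; under Xen this
 * masks event-channel upcalls rather than executing cli/sti:
 *
 *	unsigned long flags;
 *	local_irq_save(flags);
 *	... critical section, upcalls masked ...
 *	local_irq_restore(flags);
 */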

#define irqs_disabled()			\
	HYPERVISOR_shared_info->vcpu_data[smp_processor_id()].evtchn_upcall_mask

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);