#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/alternative.h>
#include <asm/synch_bitops.h>
#include <asm/hypervisor.h>
#include <xen/interface/arch-x86_64.h>

#ifdef __KERNEL__

#ifdef CONFIG_SMP
#define __vcpu_id smp_processor_id()
#else
#define __vcpu_id 0
#endif

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER \
	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

/* Save and restore flags to clear a leaking NT flag */
#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT \
		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
		     "call __switch_to\n\t" \
		     ".globl thread_return\n" \
		     "thread_return:\n\t" \
		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
		     "movq %P[thread_info](%%rsi),%%r8\n\t" \
		     LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
		     "movq %%rax,%%rdi\n\t" \
		     "jc ret_from_fork\n\t" \
		     RESTORE_CONTEXT \
		     : "=a" (last) \
		     : [next] "S" (next), [prev] "D" (prev), \
		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
		       [ti_flags] "i" (offsetof(struct thread_info, flags)),\
		       [tif_fork] "i" (TIF_FORK), \
		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
		     : "memory", "cc" __EXTRA_CLOBBER)
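
/*
 * Illustrative sketch (not part of the original header): switch_to() is
 * normally invoked from the scheduler's context-switch path, along the
 * lines of:
 *
 *	struct task_struct *prev = ..., *next = ...;
 *	switch_to(prev, next, prev);
 *
 * After the macro returns, the third argument holds the task that was
 * running just before "prev" was resumed on this CPU. The caller and
 * variable names above are assumptions for illustration only.
 */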

extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)	\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %k0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"movl %1,%%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 8\n\t"			\
		".quad 1b,3b\n"			\
		".previous"			\
		: :"r" (value), "r" (0))
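
/*
 * Illustrative sketch (not part of the original header): reload a segment
 * register from a saved selector, falling back to the null selector if the
 * load faults, e.g.:
 *
 *	loadsegment(fs, next->fsindex);
 *
 * "next->fsindex" is only an example operand; any 16-bit selector value
 * can be used.
 */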

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() (HYPERVISOR_fpu_taskswitch(0))

static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm("movq %%cr3,%0" : "=r" (cr3));
	/* CR3 holds a machine address under Xen; convert it back */
	return machine_to_phys(cr3);
}

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" :: "r" (val));
}

#define stts() (HYPERVISOR_fpu_taskswitch(1))

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory");

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
static inline void sched_cacheflush(void)
{
	wbinvd();
}

#endif	/* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

#define __xg(x) ((volatile long *)(x))

static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;
}

#define _set_64bit set_64bit

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	  but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__ __volatile__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__ __volatile__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__ __volatile__("xchgl %k0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 8:
			__asm__ __volatile__("xchgq %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}
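
/*
 * Illustrative sketch (not part of the original header): the xchg() wrapper
 * above atomically swaps a new value into memory and returns the old one,
 * e.g.:
 *
 *	unsigned long flag = 0;
 *	unsigned long old = xchg(&flag, 1UL);
 *
 * Here "old" receives the previous contents of "flag"; the variable names
 * are illustrative only.
 */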

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 8:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
				     : "=a"(prev)
				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
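
/*
 * Illustrative sketch (not part of the original header): cmpxchg() stores
 * the new value only when the current contents equal the expected old
 * value, and returns what was there before, so a caller can write:
 *
 *	unsigned long word = 0;
 *	unsigned long seen = cmpxchg(&word, 0UL, 1UL);
 *
 * If "seen" is 0, the 1 was installed atomically; otherwise another path
 * got there first. The names are illustrative only.
 */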

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do {} while(0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()	asm volatile("sfence" ::: "memory")
#else
#define wmb()	asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()	do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)

#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

void safe_halt(void);
void halt(void);

#include <linux/irqflags.h>

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);