#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>

#ifdef __KERNEL__
#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif
#define __STR(x) #x
#define STR(x) __STR(x)
#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT	"pushfq ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT	"movq %%rbp,%%rsi ; popq %%rbp ; popfq\n\t"
#define __EXTRA_CLOBBER \
	,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
#define switch_to(prev,next,last) \
	asm volatile(SAVE_CONTEXT					\
		     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */	\
		     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */	\
		     "call __switch_to\n\t"				\
		     ".globl thread_return\n"				\
		     "thread_return:\n\t"				\
		     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"		\
		     "movq %P[thread_info](%%rsi),%%r8\n\t"		\
		     "btr %[tif_fork],%P[ti_flags](%%r8)\n\t"		\
		     "movq %%rax,%%rdi\n\t"				\
		     "jc ret_from_fork\n\t"				\
		     RESTORE_CONTEXT					\
		     : "=a" (last)					\
		     : [next] "S" (next), [prev] "D" (prev),		\
		       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
		       [ti_flags] "i" (offsetof(struct thread_info, flags)), \
		       [tif_fork] "i" (TIF_FORK),			\
		       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
		       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
		     : "memory", "cc" __EXTRA_CLOBBER)
extern void load_gs_index(unsigned);
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong.
 */
#define loadsegment(seg,value)	\
	asm volatile("\n"			\
		"1:\t movl %k0,%%" #seg "\n"	\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t movl %1,%%" #seg "\n\t"	\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 8\n\t .quad 1b,3b\n"	\
		".previous"			\
		: :"r" (value), "r" (0))
#define set_debug(value,register) \
	__asm__("movq %0,%%db" #register \
		: /* no output */ \
		:"r" ((unsigned long) value))
struct alt_instr {
	__u8 *instr;		/* original instruction */
	__u8 *replacement;
	__u8  cpuid;		/* cpuid bit set for replacement */
	__u8  instrlen;		/* length of original instruction */
	__u8  replacementlen;	/* length of new instruction, <= instrlen */
};
/*
 * Alternative instructions for different CPU types or capabilities.
 *
 * This allows optimized instructions to be used even in generic binary
 * kernels.
 *
 * The length of oldinstr must be greater than or equal to the length of
 * newinstr; it can be padded with nops as needed.
 *
 * For non-barrier-like inlines please define new variants
 * without volatile and the memory clobber.
 */
#define alternative(oldinstr, newinstr, feature)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
		      ".section .altinstructions,\"a\"\n"	\
		      "  .align 8\n"				\
		      "  .quad 661b\n"            /* label */		\
		      "  .quad 663f\n"            /* new instruction */	\
		      "  .byte %c0\n"             /* feature bit */	\
		      "  .byte 662b-661b\n"       /* sourcelen */	\
		      "  .byte 664f-663f\n"       /* replacementlen */	\
		      ".previous\n"				\
		      ".section .altinstr_replacement,\"ax\"\n"	\
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" :: "i" (feature) : "memory")
/*
 * Alternative inline assembly with input.
 *
 * Peculiarities:
 * No memory clobber here.
 * Argument numbers start with 1.
 * Best is to use constraints that are fixed size (like "r" with (%1)).
 * If you use variable-sized constraints like "m" or "g" in the
 * replacement, make sure to pad to the worst-case length.
 */
#define alternative_input(oldinstr, newinstr, feature, input)	\
	asm volatile ("661:\n\t" oldinstr "\n662:\n"		\
		      ".section .altinstructions,\"a\"\n"	\
		      "  .align 8\n"				\
		      "  .quad 661b\n"            /* label */		\
		      "  .quad 663f\n"            /* new instruction */	\
		      "  .byte %c0\n"             /* feature bit */	\
		      "  .byte 662b-661b\n"       /* sourcelen */	\
		      "  .byte 664f-663f\n"       /* replacementlen */	\
		      ".previous\n"				\
		      ".section .altinstr_replacement,\"ax\"\n"	\
		      "663:\n\t" newinstr "\n664:\n"   /* replacement */ \
		      ".previous" :: "i" (feature), input)
/*
 * Clear and set the CR0 'TS' bit, respectively.
 */
#define clts() __asm__ __volatile__ ("clts")
static inline unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile("movq %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(unsigned long val)
{
	asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
	unsigned long cr3;
	asm("movq %%cr3,%0" : "=r" (cr3));
	return cr3;
}

static inline unsigned long read_cr4(void)
{
	unsigned long cr4;
	asm("movq %%cr4,%0" : "=r" (cr4));
	return cr4;
}

static inline void write_cr4(unsigned long val)
{
	asm volatile("movq %0,%%cr4" :: "r" (val));
}
#define stts() write_cr0(8 | read_cr0())
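
/*
 * Illustrative note (not part of the original header): TS is bit 3 of
 * CR0, hence the constant 8 above.  A lazy-FPU style pairing might look
 * like this (purely a sketch):
 *
 *	clts();			// permit FPU use without faulting
 *	...use the FPU...
 *	stts();			// make the next FPU use trap again (#NM)
 */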
#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory");

#endif	/* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")
#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

#define __xg(x) ((volatile long *)(x))
extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;
}
#define _set_64bit set_64bit
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *	   but strictly the constraints are incomplete: *ptr is really an
 *	   output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %k0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	case 8:
		__asm__ __volatile__("xchgq %0,%1"
			:"=r" (x) :"m" (*__xg(ptr)), "0" (x) :"memory");
		break;
	}
	return x;
}
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	case 8:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
			: "=a"(prev) : "q"(new), "m"(*__xg(ptr)), "0"(old) : "memory");
		return prev;
	}
	return old;
}
#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
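
/*
 * Illustrative example (not part of the original header): the usual
 * cmpxchg() retry loop.  The function name is hypothetical; it simply
 * demonstrates the compare-old / install-new idiom described above.
 */
static inline unsigned long example_atomic_add_return(volatile unsigned long *p,
						      unsigned long inc)
{
	unsigned long old, new;

	do {
		old = *p;
		new = old + inc;
		/* store succeeds only if *p still equals old */
	} while (cmpxchg(p, old, new) != old);

	return new;
}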
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	do {} while(0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do {} while(0)
#endif
/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
/* Could use SFENCE here, but it would only be needed for unordered SSE
   store instructions, and we always issue an explicit sfence with them
   currently.  The ordering of normal stores is serialized enough.  Just
   make it a compile barrier. */
#define wmb()	asm volatile("" ::: "memory")
#define read_barrier_depends()	do {} while(0)
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
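
/*
 * Illustrative sketch (not part of the original header): wmb()/rmb()
 * (or their smp_*() forms, which compile away on UP) are paired across
 * CPUs so that a flag is never observed before the data it publishes.
 * "data" and "ready" are hypothetical shared variables.
 *
 *	writer:				reader:
 *		data = val;			while (!ready)
 *		smp_wmb();				barrier();
 *		ready = 1;			smp_rmb();
 *						use(data);
 */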
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

/* interrupt control.. */
#define local_save_flags(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_restore(x)	__asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
#define local_irq_disable()	__asm__ __volatile__("cli": : :"memory")
#define local_irq_enable()	__asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti enables interrupts only after the next
   instruction, so the sti; hlt pair cannot be interrupted in between */
#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")
#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	!(flags & (1<<9));		\
})
/* For spinlocks etc */
#define local_irq_save(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
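
/*
 * Illustrative sketch (not part of the original header): the usual
 * pattern around a short critical section; local_irq_restore()
 * re-enables interrupts only if they were enabled when saved.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	...touch data shared with an interrupt handler...
 *	local_irq_restore(flags);
 */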
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
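
/*
 * Illustrative sketch (not part of the original header): a driver whose
 * device cannot tolerate the CPU idling in hlt (historically the floppy
 * driver) brackets the timing-sensitive I/O:
 *
 *	disable_hlt();
 *	...perform the I/O...
 *	enable_hlt();
 */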