#ifndef _ASM_IA64_GCC_INTRIN_H
#define _ASM_IA64_GCC_INTRIN_H
/*
 * Copyright (C) 2002,2003 Jun Nakajima <jun.nakajima@intel.com>
 * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
 */

/* define this macro to get some asm stmts included in 'c' files */
#define ASM_SUPPORTED
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define ia64_barrier()	asm volatile ("":::"memory")

#define ia64_stop()	asm volatile (";;"::)
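
/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): ia64_barrier() constrains only the compiler and emits no
 * instruction, so ordering against other CPUs still needs a real fence
 * such as ia64_mf().  With hypothetical shared variables data and flag:
 *
 *	data = 42;
 *	ia64_mf();		// order the data store before the flag store
 *	flag = 1;
 *	ia64_barrier();		// compiler-only: no code emitted
 */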
#define ia64_invala_gr(regnum)	asm volatile ("invala.e r%0" :: "i"(regnum))

#define ia64_invala_fr(regnum)	asm volatile ("invala.e f%0" :: "i"(regnum))

extern void ia64_bad_param_for_setreg (void);
extern void ia64_bad_param_for_getreg (void);

register unsigned long ia64_r13 asm ("r13");
#define ia64_setreg(regnum, val)						\
({										\
	switch (regnum) {							\
	case _IA64_REG_PSR_L:							\
		asm volatile ("mov psr.l=%0" :: "r"(val) : "memory");		\
		break;								\
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
		asm volatile ("mov ar%0=%1" ::					\
			      "i" (regnum - _IA64_REG_AR_KR0),			\
			      "r"(val): "memory");				\
		break;								\
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:				\
		asm volatile ("mov cr%0=%1" ::					\
			      "i" (regnum - _IA64_REG_CR_DCR),			\
			      "r"(val): "memory" );				\
		break;								\
	case _IA64_REG_SP:							\
		asm volatile ("mov r12=%0" :: "r"(val) : "memory");		\
		break;								\
	case _IA64_REG_GP:							\
		asm volatile ("mov gp=%0" :: "r"(val) : "memory");		\
		break;								\
	default:								\
		ia64_bad_param_for_setreg();					\
		break;								\
	}									\
})
#define ia64_getreg(regnum)							\
({										\
	__u64 ia64_intri_res;							\
										\
	switch (regnum) {							\
	case _IA64_REG_GP:							\
		asm volatile ("mov %0=gp" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_IP:							\
		asm volatile ("mov %0=ip" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_PSR:							\
		asm volatile ("mov %0=psr" : "=r"(ia64_intri_res));		\
		break;								\
	case _IA64_REG_TP:	/* for current() */				\
		ia64_intri_res = ia64_r13;					\
		break;								\
	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_EC:				\
		asm volatile ("mov %0=ar%1" : "=r" (ia64_intri_res)		\
			      : "i"(regnum - _IA64_REG_AR_KR0));		\
		break;								\
	case _IA64_REG_CR_DCR ... _IA64_REG_CR_LRR1:				\
		asm volatile ("mov %0=cr%1" : "=r" (ia64_intri_res)		\
			      : "i" (regnum - _IA64_REG_CR_DCR));		\
		break;								\
	case _IA64_REG_SP:							\
		asm volatile ("mov %0=sp" : "=r" (ia64_intri_res));		\
		break;								\
	default:								\
		ia64_bad_param_for_getreg();					\
		break;								\
	}									\
	ia64_intri_res;								\
})
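
/*
 * Usage sketch (editorial, illustrative): regnum must be a compile-time
 * constant so the switch folds down to a single mov; a regnum matching
 * no case calls ia64_bad_param_for_setreg()/..getreg(), which are
 * declared but never defined, so the mistake surfaces at link time.
 *
 *	__u64 sp = ia64_getreg(_IA64_REG_SP);
 *	ia64_setreg(_IA64_REG_AR_KR0, sp);	// hypothetical use of ar.kr0
 */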
#define ia64_hint_pause 0

#define ia64_hint(mode)						\
({								\
	switch (mode) {						\
	case ia64_hint_pause:					\
		asm volatile ("hint @pause" ::: "memory");	\
		break;						\
	}							\
})
/* Integer values for mux1 instruction */
#define ia64_mux1_brcst	 0
#define ia64_mux1_mix	 8
#define ia64_mux1_shuf	 9
#define ia64_mux1_alt	10
#define ia64_mux1_rev	11
#define ia64_mux1(x, mode)							\
({										\
	__u64 ia64_intri_res;							\
										\
	switch (mode) {								\
	case ia64_mux1_brcst:							\
		asm ("mux1 %0=%1,@brcst" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_mix:							\
		asm ("mux1 %0=%1,@mix" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_shuf:							\
		asm ("mux1 %0=%1,@shuf" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_alt:							\
		asm ("mux1 %0=%1,@alt" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	case ia64_mux1_rev:							\
		asm ("mux1 %0=%1,@rev" : "=r" (ia64_intri_res) : "r" (x));	\
		break;								\
	}									\
	ia64_intri_res;								\
})
#define ia64_popcnt(x)							\
({									\
	__u64 ia64_intri_res;						\
	asm ("popcnt %0=%1" : "=r" (ia64_intri_res) : "r" (x));		\
	ia64_intri_res;							\
})
#define ia64_getf_exp(x)						\
({									\
	long ia64_intri_res;						\
	asm ("getf.exp %0=%1" : "=r"(ia64_intri_res) : "f"(x));		\
	ia64_intri_res;							\
})
#define ia64_shrp(a, b, count)							\
({										\
	__u64 ia64_intri_res;							\
	asm ("shrp %0=%1,%2,%3" : "=r"(ia64_intri_res) : "r"(a), "r"(b), "i"(count)); \
	ia64_intri_res;								\
})
#define ia64_ldfs(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfs %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfd(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfd %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldfe(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldfe %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf8(regnum, x)					\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf8 %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_ldf_fill(regnum, x)				\
({								\
	register double __f__ asm ("f"#regnum);			\
	asm volatile ("ldf.fill %0=[%1]" :"=f"(__f__): "r"(x));	\
})

#define ia64_stfs(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfs [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfd(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfd [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stfe(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stfe [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf8(x, regnum)						\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stf8 [%0]=%1" :: "r"(x), "f"(__f__) : "memory");	\
})

#define ia64_stf_spill(x, regnum)					\
({									\
	register double __f__ asm ("f"#regnum);				\
	asm volatile ("stf.spill [%0]=%1" :: "r"(x), "f"(__f__) : "memory"); \
})
#define ia64_fetchadd4_acq(p, inc)					\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("fetchadd4.acq %0=[%1],%2"			\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) : "memory"); \
	ia64_intri_res;							\
})

#define ia64_fetchadd4_rel(p, inc)					\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("fetchadd4.rel %0=[%1],%2"			\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) : "memory"); \
	ia64_intri_res;							\
})

#define ia64_fetchadd8_acq(p, inc)					\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("fetchadd8.acq %0=[%1],%2"			\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) : "memory"); \
	ia64_intri_res;							\
})

#define ia64_fetchadd8_rel(p, inc)					\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("fetchadd8.rel %0=[%1],%2"			\
		      : "=r"(ia64_intri_res) : "r"(p), "i" (inc) : "memory"); \
	ia64_intri_res;							\
})
#define ia64_xchg1(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg1 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg2(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg2 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg4(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg4 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})

#define ia64_xchg8(ptr,x)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("xchg8 %0=[%1],%2" : "=r" (ia64_intri_res)	\
		      : "r" (ptr), "r" (x) : "memory");			\
	ia64_intri_res;							\
})
#define ia64_cmpxchg1_acq(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg1.acq %0=[%1],%2,ar.ccv":			\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res;							\
})

#define ia64_cmpxchg1_rel(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg1.rel %0=[%1],%2,ar.ccv":			\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res;							\
})

#define ia64_cmpxchg2_acq(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg2.acq %0=[%1],%2,ar.ccv":			\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res;							\
})

#define ia64_cmpxchg2_rel(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg2.rel %0=[%1],%2,ar.ccv":			\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res;							\
})

#define ia64_cmpxchg4_acq(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg4.acq %0=[%1],%2,ar.ccv":			\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res;							\
})

#define ia64_cmpxchg4_rel(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg4.rel %0=[%1],%2,ar.ccv":			\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res;							\
})

#define ia64_cmpxchg8_acq(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg8.acq %0=[%1],%2,ar.ccv":			\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res;							\
})

#define ia64_cmpxchg8_rel(ptr, new, old)				\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov ar.ccv=%0;;" :: "rO"(old));			\
	asm volatile ("cmpxchg8.rel %0=[%1],%2,ar.ccv":			\
		      "=r"(ia64_intri_res) : "r"(ptr), "r"(new) : "memory"); \
	ia64_intri_res;							\
})
#define ia64_mf()	asm volatile ("mf" ::: "memory")
#define ia64_mfa()	asm volatile ("mf.a" ::: "memory")

#define ia64_invala()	asm volatile ("invala" ::: "memory")
#define ia64_thash(addr)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("thash %0=%1" : "=r"(ia64_intri_res) : "r" (addr)); \
	ia64_intri_res;							\
})
#define ia64_srlz_i()	asm volatile (";; srlz.i ;;" ::: "memory")

#define ia64_srlz_d()	asm volatile (";; srlz.d" ::: "memory")

#define ia64_nop(x)	asm volatile ("nop %0" :: "i"(x))
#define ia64_itci(addr)	asm volatile ("itc.i %0;;" :: "r"(addr) : "memory")

#define ia64_itcd(addr)	asm volatile ("itc.d %0;;" :: "r"(addr) : "memory")

#define ia64_itri(trnum, addr) asm volatile ("itr.i itr[%0]=%1"		\
					     :: "r"(trnum), "r"(addr) : "memory")

#define ia64_itrd(trnum, addr) asm volatile ("itr.d dtr[%0]=%1"		\
					     :: "r"(trnum), "r"(addr) : "memory")
#define ia64_tpa(addr)							\
({									\
	__u64 ia64_pa;							\
	asm volatile ("tpa %0 = %1" : "=r"(ia64_pa) : "r"(addr) : "memory"); \
	ia64_pa;							\
})
#define __ia64_set_dbr(index, val)					\
	asm volatile ("mov dbr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_ibr(index, val)					\
	asm volatile ("mov ibr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pkr(index, val)					\
	asm volatile ("mov pkr[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmc(index, val)					\
	asm volatile ("mov pmc[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_pmd(index, val)					\
	asm volatile ("mov pmd[%0]=%1" :: "r"(index), "r"(val) : "memory")

#define ia64_set_rr(index, val)						\
	asm volatile ("mov rr[%0]=%1" :: "r"(index), "r"(val) : "memory")
#define ia64_get_cpuid(index)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov %0=cpuid[%r1]" : "=r"(ia64_intri_res) : "rO"(index)); \
	ia64_intri_res;							\
})

#define __ia64_get_dbr(index)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov %0=dbr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res;							\
})

#define ia64_get_ibr(index)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov %0=ibr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res;							\
})

#define ia64_get_pkr(index)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov %0=pkr[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res;							\
})

#define ia64_get_pmc(index)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov %0=pmc[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res;							\
})

#define ia64_get_pmd(index)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov %0=pmd[%1]" : "=r"(ia64_intri_res) : "r"(index)); \
	ia64_intri_res;							\
})

#define ia64_get_rr(index)						\
({									\
	__u64 ia64_intri_res;						\
	asm volatile ("mov %0=rr[%1]" : "=r"(ia64_intri_res) : "r" (index)); \
	ia64_intri_res;							\
})
#define ia64_fc(addr)	asm volatile ("fc %0" :: "r"(addr) : "memory")

#define ia64_sync_i()	asm volatile (";; sync.i" ::: "memory")

#define ia64_ssm(mask)	asm volatile ("ssm %0" :: "i"((mask)) : "memory")
#define ia64_rsm(mask)	asm volatile ("rsm %0" :: "i"((mask)) : "memory")
#define ia64_sum(mask)	asm volatile ("sum %0" :: "i"((mask)) : "memory")
#define ia64_rum(mask)	asm volatile ("rum %0" :: "i"((mask)) : "memory")

#define ia64_ptce(addr)	asm volatile ("ptc.e %0" :: "r"(addr))
#define ia64_ptcga(addr, size)						\
	asm volatile ("ptc.ga %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptcl(addr, size)						\
	asm volatile ("ptc.l %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptri(addr, size)						\
	asm volatile ("ptr.i %0,%1" :: "r"(addr), "r"(size) : "memory")

#define ia64_ptrd(addr, size)						\
	asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
/* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */

#define ia64_lfhint_none	0
#define ia64_lfhint_nt1		1
#define ia64_lfhint_nt2		2
#define ia64_lfhint_nta		3
#define ia64_lfetch(lfhint, y)					\
({								\
	switch (lfhint) {					\
	case ia64_lfhint_none:					\
		asm volatile ("lfetch [%0]" : : "r"(y));	\
		break;						\
	case ia64_lfhint_nt1:					\
		asm volatile ("lfetch.nt1 [%0]" : : "r"(y));	\
		break;						\
	case ia64_lfhint_nt2:					\
		asm volatile ("lfetch.nt2 [%0]" : : "r"(y));	\
		break;						\
	case ia64_lfhint_nta:					\
		asm volatile ("lfetch.nta [%0]" : : "r"(y));	\
		break;						\
	}							\
})
#define ia64_lfetch_excl(lfhint, y)					\
({									\
	switch (lfhint) {						\
	case ia64_lfhint_none:						\
		asm volatile ("lfetch.excl [%0]" :: "r"(y));		\
		break;							\
	case ia64_lfhint_nt1:						\
		asm volatile ("lfetch.excl.nt1 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nt2:						\
		asm volatile ("lfetch.excl.nt2 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nta:						\
		asm volatile ("lfetch.excl.nta [%0]" :: "r"(y));	\
		break;							\
	}								\
})
#define ia64_lfetch_fault(lfhint, y)					\
({									\
	switch (lfhint) {						\
	case ia64_lfhint_none:						\
		asm volatile ("lfetch.fault [%0]" : : "r"(y));		\
		break;							\
	case ia64_lfhint_nt1:						\
		asm volatile ("lfetch.fault.nt1 [%0]" : : "r"(y));	\
		break;							\
	case ia64_lfhint_nt2:						\
		asm volatile ("lfetch.fault.nt2 [%0]" : : "r"(y));	\
		break;							\
	case ia64_lfhint_nta:						\
		asm volatile ("lfetch.fault.nta [%0]" : : "r"(y));	\
		break;							\
	}								\
})
#define ia64_lfetch_fault_excl(lfhint, y)				\
({									\
	switch (lfhint) {						\
	case ia64_lfhint_none:						\
		asm volatile ("lfetch.fault.excl [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nt1:						\
		asm volatile ("lfetch.fault.excl.nt1 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nt2:						\
		asm volatile ("lfetch.fault.excl.nt2 [%0]" :: "r"(y));	\
		break;							\
	case ia64_lfhint_nta:						\
		asm volatile ("lfetch.fault.excl.nta [%0]" :: "r"(y));	\
		break;							\
	}								\
})
#define ia64_intrin_local_irq_restore(x)			\
do {								\
	asm volatile ("	cmp.ne p6,p7=%0,r0;;"			\
		      "(p6)	ssm psr.i;"			\
		      "(p7)	rsm psr.i;;"			\
		      "(p6)	srlz.d"				\
		      :: "r"((x)) : "p6", "p7", "memory");	\
} while (0)
#endif /* _ASM_IA64_GCC_INTRIN_H */