1 /* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
2 Note: I added some stuff for use with gnupg
4 Copyright (C) 1991, 1992, 1993, 1994, 1996, 1998,
5 2000, 2001, 2002, 2003 Free Software Foundation, Inc.
7 This file is free software; you can redistribute it and/or modify
8 it under the terms of the GNU Library General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or (at your
10 option) any later version.
12 This file is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public
15 License for more details.
17 You should have received a copy of the GNU Library General Public License
18 along with this file; see the file COPYING.LIB. If not, write to
19 the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
20 MA 02111-1307, USA. */
22 /* You have to define the following before including this file:
24 UWtype -- An unsigned type, default type for operations (typically a "word")
25 UHWtype -- An unsigned type, at least half the size of UWtype.
26 UDWtype -- An unsigned type, at least twice as large a UWtype
27 W_TYPE_SIZE -- size in bits of UWtype
29 SItype, USItype -- Signed and unsigned 32 bit types.
30 DItype, UDItype -- Signed and unsigned 64 bit types.
32 On a 32 bit machine UWtype should typically be USItype;
on a 64 bit machine, UWtype should typically be UDItype.  */
/* Helpers for splitting a full word into half-words.  UWtype and
   W_TYPE_SIZE must already be defined by the includer (see above).
   __ll_B is the half-word base (2^(W_TYPE_SIZE/2)); __ll_lowpart /
   __ll_highpart extract the low and high halves of a word.  */
#define __BITS4 (W_TYPE_SIZE / 4)
#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
/* This is used to make sure no undesirable sharing between different libraries
   that use this file takes place: every mpn entry point gets a "__" prefix
   via token pasting.  */
#define __MPN(x) __##x
47 /* Define auxiliary asm macros.
49 1) umul_ppmm(high_prod, low_prod, multipler, multiplicand) multiplies two
50 UWtype integers MULTIPLER and MULTIPLICAND, and generates a two UWtype
51 word product in HIGH_PROD and LOW_PROD.
53 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a
54 UDWtype product. This is just a variant of umul_ppmm.
56 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
57 denominator) divides a UDWtype, composed by the UWtype integers
58 HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient
59 in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less
60 than DENOMINATOR for correct operation. If, in addition, the most
61 significant bit of DENOMINATOR must be 1, then the pre-processor symbol
62 UDIV_NEEDS_NORMALIZATION is defined to 1.
64 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator,
denominator).  Like udiv_qrnnd but the numbers are signed.  The quotient
and remainder are rounded towards zero.
68 5) count_leading_zeros(count, x) counts the number of zero-bits from the
69 msb to the first non-zero bit in the UWtype X. This is the number of
70 steps X needs to be shifted left to set the msb. Undefined for X == 0,
71 unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value.
73 6) count_trailing_zeros(count, x) like count_leading_zeros, but counts
74 from the least significant end.
76 7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1,
77 high_addend_2, low_addend_2) adds two UWtype integers, composed by
78 HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2
79 respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow
80 (i.e. carry out) is not stored anywhere, and is lost.
82 8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend,
83 high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers,
84 composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and
85 LOW_SUBTRAHEND_2 respectively. The result is placed in HIGH_DIFFERENCE
86 and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere,
If any of these macros are left undefined for a particular CPU,
C macros are used.  */
92 /* The CPUs come in alphabetical order below.
94 Please add support for more CPUs here, or improve the current support
95 for the CPUs below! */
97 #if defined (__GNUC__) && !defined (NO_ASM)
/* We sometimes need to clobber "cc" with gcc2, but that would not be
   understood by gcc1.  Use cpp to avoid major code duplication.
   NOTE(review): the opening "#if" and the empty gcc1 definitions were
   missing in this copy, leaving an unmatched #else; restored from the
   canonical longlong.h.  */
#if defined (__GNUC__) && __GNUC__ < 2
#define __CLOBBER_CC
#define __AND_CLOBBER_CC
#else /* __GNUC__ >= 2 */
#define __CLOBBER_CC : "cc"
#define __AND_CLOBBER_CC , "cc"
#endif /* __GNUC__ < 2 */
110 /***************************************
111 ************** A29K *****************
112 ***************************************/
113 #if (defined (__a29k__) || defined (_AM29K)) && W_TYPE_SIZE == 32
114 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
115 __asm__ ("add %1,%4,%5\n" \
117 : "=r" ((USItype)(sh)), \
118 "=&r" ((USItype)(sl)) \
119 : "%r" ((USItype)(ah)), \
120 "rI" ((USItype)(bh)), \
121 "%r" ((USItype)(al)), \
122 "rI" ((USItype)(bl)))
123 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
124 __asm__ ("sub %1,%4,%5\n" \
126 : "=r" ((USItype)(sh)), \
127 "=&r" ((USItype)(sl)) \
128 : "r" ((USItype)(ah)), \
129 "rI" ((USItype)(bh)), \
130 "r" ((USItype)(al)), \
131 "rI" ((USItype)(bl)))
132 #define umul_ppmm(xh, xl, m0, m1) \
134 USItype __m0 = (m0), __m1 = (m1); \
135 __asm__ ("multiplu %0,%1,%2" \
136 : "=r" ((USItype)(xl)) \
139 __asm__ ("multmu %0,%1,%2" \
140 : "=r" ((USItype)(xh)) \
144 #define udiv_qrnnd(q, r, n1, n0, d) \
145 __asm__ ("dividu %0,%3,%4" \
146 : "=r" ((USItype)(q)), \
147 "=q" ((USItype)(r)) \
148 : "1" ((USItype)(n1)), \
149 "r" ((USItype)(n0)), \
152 #define count_leading_zeros(count, x) \
153 __asm__ ("clz %0,%1" \
154 : "=r" ((USItype)(count)) \
155 : "r" ((USItype)(x)))
156 #define COUNT_LEADING_ZEROS_0 32
157 #endif /* __a29k__ */
160 #if defined (__alpha) && W_TYPE_SIZE == 64
161 #define umul_ppmm(ph, pl, m0, m1) \
163 UDItype __m0 = (m0), __m1 = (m1); \
164 __asm__ ("umulh %r1,%2,%0" \
165 : "=r" ((UDItype) ph) \
168 (pl) = __m0 * __m1; \
171 #ifndef LONGLONG_STANDALONE
172 #define udiv_qrnnd(q, r, n1, n0, d) \
174 (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
177 extern UDItype __udiv_qrnnd ();
178 #define UDIV_TIME 220
179 #endif /* LONGLONG_STANDALONE */
182 /***************************************
183 ************** ARM ******************
184 ***************************************/
#if defined (__arm__) && W_TYPE_SIZE == 32
/* ARM two-word add/sub: adds/subs set the carry flag, adc/sbc consume it.
   NOTE(review): the second instruction of each pair, the clobber lists and
   the closing #endif lines were missing in this copy; restored from the
   canonical longlong.h.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("adds %1, %4, %5\n" \
           "adc  %0, %2, %3" \
           : "=r" ((USItype)(sh)), \
             "=&r" ((USItype)(sl)) \
           : "%r" ((USItype)(ah)), \
             "rI" ((USItype)(bh)), \
             "%r" ((USItype)(al)), \
             "rI" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subs %1, %4, %5\n" \
           "sbc  %0, %2, %3" \
           : "=r" ((USItype)(sh)), \
             "=&r" ((USItype)(sl)) \
           : "r" ((USItype)(ah)), \
             "rI" ((USItype)(bh)), \
             "r" ((USItype)(al)), \
             "rI" ((USItype)(bl)))
#if defined __ARM_ARCH_2__ || defined __ARM_ARCH_3__
/* Pre-v4 ARMs have no umull: build the 32x32->64 product from four
   16x16 partial products.  */
#define umul_ppmm(xh, xl, a, b) \
  __asm__ ("%@ Inlined umul_ppmm\n" \
           "mov %|r0, %2, lsr #16          @ AAAA\n" \
           "mov %|r2, %3, lsr #16          @ BBBB\n" \
           "bic %|r1, %2, %|r0, lsl #16    @ aaaa\n" \
           "bic %0, %3, %|r2, lsl #16      @ bbbb\n" \
           "mul %1, %|r1, %|r2             @ aaaa * BBBB\n" \
           "mul %|r2, %|r0, %|r2           @ AAAA * BBBB\n" \
           "mul %|r1, %0, %|r1             @ aaaa * bbbb\n" \
           "mul %0, %|r0, %0               @ AAAA * bbbb\n" \
           "adds %|r0, %1, %0              @ central sum\n" \
           "addcs %|r2, %|r2, #65536\n" \
           "adds %1, %|r1, %|r0, lsl #16\n" \
           "adc %0, %|r2, %|r0, lsr #16" \
           : "=&r" ((USItype)(xh)), \
             "=r" ((USItype)(xl)) \
           : "r" ((USItype)(a)), \
             "r" ((USItype)(b)) \
           : "r0", "r1", "r2")
#else
#define umul_ppmm(xh, xl, a, b) \
  __asm__ ("%@ Inlined umul_ppmm\n" \
           "umull %r1, %r0, %r2, %r3" \
           : "=&r" ((USItype)(xh)), \
             "=r" ((USItype)(xl)) \
           : "r" ((USItype)(a)), \
             "r" ((USItype)(b)) \
           : "r0", "r1")
#endif
#define UMUL_TIME 20
#define UDIV_TIME 100
#endif /* __arm__ */
238 /***************************************
239 ************** CLIPPER **************
240 ***************************************/
241 #if defined (__clipper__) && W_TYPE_SIZE == 32
242 #define umul_ppmm(w1, w0, u, v) \
243 ({union {UDItype __ll; \
244 struct {USItype __l, __h;} __i; \
246 __asm__ ("mulwux %2,%0" \
248 : "%0" ((USItype)(u)), \
249 "r" ((USItype)(v))); \
250 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
251 #define smul_ppmm(w1, w0, u, v) \
252 ({union {DItype __ll; \
253 struct {SItype __l, __h;} __i; \
255 __asm__ ("mulwx %2,%0" \
257 : "%0" ((SItype)(u)), \
258 "r" ((SItype)(v))); \
259 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
260 #define __umulsidi3(u, v) \
262 __asm__ ("mulwux %2,%0" \
264 : "%0" ((USItype)(u)), \
265 "r" ((USItype)(v))); \
267 #endif /* __clipper__ */
270 /***************************************
271 ************** GMICRO ***************
272 ***************************************/
273 #if defined (__gmicro__) && W_TYPE_SIZE == 32
274 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
275 __asm__ ("add.w %5,%1\n" \
277 : "=g" ((USItype)(sh)), \
278 "=&g" ((USItype)(sl)) \
279 : "%0" ((USItype)(ah)), \
280 "g" ((USItype)(bh)), \
281 "%1" ((USItype)(al)), \
283 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
284 __asm__ ("sub.w %5,%1\n" \
286 : "=g" ((USItype)(sh)), \
287 "=&g" ((USItype)(sl)) \
288 : "0" ((USItype)(ah)), \
289 "g" ((USItype)(bh)), \
290 "1" ((USItype)(al)), \
292 #define umul_ppmm(ph, pl, m0, m1) \
293 __asm__ ("mulx %3,%0,%1" \
294 : "=g" ((USItype)(ph)), \
295 "=r" ((USItype)(pl)) \
296 : "%0" ((USItype)(m0)), \
298 #define udiv_qrnnd(q, r, nh, nl, d) \
299 __asm__ ("divx %4,%0,%1" \
300 : "=g" ((USItype)(q)), \
301 "=r" ((USItype)(r)) \
302 : "1" ((USItype)(nh)), \
303 "0" ((USItype)(nl)), \
305 #define count_leading_zeros(count, x) \
306 __asm__ ("bsch/1 %1,%0" \
308 : "g" ((USItype)(x)), \
313 /***************************************
314 ************** HPPA *****************
315 ***************************************/
316 #if defined (__hppa) && W_TYPE_SIZE == 32
317 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
318 __asm__ (" add %4,%5,%1\n" \
320 : "=r" ((USItype)(sh)), \
321 "=&r" ((USItype)(sl)) \
322 : "%rM" ((USItype)(ah)), \
323 "rM" ((USItype)(bh)), \
324 "%rM" ((USItype)(al)), \
325 "rM" ((USItype)(bl)))
326 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
327 __asm__ (" sub %4,%5,%1\n" \
329 : "=r" ((USItype)(sh)), \
330 "=&r" ((USItype)(sl)) \
331 : "rM" ((USItype)(ah)), \
332 "rM" ((USItype)(bh)), \
333 "rM" ((USItype)(al)), \
334 "rM" ((USItype)(bl)))
335 #if defined (_PA_RISC1_1)
336 #define umul_ppmm(wh, wl, u, v) \
338 union {UDItype __ll; \
339 struct {USItype __h, __l;} __i; \
341 __asm__ (" xmpyu %1,%2,%0" \
342 : "=*f" (__xx.__ll) \
343 : "*f" ((USItype)(u)), \
344 "*f" ((USItype)(v))); \
345 (wh) = __xx.__i.__h; \
346 (wl) = __xx.__i.__l; \
354 #ifndef LONGLONG_STANDALONE
355 #define udiv_qrnnd(q, r, n1, n0, d) \
357 (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
360 extern USItype __udiv_qrnnd ();
361 #endif /* LONGLONG_STANDALONE */
362 #define count_leading_zeros(count, x) \
367 " extru,= %1,15,16,%%r0 ; Bits 31..16 zero? \n" \
368 " extru,tr %1,15,16,%1 ; No. Shift down, skip add.\n" \
369 " ldo 16(%0),%0 ; Yes. Perform add. \n" \
370 " extru,= %1,23,8,%%r0 ; Bits 15..8 zero? \n" \
371 " extru,tr %1,23,8,%1 ; No. Shift down, skip add.\n" \
372 " ldo 8(%0),%0 ; Yes. Perform add. \n" \
373 " extru,= %1,27,4,%%r0 ; Bits 7..4 zero? \n" \
374 " extru,tr %1,27,4,%1 ; No. Shift down, skip add.\n" \
375 " ldo 4(%0),%0 ; Yes. Perform add. \n" \
376 " extru,= %1,29,2,%%r0 ; Bits 3..2 zero? \n" \
377 " extru,tr %1,29,2,%1 ; No. Shift down, skip add.\n" \
378 " ldo 2(%0),%0 ; Yes. Perform add. \n" \
379 " extru %1,30,1,%1 ; Extract bit 1. \n" \
380 " sub %0,%1,%0 ; Subtract it. " \
381 : "=r" (count), "=r" (__tmp) : "1" (x)); \
386 /***************************************
387 ************** I370 *****************
388 ***************************************/
389 #if (defined (__i370__) || defined (__mvs__)) && W_TYPE_SIZE == 32
390 #define umul_ppmm(xh, xl, m0, m1) \
392 union {UDItype __ll; \
393 struct {USItype __h, __l;} __i; \
395 USItype __m0 = (m0), __m1 = (m1); \
396 __asm__ ("mr %0,%3" \
397 : "=r" (__xx.__i.__h), \
398 "=r" (__xx.__i.__l) \
401 (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
402 (xh) += ((((SItype) __m0 >> 31) & __m1) \
403 + (((SItype) __m1 >> 31) & __m0)); \
405 #define smul_ppmm(xh, xl, m0, m1) \
407 union {DItype __ll; \
408 struct {USItype __h, __l;} __i; \
410 __asm__ ("mr %0,%3" \
411 : "=r" (__xx.__i.__h), \
412 "=r" (__xx.__i.__l) \
415 (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
417 #define sdiv_qrnnd(q, r, n1, n0, d) \
419 union {DItype __ll; \
420 struct {USItype __h, __l;} __i; \
422 __xx.__i.__h = n1; __xx.__i.__l = n0; \
423 __asm__ ("dr %0,%2" \
425 : "0" (__xx.__ll), "r" (d)); \
426 (q) = __xx.__i.__l; (r) = __xx.__i.__h; \
431 /***************************************
432 ************** I386 *****************
433 ***************************************/
#if (defined (__i386__) || defined (__i486__)) && W_TYPE_SIZE == 32
/* x86: add/adc and sub/sbb pairs; one-instruction 32x32->64 multiply
   (mull, result in edx:eax) and 64/32 divide (divl).
   NOTE(review): several interior lines (adcl/sbbl, the mull/divl asm
   templates, and the count_leading_zeros wrapper) were missing in this
   copy; restored from the canonical longlong.h.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl %5,%1\n" \
           "adcl %3,%0" \
           : "=r" ((USItype)(sh)), \
             "=&r" ((USItype)(sl)) \
           : "%0" ((USItype)(ah)), \
             "g" ((USItype)(bh)), \
             "%1" ((USItype)(al)), \
             "g" ((USItype)(bl)))
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl %5,%1\n" \
           "sbbl %3,%0" \
           : "=r" ((USItype)(sh)), \
             "=&r" ((USItype)(sl)) \
           : "0" ((USItype)(ah)), \
             "g" ((USItype)(bh)), \
             "1" ((USItype)(al)), \
             "g" ((USItype)(bl)))
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("mull %3" \
           : "=a" ((USItype)(w0)), \
             "=d" ((USItype)(w1)) \
           : "%0" ((USItype)(u)), \
             "rm" ((USItype)(v)))
#define udiv_qrnnd(q, r, n1, n0, d) \
  __asm__ ("divl %4" \
           : "=a" ((USItype)(q)), \
             "=d" ((USItype)(r)) \
           : "0" ((USItype)(n0)), \
             "1" ((USItype)(n1)), \
             "rm" ((USItype)(d)))
/* bsrl returns the index of the highest set bit; xor with 31 converts
   that to a leading-zero count.  Undefined for x == 0.  */
#define count_leading_zeros(count, x) \
  do { \
    USItype __cbtmp; \
    __asm__ ("bsrl %1,%0" \
             : "=r" (__cbtmp) : "rm" ((USItype)(x))); \
    (count) = __cbtmp ^ 31; \
  } while (0)
#define count_trailing_zeros(count, x) \
  __asm__ ("bsfl %1,%0" : "=r" (count) : "rm" ((USItype)(x)))
#define UMUL_TIME 40
#define UDIV_TIME 40
#endif /* 80x86 */
485 /***************************************
486 ************** I860 *****************
487 ***************************************/
488 #if defined (__i860__) && W_TYPE_SIZE == 32
489 #define rshift_rhlc(r,h,l,c) \
490 __asm__ ("shr %3,r0,r0\n" \
492 "=r" (r) : "r" (h), "r" (l), "rn" (c))
495 /***************************************
496 ************** I960 *****************
497 ***************************************/
498 #if defined (__i960__) && W_TYPE_SIZE == 32
499 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
500 __asm__ ("cmpo 1,0\n" \
503 : "=r" ((USItype)(sh)), \
504 "=&r" ((USItype)(sl)) \
505 : "%dI" ((USItype)(ah)), \
506 "dI" ((USItype)(bh)), \
507 "%dI" ((USItype)(al)), \
508 "dI" ((USItype)(bl)))
509 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
510 __asm__ ("cmpo 0,0\n" \
513 : "=r" ((USItype)(sh)), \
514 "=&r" ((USItype)(sl)) \
515 : "dI" ((USItype)(ah)), \
516 "dI" ((USItype)(bh)), \
517 "dI" ((USItype)(al)), \
518 "dI" ((USItype)(bl)))
519 #define umul_ppmm(w1, w0, u, v) \
520 ({union {UDItype __ll; \
521 struct {USItype __l, __h;} __i; \
523 __asm__ ("emul %2,%1,%0" \
525 : "%dI" ((USItype)(u)), \
526 "dI" ((USItype)(v))); \
527 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
528 #define __umulsidi3(u, v) \
530 __asm__ ("emul %2,%1,%0" \
532 : "%dI" ((USItype)(u)), \
533 "dI" ((USItype)(v))); \
535 #define udiv_qrnnd(q, r, nh, nl, d) \
537 union {UDItype __ll; \
538 struct {USItype __l, __h;} __i; \
540 __nn.__i.__h = (nh); __nn.__i.__l = (nl); \
541 __asm__ ("ediv %d,%n,%0" \
543 : "dI" (__nn.__ll), \
544 "dI" ((USItype)(d))); \
545 (r) = __rq.__i.__l; (q) = __rq.__i.__h; \
547 #define count_leading_zeros(count, x) \
550 __asm__ ("scanbit %1,%0" \
552 : "r" ((USItype)(x))); \
553 (count) = __cbtmp ^ 31; \
555 #define COUNT_LEADING_ZEROS_0 (-32) /* sic */
556 #if defined (__i960mx) /* what is the proper symbol to test??? */
557 #define rshift_rhlc(r,h,l,c) \
559 union {UDItype __ll; \
560 struct {USItype __l, __h;} __i; \
562 __nn.__i.__h = (h); __nn.__i.__l = (l); \
563 __asm__ ("shre %2,%1,%0" \
564 : "=d" (r) : "dI" (__nn.__ll), "dI" (c)); \
570 /***************************************
571 ************** 68000 ****************
572 ***************************************/
573 #if (defined (__mc68000__) || defined (__mc68020__) || defined (__NeXT__) || defined(mc68020)) && W_TYPE_SIZE == 32
574 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
575 __asm__ ("add%.l %5,%1\n" \
577 : "=d" ((USItype)(sh)), \
578 "=&d" ((USItype)(sl)) \
579 : "%0" ((USItype)(ah)), \
580 "d" ((USItype)(bh)), \
581 "%1" ((USItype)(al)), \
583 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
584 __asm__ ("sub%.l %5,%1\n" \
586 : "=d" ((USItype)(sh)), \
587 "=&d" ((USItype)(sl)) \
588 : "0" ((USItype)(ah)), \
589 "d" ((USItype)(bh)), \
590 "1" ((USItype)(al)), \
592 #if (defined (__mc68020__) || defined (__NeXT__) || defined(mc68020))
593 #define umul_ppmm(w1, w0, u, v) \
594 __asm__ ("mulu%.l %3,%1:%0" \
595 : "=d" ((USItype)(w0)), \
596 "=d" ((USItype)(w1)) \
597 : "%0" ((USItype)(u)), \
598 "dmi" ((USItype)(v)))
600 #define udiv_qrnnd(q, r, n1, n0, d) \
601 __asm__ ("divu%.l %4,%1:%0" \
602 : "=d" ((USItype)(q)), \
603 "=d" ((USItype)(r)) \
604 : "0" ((USItype)(n0)), \
605 "1" ((USItype)(n1)), \
606 "dmi" ((USItype)(d)))
608 #define sdiv_qrnnd(q, r, n1, n0, d) \
609 __asm__ ("divs%.l %4,%1:%0" \
610 : "=d" ((USItype)(q)), \
611 "=d" ((USItype)(r)) \
612 : "0" ((USItype)(n0)), \
613 "1" ((USItype)(n1)), \
614 "dmi" ((USItype)(d)))
615 #define count_leading_zeros(count, x) \
616 __asm__ ("bfffo %1{%b2:%b2},%0" \
617 : "=d" ((USItype)(count)) \
618 : "od" ((USItype)(x)), "n" (0))
619 #define COUNT_LEADING_ZEROS_0 32
620 #else /* not mc68020 */
621 #define umul_ppmm(xh, xl, a, b) \
622 do { USItype __umul_tmp1, __umul_tmp2; \
623 __asm__ ("| Inlined umul_ppmm \n" \
624 " move%.l %5,%3 \n" \
625 " move%.l %2,%0 \n" \
626 " move%.w %3,%1 \n" \
636 " add%.l %#0x10000,%0 \n" \
637 "1: move%.l %2,%3 \n" \
643 " addx%.l %2,%0 \n" \
644 " | End inlined umul_ppmm" \
645 : "=&d" ((USItype)(xh)), "=&d" ((USItype)(xl)), \
646 "=d" (__umul_tmp1), "=&d" (__umul_tmp2) \
647 : "%2" ((USItype)(a)), "d" ((USItype)(b))); \
649 #define UMUL_TIME 100
650 #define UDIV_TIME 400
651 #endif /* not mc68020 */
655 /***************************************
656 ************** 88000 ****************
657 ***************************************/
658 #if defined (__m88000__) && W_TYPE_SIZE == 32
659 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
660 __asm__ ("addu.co %1,%r4,%r5\n" \
661 "addu.ci %0,%r2,%r3" \
662 : "=r" ((USItype)(sh)), \
663 "=&r" ((USItype)(sl)) \
664 : "%rJ" ((USItype)(ah)), \
665 "rJ" ((USItype)(bh)), \
666 "%rJ" ((USItype)(al)), \
667 "rJ" ((USItype)(bl)))
668 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
669 __asm__ ("subu.co %1,%r4,%r5\n" \
670 "subu.ci %0,%r2,%r3" \
671 : "=r" ((USItype)(sh)), \
672 "=&r" ((USItype)(sl)) \
673 : "rJ" ((USItype)(ah)), \
674 "rJ" ((USItype)(bh)), \
675 "rJ" ((USItype)(al)), \
676 "rJ" ((USItype)(bl)))
677 #define count_leading_zeros(count, x) \
680 __asm__ ("ff1 %0,%1" \
682 : "r" ((USItype)(x))); \
683 (count) = __cbtmp ^ 31; \
685 #define COUNT_LEADING_ZEROS_0 63 /* sic */
686 #if defined (__m88110__)
687 #define umul_ppmm(wh, wl, u, v) \
689 union {UDItype __ll; \
690 struct {USItype __h, __l;} __i; \
692 __asm__ ("mulu.d %0,%1,%2" : "=r" (__x.__ll) : "r" (u), "r" (v)); \
693 (wh) = __x.__i.__h; \
694 (wl) = __x.__i.__l; \
696 #define udiv_qrnnd(q, r, n1, n0, d) \
697 ({union {UDItype __ll; \
698 struct {USItype __h, __l;} __i; \
700 __x.__i.__h = (n1); __x.__i.__l = (n0); \
701 __asm__ ("divu.d %0,%1,%2" \
702 : "=r" (__q.__ll) : "r" (__x.__ll), "r" (d)); \
703 (r) = (n0) - __q.__l * (d); (q) = __q.__l; })
708 #define UDIV_TIME 150
709 #endif /* __m88110__ */
710 #endif /* __m88000__ */
712 /***************************************
713 ************** MIPS *****************
714 ***************************************/
#if defined (__mips__) && W_TYPE_SIZE == 32
#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
/* gcc >= 2.7 can allocate the HI/LO result registers directly via the
   "=h"/"=l" constraints.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("multu %2,%3" \
           : "=l" ((USItype)(w0)), \
             "=h" ((USItype)(w1)) \
           : "d" ((USItype)(u)), \
             "d" ((USItype)(v)))
#else
/* Older gcc: move the product out of HI/LO explicitly.
   NOTE(review): the #else branch tail and closing #endif were missing in
   this copy; restored from the canonical longlong.h.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("multu %2,%3 \n" \
           "mflo %0 \n" \
           "mfhi %1" \
           : "=d" ((USItype)(w0)), \
             "=d" ((USItype)(w1)) \
           : "d" ((USItype)(u)), \
             "d" ((USItype)(v)))
#endif
#define UMUL_TIME 10
#define UDIV_TIME 100
#endif /* __mips__ */
737 /***************************************
738 ************** MIPS/64 **************
739 ***************************************/
#if (defined (__mips) && __mips >= 3) && W_TYPE_SIZE == 64
#if __GNUC__ > 2 || __GNUC_MINOR__ >= 7
/* 64-bit analogue of the MIPS32 case: dmultu with direct HI/LO
   register allocation on gcc >= 2.7.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("dmultu %2,%3" \
           : "=l" ((UDItype)(w0)), \
             "=h" ((UDItype)(w1)) \
           : "d" ((UDItype)(u)), \
             "d" ((UDItype)(v)))
#else
/* NOTE(review): the #else branch tail and closing #endif were missing in
   this copy; restored from the canonical longlong.h.  */
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("dmultu %2,%3 \n" \
           "mflo %0 \n" \
           "mfhi %1" \
           : "=d" ((UDItype)(w0)), \
             "=d" ((UDItype)(w1)) \
           : "d" ((UDItype)(u)), \
             "d" ((UDItype)(v)))
#endif
#define UMUL_TIME 20
#define UDIV_TIME 140
#endif /* __mips__ */
763 /***************************************
764 ************** 32000 ****************
765 ***************************************/
766 #if defined (__ns32000__) && W_TYPE_SIZE == 32
767 #define umul_ppmm(w1, w0, u, v) \
768 ({union {UDItype __ll; \
769 struct {USItype __l, __h;} __i; \
771 __asm__ ("meid %2,%0" \
773 : "%0" ((USItype)(u)), \
774 "g" ((USItype)(v))); \
775 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
776 #define __umulsidi3(u, v) \
778 __asm__ ("meid %2,%0" \
780 : "%0" ((USItype)(u)), \
781 "g" ((USItype)(v))); \
783 #define udiv_qrnnd(q, r, n1, n0, d) \
784 ({union {UDItype __ll; \
785 struct {USItype __l, __h;} __i; \
787 __xx.__i.__h = (n1); __xx.__i.__l = (n0); \
788 __asm__ ("deid %2,%0" \
791 "g" ((USItype)(d))); \
792 (r) = __xx.__i.__l; (q) = __xx.__i.__h; })
793 #define count_trailing_zeros(count,x) \
795 __asm__ ("ffsd %2,%0" \
796 : "=r" ((USItype) (count)) \
797 : "0" ((USItype) 0), \
798 "r" ((USItype) (x))); \
800 #endif /* __ns32000__ */
803 /***************************************
804 ************** PPC ******************
805 ***************************************/
806 #if (defined (_ARCH_PPC) || defined (_IBMR2)) && W_TYPE_SIZE == 32
807 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
809 if (__builtin_constant_p (bh) && (bh) == 0) \
810 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \
811 : "=r" ((USItype)(sh)), \
812 "=&r" ((USItype)(sl)) \
813 : "%r" ((USItype)(ah)), \
814 "%r" ((USItype)(al)), \
815 "rI" ((USItype)(bl))); \
816 else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
817 __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \
818 : "=r" ((USItype)(sh)), \
819 "=&r" ((USItype)(sl)) \
820 : "%r" ((USItype)(ah)), \
821 "%r" ((USItype)(al)), \
822 "rI" ((USItype)(bl))); \
824 __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \
825 : "=r" ((USItype)(sh)), \
826 "=&r" ((USItype)(sl)) \
827 : "%r" ((USItype)(ah)), \
828 "r" ((USItype)(bh)), \
829 "%r" ((USItype)(al)), \
830 "rI" ((USItype)(bl))); \
832 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
834 if (__builtin_constant_p (ah) && (ah) == 0) \
835 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \
836 : "=r" ((USItype)(sh)), \
837 "=&r" ((USItype)(sl)) \
838 : "r" ((USItype)(bh)), \
839 "rI" ((USItype)(al)), \
840 "r" ((USItype)(bl))); \
841 else if (__builtin_constant_p (ah) && (ah) ==~(USItype) 0) \
842 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \
843 : "=r" ((USItype)(sh)), \
844 "=&r" ((USItype)(sl)) \
845 : "r" ((USItype)(bh)), \
846 "rI" ((USItype)(al)), \
847 "r" ((USItype)(bl))); \
848 else if (__builtin_constant_p (bh) && (bh) == 0) \
849 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \
850 : "=r" ((USItype)(sh)), \
851 "=&r" ((USItype)(sl)) \
852 : "r" ((USItype)(ah)), \
853 "rI" ((USItype)(al)), \
854 "r" ((USItype)(bl))); \
855 else if (__builtin_constant_p (bh) && (bh) ==~(USItype) 0) \
856 __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \
857 : "=r" ((USItype)(sh)), \
858 "=&r" ((USItype)(sl)) \
859 : "r" ((USItype)(ah)), \
860 "rI" ((USItype)(al)), \
861 "r" ((USItype)(bl))); \
863 __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \
864 : "=r" ((USItype)(sh)), \
865 "=&r" ((USItype)(sl)) \
866 : "r" ((USItype)(ah)), \
867 "r" ((USItype)(bh)), \
868 "rI" ((USItype)(al)), \
869 "r" ((USItype)(bl))); \
/* cntlzw returns 32 for a zero argument, so COUNT_LEADING_ZEROS_0 is
   well defined here.  The {old|new} braces select POWER vs PowerPC
   mnemonics.  */
#define count_leading_zeros(count, x) \
  __asm__ ("{cntlz|cntlzw} %0,%1" \
           : "=r" ((USItype)(count)) \
           : "r" ((USItype)(x)))
#define COUNT_LEADING_ZEROS_0 32
876 #if defined (_ARCH_PPC)
877 #define umul_ppmm(ph, pl, m0, m1) \
879 USItype __m0 = (m0), __m1 = (m1); \
880 __asm__ ("mulhwu %0,%1,%2" \
881 : "=r" ((USItype) ph) \
884 (pl) = __m0 * __m1; \
887 #define smul_ppmm(ph, pl, m0, m1) \
889 SItype __m0 = (m0), __m1 = (m1); \
890 __asm__ ("mulhw %0,%1,%2" \
891 : "=r" ((SItype) ph) \
894 (pl) = __m0 * __m1; \
897 #define UDIV_TIME 120
899 #define umul_ppmm(xh, xl, m0, m1) \
901 USItype __m0 = (m0), __m1 = (m1); \
902 __asm__ ("mul %0,%2,%3" \
903 : "=r" ((USItype)(xh)), \
904 "=q" ((USItype)(xl)) \
907 (xh) += ((((SItype) __m0 >> 31) & __m1) \
908 + (((SItype) __m1 >> 31) & __m0)); \
911 #define smul_ppmm(xh, xl, m0, m1) \
912 __asm__ ("mul %0,%2,%3" \
913 : "=r" ((SItype)(xh)), \
914 "=q" ((SItype)(xl)) \
/* POWER signed divide: div takes the high word in a register and the low
   word in MQ (tied to operand 1 via the "1"/"=q" constraints).  */
#define sdiv_qrnnd(q, r, nh, nl, d) \
  __asm__ ("div %0,%2,%4" \
           : "=r" ((SItype)(q)), "=q" ((SItype)(r)) \
           : "r" ((SItype)(nh)), "1" ((SItype)(nl)), "r" ((SItype)(d)))
#define UDIV_TIME 100
924 #endif /* Power architecture variants. */
927 /***************************************
928 ************** PYR ******************
929 ***************************************/
930 #if defined (__pyr__) && W_TYPE_SIZE == 32
931 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
932 __asm__ ("addw %5,%1 \n" \
934 : "=r" ((USItype)(sh)), \
935 "=&r" ((USItype)(sl)) \
936 : "%0" ((USItype)(ah)), \
937 "g" ((USItype)(bh)), \
938 "%1" ((USItype)(al)), \
940 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
941 __asm__ ("subw %5,%1 \n" \
943 : "=r" ((USItype)(sh)), \
944 "=&r" ((USItype)(sl)) \
945 : "0" ((USItype)(ah)), \
946 "g" ((USItype)(bh)), \
947 "1" ((USItype)(al)), \
949 /* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP. */
950 #define umul_ppmm(w1, w0, u, v) \
951 ({union {UDItype __ll; \
952 struct {USItype __h, __l;} __i; \
954 __asm__ ("movw %1,%R0 \n" \
956 : "=&r" (__xx.__ll) \
957 : "g" ((USItype) (u)), \
958 "g" ((USItype)(v))); \
959 (w1) = __xx.__i.__h; (w0) = __xx.__i.__l;})
963 /***************************************
964 ************** RT/ROMP **************
965 ***************************************/
966 #if defined (__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32
967 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
968 __asm__ ("a %1,%5 \n" \
970 : "=r" ((USItype)(sh)), \
971 "=&r" ((USItype)(sl)) \
972 : "%0" ((USItype)(ah)), \
973 "r" ((USItype)(bh)), \
974 "%1" ((USItype)(al)), \
976 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
977 __asm__ ("s %1,%5\n" \
979 : "=r" ((USItype)(sh)), \
980 "=&r" ((USItype)(sl)) \
981 : "0" ((USItype)(ah)), \
982 "r" ((USItype)(bh)), \
983 "1" ((USItype)(al)), \
985 #define umul_ppmm(ph, pl, m0, m1) \
987 USItype __m0 = (m0), __m1 = (m1); \
1009 : "=r" ((USItype)(ph)), \
1010 "=r" ((USItype)(pl)) \
1014 (ph) += ((((SItype) __m0 >> 31) & __m1) \
1015 + (((SItype) __m1 >> 31) & __m0)); \
1017 #define UMUL_TIME 20
1018 #define UDIV_TIME 200
1019 #define count_leading_zeros(count, x) \
1021 if ((x) >= 0x10000) \
1022 __asm__ ("clz %0,%1" \
1023 : "=r" ((USItype)(count)) \
1024 : "r" ((USItype)(x) >> 16)); \
1027 __asm__ ("clz %0,%1" \
1028 : "=r" ((USItype)(count)) \
1029 : "r" ((USItype)(x))); \
1033 #endif /* RT/ROMP */
1036 /***************************************
1037 ************** SH2 ******************
1038 ***************************************/
1039 #if (defined (__sh2__) || defined(__sh3__) || defined(__SH4__) ) \
1040 && W_TYPE_SIZE == 32
1041 #define umul_ppmm(w1, w0, u, v) \
1046 : "=r" ((USItype)(w1)), \
1047 "=r" ((USItype)(w0)) \
1048 : "r" ((USItype)(u)), \
1049 "r" ((USItype)(v)) \
1054 /***************************************
1055 ************** SPARC ****************
1056 ***************************************/
1057 #if defined (__sparc__) && W_TYPE_SIZE == 32
1058 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
1059 __asm__ ("addcc %r4,%5,%1\n" \
1061 : "=r" ((USItype)(sh)), \
1062 "=&r" ((USItype)(sl)) \
1063 : "%rJ" ((USItype)(ah)), \
1064 "rI" ((USItype)(bh)), \
1065 "%rJ" ((USItype)(al)), \
1066 "rI" ((USItype)(bl)) \
1068 #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
1069 __asm__ ("subcc %r4,%5,%1\n" \
1071 : "=r" ((USItype)(sh)), \
1072 "=&r" ((USItype)(sl)) \
1073 : "rJ" ((USItype)(ah)), \
1074 "rI" ((USItype)(bh)), \
1075 "rJ" ((USItype)(al)), \
1076 "rI" ((USItype)(bl)) \
1078 #if defined (__sparc_v8__)
1079 /* Don't match immediate range because, 1) it is not often useful,
1080 2) the 'I' flag thinks of the range as a 13 bit signed interval,
1081 while we want to match a 13 bit interval, sign extended to 32 bits,
1082 but INTERPRETED AS UNSIGNED. */
1083 #define umul_ppmm(w1, w0, u, v) \
1084 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
1085 : "=r" ((USItype)(w1)), \
1086 "=r" ((USItype)(w0)) \
1087 : "r" ((USItype)(u)), \
1090 #ifndef SUPERSPARC /* SuperSPARC's udiv only handles 53 bit dividends */
1091 #define udiv_qrnnd(q, r, n1, n0, d) \
1094 __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0" \
1095 : "=r" ((USItype)(__q)) \
1096 : "r" ((USItype)(n1)), \
1097 "r" ((USItype)(n0)), \
1098 "r" ((USItype)(d))); \
1099 (r) = (n0) - __q * (d); \
1102 #define UDIV_TIME 25
1103 #endif /* SUPERSPARC */
1104 #else /* ! __sparc_v8__ */
1105 #if defined (__sparclite__)
1106 /* This has hardware multiply but not divide. It also has two additional
1107 instructions scan (ffs from high bit) and divscc. */
1108 #define umul_ppmm(w1, w0, u, v) \
1109 __asm__ ("umul %2,%3,%1;rd %%y,%0" \
1110 : "=r" ((USItype)(w1)), \
1111 "=r" ((USItype)(w0)) \
1112 : "r" ((USItype)(u)), \
1115 #define udiv_qrnnd(q, r, n1, n0, d) \
1116 __asm__ ("! Inlined udiv_qrnnd \n" \
1117 " wr %%g0,%2,%%y ! Not a delayed write for sparclite \n" \
1119 " divscc %3,%4,%%g1 \n" \
1120 " divscc %%g1,%4,%%g1 \n" \
1121 " divscc %%g1,%4,%%g1 \n" \
1122 " divscc %%g1,%4,%%g1 \n" \
1123 " divscc %%g1,%4,%%g1 \n" \
1124 " divscc %%g1,%4,%%g1 \n" \
1125 " divscc %%g1,%4,%%g1 \n" \
1126 " divscc %%g1,%4,%%g1 \n" \
1127 " divscc %%g1,%4,%%g1 \n" \
1128 " divscc %%g1,%4,%%g1 \n" \
1129 " divscc %%g1,%4,%%g1 \n" \
1130 " divscc %%g1,%4,%%g1 \n" \
1131 " divscc %%g1,%4,%%g1 \n" \
1132 " divscc %%g1,%4,%%g1 \n" \
1133 " divscc %%g1,%4,%%g1 \n" \
1134 " divscc %%g1,%4,%%g1 \n" \
1135 " divscc %%g1,%4,%%g1 \n" \
1136 " divscc %%g1,%4,%%g1 \n" \
1137 " divscc %%g1,%4,%%g1 \n" \
1138 " divscc %%g1,%4,%%g1 \n" \
1139 " divscc %%g1,%4,%%g1 \n" \
1140 " divscc %%g1,%4,%%g1 \n" \
1141 " divscc %%g1,%4,%%g1 \n" \
1142 " divscc %%g1,%4,%%g1 \n" \
1143 " divscc %%g1,%4,%%g1 \n" \
1144 " divscc %%g1,%4,%%g1 \n" \
1145 " divscc %%g1,%4,%%g1 \n" \
1146 " divscc %%g1,%4,%%g1 \n" \
1147 " divscc %%g1,%4,%%g1 \n" \
1148 " divscc %%g1,%4,%%g1 \n" \
1149 " divscc %%g1,%4,%%g1 \n" \
1150 " divscc %%g1,%4,%0 \n" \
1153 " add %1,%4,%1 \n" \
1154 "1: ! End of inline udiv_qrnnd" \
1155 : "=r" ((USItype)(q)), \
1156 "=r" ((USItype)(r)) \
1157 : "r" ((USItype)(n1)), \
1158 "r" ((USItype)(n0)), \
1159 "rI" ((USItype)(d)) \
1160 : "%g1" __AND_CLOBBER_CC)
1161 #define UDIV_TIME 37
1162 #define count_leading_zeros(count, x) \
1163 __asm__ ("scan %1,0,%0" \
1164 : "=r" ((USItype)(x)) \
1165 : "r" ((USItype)(count)))
1166 /* Early sparclites return 63 for an argument of 0, but they warn that future
1167 implementations might change this. Therefore, leave COUNT_LEADING_ZEROS_0
1169 #endif /* __sparclite__ */
1170 #endif /* __sparc_v8__ */
1171 /* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd. */
/* SPARC v7 32x32 -> 64 unsigned multiply via 32 mulscc steps.
   %g2 holds the correction term that converts the signed mulscc product
   to an unsigned result.
   NOTE(review): restored the dropped "rd %%y,%1" (without it the low
   product word in %y is never read into w0) and the #ifndef guard, per
   the canonical gcc/GMP longlong.h -- verify against it.  */
#ifndef umul_ppmm
#define umul_ppmm(w1, w0, u, v) \
  __asm__ ("! Inlined umul_ppmm \n" \
" wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr \n" \
" sra %3,31,%%g2 ! Don't move this insn \n" \
" and %2,%%g2,%%g2 ! Don't move this insn \n" \
" andcc %%g0,0,%%g1 ! Don't move this insn \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,%3,%%g1 \n" \
" mulscc %%g1,0,%%g1 \n" \
" add %%g1,%%g2,%0 \n" \
" rd %%y,%1 \n" \
	   : "=r" ((USItype)(w1)), \
	     "=r" ((USItype)(w0)) \
	   : "%rI" ((USItype)(u)), \
	     "r" ((USItype)(v)) \
	   : "%g1", "%g2" __AND_CLOBBER_CC)
#define UMUL_TIME 39 /* 39 instructions */
#endif /* umul_ppmm */
1222 #ifndef LONGLONG_STANDALONE
1223 #define udiv_qrnnd(q, r, n1, n0, d) \
1225 (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
1228 extern USItype __udiv_qrnnd ();
1229 #define UDIV_TIME 140
1230 #endif /* LONGLONG_STANDALONE */
1231 #endif /* udiv_qrnnd */
1232 #endif /* __sparc__ */
1235 /***************************************
1236 ************** VAX ******************
1237 ***************************************/
1238 #if defined (__vax__) && W_TYPE_SIZE == 32
/* VAX double-word add: low word with addl2, high word with add-with-carry.
   NOTE(review): the "adwc %3,%0" carry step had been dropped by the
   extraction; without it the high word ignores the carry.  Restored per
   the canonical gcc/GMP longlong.h VAX section.  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("addl2 %5,%1\n" \
	   "adwc %3,%0" \
	   : "=g" ((USItype)(sh)), \
	     "=&g" ((USItype)(sl)) \
	   : "%0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "%1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
/* VAX double-word subtract: low word with subl2, high word with
   subtract-with-carry (borrow).
   NOTE(review): the "sbwc %3,%0" borrow step had been dropped by the
   extraction; restored per the canonical gcc/GMP longlong.h VAX section.  */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("subl2 %5,%1\n" \
	   "sbwc %3,%0" \
	   : "=g" ((USItype)(sh)), \
	     "=&g" ((USItype)(sl)) \
	   : "0" ((USItype)(ah)), \
	     "g" ((USItype)(bh)), \
	     "1" ((USItype)(al)), \
	     "g" ((USItype)(bl)))
/* VAX 32x32 -> 64 multiply: emul produces a signed 64-bit product; the
   final adjustment converts it to the unsigned product.
   NOTE(review): restored the dropped do/while wrapper, the union closer,
   and the input constraint list -- verify against the canonical gcc/GMP
   longlong.h VAX section.  */
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    union {UDItype __ll; \
	   struct {USItype __l, __h;} __i; \
	  } __xx; \
    USItype __m0 = (m0), __m1 = (m1); \
    __asm__ ("emul %1,%2,$0,%0" \
	     : "=g" (__xx.__ll) \
	     : "g" (__m0), \
	       "g" (__m1)); \
    (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
    (xh) += ((((SItype) __m0 >> 31) & __m1) \
	     + (((SItype) __m1 >> 31) & __m0)); \
  } while (0)
/* VAX signed 64/32 -> 32 divide via ediv: (q, r) = (n1:n0) / d.
   NOTE(review): restored the dropped do/while wrapper and union closer --
   verify against the canonical gcc/GMP longlong.h VAX section.  */
#define sdiv_qrnnd(q, r, n1, n0, d) \
  do { \
    union {DItype __ll; \
	   struct {SItype __l, __h;} __i; \
	  } __xx; \
    __xx.__i.__h = n1; __xx.__i.__l = n0; \
    __asm__ ("ediv %3,%2,%0,%1" \
	     : "=g" (q), "=g" (r) \
	     : "g" (__xx.__ll), "g" (d)); \
  } while (0)
1281 #endif /* __vax__ */
1284 /***************************************
1285 ************** Z8000 ****************
1286 ***************************************/
1287 #if defined (__z8000__) && W_TYPE_SIZE == 16
/* Z8000 double-word add: low halves with add, high halves with adc.
   "%H" is the Z8000 high-half operand modifier; "rQR" is a Z8000-specific
   constraint; "%0"/"%1" tie inputs to outputs, leading "%" marks
   commutative pairs.  (Code unchanged; only extraction residue removed.)  */
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  __asm__ ("add %H1,%H5\n\tadc %H0,%H3" \
	   : "=r" ((unsigned int)(sh)), \
	     "=&r" ((unsigned int)(sl)) \
	   : "%0" ((unsigned int)(ah)), \
	     "r" ((unsigned int)(bh)), \
	     "%1" ((unsigned int)(al)), \
	     "rQR" ((unsigned int)(bl)))
/* Z8000 double-word subtract: low halves with sub, high halves with sbc.
   Matching-digit constraints "0"/"1" tie the minuend words to the outputs
   (not marked commutative, since subtraction is not).
   (Code unchanged; only extraction residue removed.)  */
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  __asm__ ("sub %H1,%H5\n\tsbc %H0,%H3" \
	   : "=r" ((unsigned int)(sh)), \
	     "=&r" ((unsigned int)(sl)) \
	   : "0" ((unsigned int)(ah)), \
	     "r" ((unsigned int)(bh)), \
	     "1" ((unsigned int)(al)), \
	     "rQR" ((unsigned int)(bl)))
/* Z8000 16x16 -> 32 multiply via signed mult, with the usual unsigned
   correction on the high word.
   NOTE(review): restored the dropped do/while wrapper, union closer and
   input constraints -- verify against the canonical gcc/GMP longlong.h
   Z8000 section.  */
#define umul_ppmm(xh, xl, m0, m1) \
  do { \
    union {long int __ll; \
	   struct {unsigned int __h, __l;} __i; \
	  } __xx; \
    unsigned int __m0 = (m0), __m1 = (m1); \
    __asm__ ("mult %S0,%H3" \
	     : "=r" (__xx.__i.__h), \
	       "=r" (__xx.__i.__l) \
	     : "%1" (__m0), \
	       "rQR" (__m1)); \
    (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \
    (xh) += ((((signed int) __m0 >> 15) & __m1) \
	     + (((signed int) __m1 >> 15) & __m0)); \
  } while (0)
1319 #endif /* __z8000__ */
1321 #endif /* __GNUC__ */
1324 /***************************************
1325 *********** Generic Versions ********
1326 ***************************************/
/* If the target provides __umulsidi3 but no umul_ppmm, build the
   high/low-word multiply from the double-word product.
   NOTE(review): restored the dropped do/while wrapper and closing #endif.  */
#if !defined (umul_ppmm) && defined (__umulsidi3)
#define umul_ppmm(ph, pl, m0, m1) \
  do { \
    UDWtype __ll = __umulsidi3 (m0, m1); \
    ph = (UWtype) (__ll >> W_TYPE_SIZE); \
    pl = (UWtype) __ll; \
  } while (0)
#endif
/* Conversely, if umul_ppmm exists (asm or generic), synthesize the
   double-word multiply __umulsidi3 from it (GNU statement expression).
   NOTE(review): restored the dropped closing #endif for this guard.  */
#if !defined (__umulsidi3)
#define __umulsidi3(u, v) \
  ({UWtype __hi, __lo; \
    umul_ppmm (__hi, __lo, u, v); \
    ((UDWtype) __hi << W_TYPE_SIZE) | __lo; })
#endif
1343 /* If this machine has no inline assembler, use C macros. */
/* Generic C double-word add: (sh:sl) = (ah:al) + (bh:bl); the carry out
   of the low word is detected by the unsigned wrap test __x < (al).
   NOTE(review): restored the dropped do/while wrapper, __x declaration,
   the store to (sl) and the closing #endif.  */
#if !defined (add_ssaaaa)
#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
  do { \
    UWtype __x; \
    __x = (al) + (bl); \
    (sh) = (ah) + (bh) + (__x < (al)); \
    (sl) = __x; \
  } while (0)
#endif
/* Generic C double-word subtract: (sh:sl) = (ah:al) - (bh:bl); the borrow
   from the low word is detected by the unsigned wrap test __x > (al).
   NOTE(review): restored the dropped do/while wrapper, __x declaration,
   the store to (sl) and the closing #endif.  */
#if !defined (sub_ddmmss)
#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
  do { \
    UWtype __x; \
    __x = (al) - (bl); \
    (sh) = (ah) - (bh) - (__x > (al)); \
    (sl) = __x; \
  } while (0)
#endif
/* Generic C full-word multiply: split u and v into half-words, form the
   four partial products, and recombine with explicit carry handling.
   NOTE(review): restored the dropped do/while wrapper and closing #endif.  */
#if !defined (umul_ppmm)
#define umul_ppmm(w1, w0, u, v) \
  do { \
    UWtype __x0, __x1, __x2, __x3; \
    UHWtype __ul, __vl, __uh, __vh; \
    UWtype __u = (u), __v = (v); \
 \
    __ul = __ll_lowpart (__u); \
    __uh = __ll_highpart (__u); \
    __vl = __ll_lowpart (__v); \
    __vh = __ll_highpart (__v); \
 \
    __x0 = (UWtype) __ul * __vl; \
    __x1 = (UWtype) __ul * __vh; \
    __x2 = (UWtype) __uh * __vl; \
    __x3 = (UWtype) __uh * __vh; \
 \
    __x1 += __ll_highpart (__x0);/* this can't give carry */ \
    __x1 += __x2; /* but this indeed can */ \
    if (__x1 < __x2) /* did we get it? */ \
      __x3 += __ll_B; /* yes, add it in the proper pos. */ \
 \
    (w1) = __x3 + __ll_highpart (__x1); \
    (w0) = (__ll_lowpart (__x1) << W_TYPE_SIZE/2) + __ll_lowpart (__x0); \
  } while (0)
#endif
/* Signed full-word multiply built on umul_ppmm; the high word is corrected
   for negative inputs using the two's-complement identity
   (s1*s2)_hi = (u1*u2)_hi - (s1<0 ? u2 : 0) - (s2<0 ? u1 : 0).
   Fix: the guard tested umul_ppmm, which is always defined by this point
   (the generic fallback above guarantees it), so smul_ppmm could never be
   defined; guard on smul_ppmm itself.
   NOTE(review): also restored the dropped do/while wrapper and #endif.  */
#if !defined (smul_ppmm)
#define smul_ppmm(w1, w0, u, v) \
  do { \
    UWtype __w1; \
    UWtype __m0 = (u), __m1 = (v); \
    umul_ppmm (__w1, w0, __m0, __m1); \
    (w1) = __w1 - (-(__m0 >> (W_TYPE_SIZE - 1)) & __m1) \
	 - (-(__m1 >> (W_TYPE_SIZE - 1)) & __m0); \
  } while (0)
#endif
1403 /* Define this unconditionally, so it can be used for debugging. */
/* Plain C 64/32 -> 32 divide: (q, r) = (n1:n0) / d, one half-word digit of
   quotient at a time with at most two correction steps per digit.
   Preconditions (from the classic algorithm): d is normalized (its most
   significant bit is set) and n1 < d, so the quotient fits one word.
   NOTE(review): restored the dropped do/while wrapper, the two correction
   "if" headers, both "__rX -= __m;" steps and the final remainder store,
   per the canonical gcc/GMP longlong.h -- verify against it.  */
#define __udiv_qrnnd_c(q, r, n1, n0, d) \
  do { \
    UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \
    __d1 = __ll_highpart (d); \
    __d0 = __ll_lowpart (d); \
 \
    __r1 = (n1) % __d1; \
    __q1 = (n1) / __d1; \
    __m = (UWtype) __q1 * __d0; \
    __r1 = __r1 * __ll_B | __ll_highpart (n0); \
    if (__r1 < __m) \
      { \
	__q1--, __r1 += (d); \
	if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
	  if (__r1 < __m) \
	    __q1--, __r1 += (d); \
      } \
    __r1 -= __m; \
 \
    __r0 = __r1 % __d1; \
    __q0 = __r1 / __d1; \
    __m = (UWtype) __q0 * __d0; \
    __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
    if (__r0 < __m) \
      { \
	__q0--, __r0 += (d); \
	if (__r0 >= (d)) \
	  if (__r0 < __m) \
	    __q0--, __r0 += (d); \
      } \
    __r0 -= __m; \
 \
    (q) = (UWtype) __q1 * __ll_B | __q0; \
    (r) = __r0; \
  } while (0)
1440 /* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through
1441 __udiv_w_sdiv (defined in libgcc or elsewhere). */
/* NOTE(review): restored the dropped do/while wrapper, the store of the
   remainder back into R, and the closing #endif for this guard.  */
#if !defined (udiv_qrnnd) && defined (sdiv_qrnnd)
#define udiv_qrnnd(q, r, nh, nl, d) \
  do { \
    UWtype __r; \
    (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d); \
    (r) = __r; \
  } while (0)
#endif
1451 /* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
/* Last resort: fall back to the plain C divide, which requires a
   normalized divisor (hence UDIV_NEEDS_NORMALIZATION = 1).
   NOTE(review): restored the dropped closing #endif.  */
#if !defined (udiv_qrnnd)
#define UDIV_NEEDS_NORMALIZATION 1
#define udiv_qrnnd __udiv_qrnnd_c
#endif
/* Generic count_leading_zeros: narrow X to an 8-bit window (binary search
   for <=32-bit words, a byte-wise scan otherwise), then finish with the
   external 256-entry table __clz_tab.
   NOTE(review): the #undef below discards any asm version defined earlier,
   forcing the C fallback -- looks deliberate (gnupg addition?) but verify.
   Restored the dropped extern/__STDC__ declaration lines, the do/while
   wrapper, the __a declaration, the if/else braces and the loop "break",
   plus the closing #endif, per the canonical gcc/GMP longlong.h.  */
#undef count_leading_zeros
#if !defined (count_leading_zeros)
extern
#ifdef __STDC__
const
#endif
unsigned char __clz_tab[];
#define count_leading_zeros(count, x) \
  do { \
    UWtype __xr = (x); \
    UWtype __a; \
 \
    if (W_TYPE_SIZE <= 32) \
      { \
	__a = __xr < ((UWtype) 1 << 2*__BITS4) \
	  ? (__xr < ((UWtype) 1 << __BITS4) ? 0 : __BITS4) \
	  : (__xr < ((UWtype) 1 << 3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \
      } \
    else \
      { \
	for (__a = W_TYPE_SIZE - 8; __a > 0; __a -= 8) \
	  if (((__xr >> __a) & 0xff) != 0) \
	    break; \
      } \
 \
    (count) = W_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \
  } while (0)
/* This version gives a well-defined value for zero. */
#define COUNT_LEADING_ZEROS_0 W_TYPE_SIZE
#endif
#if !defined (count_trailing_zeros)
/* Define count_trailing_zeros using count_leading_zeros.  The latter might
   be defined in asm, but if it is not, the C version above is good enough.
   __ctz_x & -__ctz_x isolates the lowest set bit (x must be nonzero).
   NOTE(review): restored the dropped do/while wrapper, the __ctz_c
   declaration and the closing #endif.  */
#define count_trailing_zeros(count, x) \
  do { \
    UWtype __ctz_x = (x); \
    UWtype __ctz_c; \
    count_leading_zeros (__ctz_c, __ctz_x & -__ctz_x); \
    (count) = W_TYPE_SIZE - 1 - __ctz_c; \
  } while (0)
#endif
1500 #ifndef UDIV_NEEDS_NORMALIZATION
1501 #define UDIV_NEEDS_NORMALIZATION 0