1 #ifndef _M68KNOMMU_DELAY_H
2 #define _M68KNOMMU_DELAY_H
/*
 * Copyright (C) 1994 Hamish Macdonald
 *
 * Delay routines, using a pre-computed "loops_per_second" value.
 */
/*
 * __delay() - spin for 'loops' iterations of a tight one-instruction
 * decrement-and-branch loop.  Callers (udelay() below) convert a time
 * interval into a loop count using the calibrated loops_per_jiffy.
 *
 * NOTE(review): this chunk of the file was truncated; the function
 * braces, the loop branch instructions and the #else/#endif pairing
 * were reconstructed from the surviving lines -- confirm against the
 * full file.
 */
extern __inline__ void __delay(unsigned long loops)
{
#if defined(CONFIG_COLDFIRE)
	/* The coldfire runs this loop at significantly different speeds
	 * depending upon long word alignment or not.  We'll pad it to
	 * long word alignment which is the faster version.
	 * The 0x4a8e is of course a 'tstl %fp' instruction.  This is better
	 * than using a NOP (0x4e71) instruction because it executes in one
	 * cycle not three and doesn't allow for an arbitary delay waiting
	 * for bus cycles to finish.  Also fp/a6 isn't likely to cause a
	 * stall waiting for the register to become valid if such is added
	 * to the coldfire at some stage.
	 */
	__asm__ __volatile__ (	".balignw 4, 0x4a8e\n\t"
				"1: subql #1, %0\n\t"
				"jcc 1b"
		: "=d" (loops) : "0" (loops));
#else
	/* Plain 68k: no alignment padding needed, same subq/branch loop. */
	__asm__ __volatile__ (	"1: subql #1, %0\n\t"
				"jcc 1b"
		: "=d" (loops) : "0" (loops));
#endif
}
/*
 * Use only for very small delays ( < 1 msec).  Should probably use a
 * lookup table, really, as the multiplications take much too long with
 * short delays.  This is a "reasonable" implementation, though (and the
 * first constant multiplications gets optimized away if the delay is
 * a constant).
 */
44 extern unsigned long loops_per_jiffy;
46 extern __inline__ void udelay(unsigned long usecs)
50 __asm__ __volatile__ ("mulul %1,%0:%2"
53 "d" (loops_per_jiffy*HZ));
56 #elif defined(CONFIG_M68328) || defined(CONFIG_M68EZ328) || \
57 defined(CONFIG_COLDFIRE) || defined(CONFIG_M68360) || \
58 defined(CONFIG_M68VZ328)
59 register unsigned long full_loops, part_loops;
61 full_loops = ((usecs * HZ) / 1000000) * loops_per_jiffy;
62 usecs %= (1000000 / HZ);
63 part_loops = (usecs * HZ * loops_per_jiffy) / 1000000;
65 __delay(full_loops + part_loops);
69 usecs *= 4295; /* 2**32 / 1000000 */
70 __asm__ ("mulul %2,%0:%1"
71 : "=d" (usecs), "=d" (tmp)
72 : "d" (usecs), "1" (loops_per_jiffy*HZ));
77 #endif /* defined(_M68KNOMMU_DELAY_H) */