/* atomic.h: These still suck, but the I-cache hit rate is higher.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 2000 Anton Blanchard (anton@linuxcare.com.au)
 *
 * Additions by Keith M Wesolowski (wesolows@foobazco.org) based
 * on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>.
 */
#ifndef __ARCH_SPARC_ATOMIC__
#define __ARCH_SPARC_ATOMIC__

#include <linux/config.h>

typedef struct { volatile int counter; } atomic_t;

#ifdef __KERNEL__
#define ATOMIC_INIT(i)	{ (i) }

extern int __atomic_add_return(int, atomic_t *);
extern void atomic_set(atomic_t *, int);

#define atomic_read(v)		((v)->counter)

#define atomic_add(i, v)	((void)__atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v)	((void)__atomic_add_return(-(int)(i), (v)))
#define atomic_inc(v)		((void)__atomic_add_return(        1, (v)))
#define atomic_dec(v)		((void)__atomic_add_return(       -1, (v)))

#define atomic_add_return(i, v)	(__atomic_add_return( (int)(i), (v)))
#define atomic_sub_return(i, v)	(__atomic_add_return(-(int)(i), (v)))
#define atomic_inc_return(v)	(__atomic_add_return(        1, (v)))
#define atomic_dec_return(v)	(__atomic_add_return(       -1, (v)))

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
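/* Example (not part of the original header): a hypothetical reference-count
 * release path built on the macros above; my_refcnt and release_my_obj()
 * are made-up names used purely for illustration.
 *
 *	static atomic_t my_refcnt = ATOMIC_INIT(1);
 *
 *	static void my_put(void)
 *	{
 *		if (atomic_dec_and_test(&my_refcnt))
 *			release_my_obj();
 *	}
 *
 * atomic_dec_and_test() returns true only for the transition to zero, so
 * exactly one caller ends up doing the final cleanup.
 */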
/* This is the old 24-bit implementation.  It's still used internally
 * by some sparc-specific code, notably the semaphore implementation.
 */
typedef struct { volatile int counter; } atomic24_t;

#ifndef CONFIG_SMP

#define ATOMIC24_INIT(i)	{ (i) }
#define atomic24_read(v)	((v)->counter)
#define atomic24_set(v, i)	(((v)->counter) = i)

#else
/* We do the bulk of the actual work out of line in two common
 * routines in assembler, see arch/sparc/lib/atomic.S for the
 * "fun" details.
 *
 * For SMP the trick is you embed the spin lock byte within
 * the word, use the low byte so signedness is easily retained
 * via a quick arithmetic shift.  It looks like this:
 *
 *	----------------------------------------
 *	| signed 24-bit counter value |  lock  |  atomic_t
 *	----------------------------------------
 *	 31                          8 7      0
 */
#define ATOMIC24_INIT(i)	{ ((i) << 8) }
static inline int atomic24_read(const atomic24_t *v)
{
	int ret = v->counter;

	/* Spin until the lock byte is clear, then strip it off. */
	while (ret & 0xff)
		ret = v->counter;

	return ret >> 8;
}
#define atomic24_set(v, i)	(((v)->counter) = ((i) << 8))
#endif
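/* Illustration (not in the original header): on SMP the counter is stored
 * pre-shifted, so a logical value of i lives in bits 31..8 and the low
 * byte doubles as the spin lock.  For a made-up value of 5:
 *
 *	atomic24_t a = ATOMIC24_INIT(5);	counter == 0x00000500
 *	atomic24_read(&a);			spins while (counter & 0xff),
 *						then returns 0x500 >> 8 == 5
 *
 * The arithmetic (signed) shift back down is what preserves the sign of
 * negative counter values once the lock byte is stripped off.
 */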
static inline int __atomic24_add(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	/* Call the out-of-line routine in arch/sparc/lib/atomic.S:
	 * %g1 carries the pointer, %g2 the increment and, on return,
	 * the new counter value.
	 */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_add\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}
static inline int __atomic24_sub(int i, atomic24_t *v)
{
	register volatile int *ptr asm("g1");
	register int increment asm("g2");
	register int tmp1 asm("g3");
	register int tmp2 asm("g4");
	register int tmp3 asm("g7");

	ptr = &v->counter;
	increment = i;

	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___atomic24_sub\n\t"
	" add	%%o7, 8, %%o7\n"
	: "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
	: "0" (increment), "r" (ptr)
	: "memory", "cc");

	return increment;
}
#define atomic24_add(i, v) ((void)__atomic24_add((i), (v)))
#define atomic24_sub(i, v) ((void)__atomic24_sub((i), (v)))

#define atomic24_dec_return(v) __atomic24_sub(1, (v))
#define atomic24_inc_return(v) __atomic24_add(1, (v))

#define atomic24_sub_and_test(i, v) (__atomic24_sub((i), (v)) == 0)
#define atomic24_dec_and_test(v) (__atomic24_sub(1, (v)) == 0)

#define atomic24_inc(v) ((void)__atomic24_add(1, (v)))
#define atomic24_dec(v) ((void)__atomic24_sub(1, (v)))

#define atomic24_add_negative(i, v) (__atomic24_add((i), (v)) < 0)
/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
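/* Usage sketch (illustrative only, the field names are made up): callers
 * still place these hooks around the atomic op so the same code stays
 * correct on architectures where the atomics are not full barriers:
 *
 *	obj->dead = 1;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->refcnt);
 *
 * Here they compile down to a plain barrier() because, as noted above,
 * the atomic operations themselves are already serializing.
 */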
#endif /* !(__KERNEL__) */

#endif /* !(__ARCH_SPARC_ATOMIC__) */