Merge to Fedora kernel-2.6.18-1.2224_FC5 patched with stable patch-2.6.18.1-vs2.0...
[linux-2.6.git] / include / asm-m68k / atomic.h
1 #ifndef __ARCH_M68K_ATOMIC__
2 #define __ARCH_M68K_ATOMIC__
3
4
5 #include <asm/system.h> /* local_irq_XXX() */
6
7 /*
8  * Atomic operations that C can't guarantee us.  Useful for
9  * resource counting etc..
10  */
11
12 /*
13  * We do not have SMP m68k systems, so we don't have to deal with that.
14  */
15
/*
 * Wrapper struct so atomic variables get their own type and cannot be
 * mixed up with plain ints; the counter itself is just an int.
 */
typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i)  { (i) }

/*
 * Plain load/store suffice here: a single aligned int access cannot be
 * torn, and this file assumes no SMP (see comment above), so no special
 * instructions are needed for read/set.
 */
#define atomic_read(v)          ((v)->counter)
#define atomic_set(v, i)        (((v)->counter) = i)
21
/*
 * atomic_add - add @i to @v.
 * A single addl to memory is one read-modify-write instruction, so it
 * cannot be split by an interrupt; with no SMP (see above) that makes
 * it atomic.  "id" allows an immediate or data register for @i.
 */
static inline void atomic_add(int i, atomic_t *v)
{
        __asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
}
26
/*
 * atomic_sub - subtract @i from @v.
 * Single-instruction memory subl; atomic for the same reasons as
 * atomic_add() (uninterruptible instruction, no SMP).
 */
static inline void atomic_sub(int i, atomic_t *v)
{
        __asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
}
31
/*
 * atomic_inc - increment @v by one.
 * addql takes a small immediate (#1) and a memory operand in one
 * uninterruptible instruction.
 */
static inline void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
36
/*
 * atomic_dec - decrement @v by one.
 * Mirror of atomic_inc() using subql.
 */
static inline void atomic_dec(atomic_t *v)
{
        __asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
41
/*
 * atomic_dec_and_test - decrement @v and test for zero.
 * Returns non-zero iff the new value is 0.  The seq must immediately
 * follow the subql in the same asm so the condition codes set by the
 * decrement are still live when seq samples them ("=d": data register
 * for the flag byte).
 */
static inline int atomic_dec_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}
48
/*
 * atomic_inc_and_test - increment @v and test for zero.
 * Returns non-zero iff the new value is 0 (i.e. the counter wrapped to
 * zero).  Same condition-code coupling as atomic_dec_and_test().
 */
static inline int atomic_inc_and_test(atomic_t *v)
{
        char c;
        __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
        return c != 0;
}
55
56 #ifdef CONFIG_RMW_INSNS
57
/*
 * atomic_add_return - add @i to @v and return the new value.
 * CAS-based retry loop (CPUs with RMW instructions, CONFIG_RMW_INSNS):
 * %2 (tmp) holds the expected old value, seeded from atomic_read(v);
 * %1 (t) is old+i; casl stores %1 into *v only if *v still equals %2,
 * otherwise it loads the current value back into %2 and sets the
 * condition codes so "jne 1b" retries with the fresh value.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        int t, tmp;

        __asm__ __volatile__(
                        "1:     movel %2,%1\n"
                        "       addl %3,%1\n"
                        "       casl %2,%1,%0\n"
                        "       jne 1b"
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)
                        : "g" (i), "2" (atomic_read(v)));
        return t;
}
71
/*
 * atomic_sub_return - subtract @i from @v and return the new value.
 * Identical casl retry structure to atomic_add_return() above, with
 * subl in place of addl.
 */
static inline int atomic_sub_return(int i, atomic_t *v)
{
        int t, tmp;

        __asm__ __volatile__(
                        "1:     movel %2,%1\n"
                        "       subl %3,%1\n"
                        "       casl %2,%1,%0\n"
                        "       jne 1b"
                        : "+m" (*v), "=&d" (t), "=&d" (tmp)
                        : "g" (i), "2" (atomic_read(v)));
        return t;
}
85
/*
 * With hardware CAS available, atomic_cmpxchg()/atomic_xchg() simply
 * delegate to the generic cmpxchg()/xchg() helpers on the raw counter.
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
88
89 #else /* !CONFIG_RMW_INSNS */
90
91 static inline int atomic_add_return(int i, atomic_t * v)
92 {
93         unsigned long flags;
94         int t;
95
96         local_irq_save(flags);
97         t = atomic_read(v);
98         t += i;
99         atomic_set(v, t);
100         local_irq_restore(flags);
101
102         return t;
103 }
104
105 static inline int atomic_sub_return(int i, atomic_t * v)
106 {
107         unsigned long flags;
108         int t;
109
110         local_irq_save(flags);
111         t = atomic_read(v);
112         t -= i;
113         atomic_set(v, t);
114         local_irq_restore(flags);
115
116         return t;
117 }
118
119 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
120 {
121         unsigned long flags;
122         int prev;
123
124         local_irq_save(flags);
125         prev = atomic_read(v);
126         if (prev == old)
127                 atomic_set(v, new);
128         local_irq_restore(flags);
129         return prev;
130 }
131
132 static inline int atomic_xchg(atomic_t *v, int new)
133 {
134         unsigned long flags;
135         int prev;
136
137         local_irq_save(flags);
138         prev = atomic_read(v);
139         atomic_set(v, new);
140         local_irq_restore(flags);
141         return prev;
142 }
143
144 #endif /* !CONFIG_RMW_INSNS */
145
/* inc/dec-and-return expressed via whichever add/sub_return variant
 * (CAS or irq-masking) was selected above. */
#define atomic_dec_return(v)    atomic_sub_return(1, (v))
#define atomic_inc_return(v)    atomic_add_return(1, (v))
148
/*
 * atomic_sub_and_test - subtract @i from @v and test for zero.
 * Returns non-zero iff the new value is 0; seq captures the Z flag set
 * by the subl in the same asm statement.
 */
static inline int atomic_sub_and_test(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("subl %2,%1; seq %0" : "=d" (c), "+m" (*v): "g" (i));
        return c != 0;
}
155
/*
 * atomic_add_negative - add @i to @v and test the sign.
 * Returns non-zero iff the result is negative; smi ("set if minus")
 * captures the N flag left by the addl.
 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
        char c;
        __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i));
        return c != 0;
}
162
/*
 * atomic_clear_mask - atomically clear the bits of @mask in *@v.
 * ANDs in the complement of @mask with a single memory andl.  Note the
 * historical interface: this operates on a plain unsigned long, not an
 * atomic_t.
 */
static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
        __asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
}
167
/*
 * atomic_set_mask - atomically set the bits of @mask in *@v.
 * Single memory orl; same unsigned-long interface as
 * atomic_clear_mask() above.
 */
static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
        __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}
172
/*
 * atomic_add_unless - add @a to @v unless @v currently equals @u.
 * Standard cmpxchg retry loop; the statement expression evaluates to
 * non-zero iff the add was performed.  NOTE(review): @v, @a and @u may
 * be evaluated multiple times — callers must not pass side-effecting
 * expressions.
 */
#define atomic_add_unless(v, a, u)                              \
({                                                              \
        int c, old;                                             \
        c = atomic_read(v);                                     \
        for (;;) {                                              \
                if (unlikely(c == (u)))                         \
                        break;                                  \
                old = atomic_cmpxchg((v), c, c + (a));          \
                if (likely(old == c))                           \
                        break;                                  \
                c = old;                                        \
        }                                                       \
        c != (u);                                               \
})
/* Common special case: increment @v only if it is not zero. */
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
188
/* Atomic operations are already serializing, so these barriers only
 * need to prevent compiler reordering — hence plain barrier(). */
#define smp_mb__before_atomic_dec()     barrier()
#define smp_mb__after_atomic_dec()      barrier()
#define smp_mb__before_atomic_inc()     barrier()
#define smp_mb__after_atomic_inc()      barrier()
194
195 #include <asm-generic/atomic.h>
#endif /* __ARCH_M68K_ATOMIC__ */