vserver 1.9.5.x5
[linux-2.6.git] / include / asm-m32r / spinlock.h
1 #ifndef _ASM_M32R_SPINLOCK_H
2 #define _ASM_M32R_SPINLOCK_H
3
4 /*
5  *  linux/include/asm-m32r/spinlock.h
6  *
7  *  M32R version:
8  *    Copyright (C) 2001, 2002  Hitoshi Yamamoto
9  *    Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
10  */
11
12 #include <linux/config.h>       /* CONFIG_DEBUG_SPINLOCK, CONFIG_SMP */
13 #include <linux/compiler.h>
14 #include <asm/atomic.h>
15 #include <asm/page.h>
16
/*
 * Forward declaration of printk() (with printf-style format checking)
 * so the debug path in _raw_spin_lock() can report without this header
 * having to pull in <linux/kernel.h>.
 */
17 extern int printk(const char * fmt, ...)
18         __attribute__ ((format (printf, 1, 2)));
19
/*
 * Bias held by an unlocked rwlock counter: writers subtract the whole
 * bias, readers subtract 1.  The string form is string-pasted into the
 * inline-asm templates below (seth/or3 immediates); keep both in sync.
 */
20 #define RW_LOCK_BIAS             0x01000000
21 #define RW_LOCK_BIAS_STR        "0x01000000"
22
23 /*
24  * Your basic SMP spinlocks, allowing only a single CPU anywhere
25  */
26
/*
 * Spin lock type.  slock == 1 means unlocked; slock <= 0 means locked
 * (the lock path decrements, so contended acquirers drive it negative).
 */
27 typedef struct {
28         volatile int slock;
29 #ifdef CONFIG_DEBUG_SPINLOCK
        /* Checked against SPINLOCK_MAGIC to catch lock corruption. */
30         unsigned magic;
31 #endif
32 #ifdef CONFIG_PREEMPT
        /* Flag used by the generic preemption code on contended locks. */
33         unsigned int break_lock;
34 #endif
35 } spinlock_t;
36
37 #define SPINLOCK_MAGIC  0xdead4ead
38
/* Optional ", magic" initializer fragment, empty when debug is off. */
39 #ifdef CONFIG_DEBUG_SPINLOCK
40 #define SPINLOCK_MAGIC_INIT     , SPINLOCK_MAGIC
41 #else
42 #define SPINLOCK_MAGIC_INIT     /* */
43 #endif
44
/* Static initializer: slock = 1, i.e. unlocked. */
45 #define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }
46
47 #define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
48
49 /*
50  * Simple spin lock operations.  There are two variants, one clears IRQ's
51  * on the local processor, one does not.
52  *
53  * We make no fairness assumptions. They have a cost.
54  */
55
/* Locked when slock <= 0; forced volatile read so the test is not cached. */
56 #define spin_is_locked(x)       (*(volatile int *)(&(x)->slock) <= 0)
/* Busy-wait for release without attempting to take the lock ourselves. */
57 #define spin_unlock_wait(x)     do { barrier(); } while(spin_is_locked(x))
/* m32r makes no use of the saved flags while spinning; plain lock. */
58 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
59
60 /**
61  * _raw_spin_trylock - Try spin lock and return a result
62  * @lock: Pointer to the lock variable
63  *
64  * _raw_spin_trylock() tries to get the lock and returns a result.
65  * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
66  */
67 static inline int _raw_spin_trylock(spinlock_t *lock)
68 {
69         int oldval;
70         unsigned long tmp1, tmp2;
71
72         /*
73          * lock->slock :  =1 : unlock
74          *             : <=0 : lock
75          * {
76          *   oldval = lock->slock; <--+ need atomic operation
77          *   lock->slock = 0;      <--+
78          * }
79          */
        /*
         * Atomic exchange of slock with 0: save psw, clear bit 0x40
         * (presumably the interrupt-enable bit -- restored by mvtc),
         * then use the m32r lock/unlock instruction pair to load the
         * old value and store 0 in one atomic sequence.  DCACHE_CLEAR
         * is a chip workaround; on M32700 TS1 silicon it uses r6 as
         * scratch, hence the conditional clobber below.
         */
80         __asm__ __volatile__ (
81                 "# spin_trylock                 \n\t"
82                 "ldi    %1, #0;                 \n\t"
83                 "mvfc   %2, psw;                \n\t"
84                 "clrpsw #0x40 -> nop;           \n\t"
85                 DCACHE_CLEAR("%0", "r6", "%3")
86                 "lock   %0, @%3;                \n\t"
87                 "unlock %1, @%3;                \n\t"
88                 "mvtc   %2, psw;                \n\t"
89                 : "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
90                 : "r" (&lock->slock)
91                 : "memory"
92 #ifdef CONFIG_CHIP_M32700_TS1
93                 , "r6"
94 #endif  /* CONFIG_CHIP_M32700_TS1 */
95         );
96
        /* Old value > 0 (i.e. == 1) means the lock was free and is now ours. */
97         return (oldval > 0);
98 }
99
/*
 * Acquire the spinlock, spinning (with interrupts briefly disabled only
 * around each atomic decrement attempt) until it is taken.
 */
100 static inline void _raw_spin_lock(spinlock_t *lock)
101 {
102         unsigned long tmp0, tmp1;
103
104 #ifdef CONFIG_DEBUG_SPINLOCK
        /* Report our own PC via a local label, then BUG on a corrupt lock. */
105         __label__ here;
106 here:
107         if (lock->magic != SPINLOCK_MAGIC) {
108                 printk("pc: %p\n", &&here);
109                 BUG();
110         }
111 #endif
112         /*
113          * lock->slock :  =1 : unlock
114          *             : <=0 : lock
115          *
116          * for ( ; ; ) {
117          *   lock->slock -= 1;  <-- need atomic operation
118          *   if (lock->slock == 0) break;
119          *   for ( ; lock->slock <= 0 ; );
120          * }
121          */
        /*
         * Fast path (label 1): atomically decrement slock with the
         * lock/unlock pair under cleared psw bit 0x40; if the result is
         * 0 we fall through -- lock acquired.  If negative, branch to
         * the out-of-line spin loop (label 2, placed in the lock text
         * section) which reads slock until it goes positive and then
         * retries the decrement.
         */
122         __asm__ __volatile__ (
123                 "# spin_lock                    \n\t"
124                 ".fillinsn                      \n"
125                 "1:                             \n\t"
126                 "mvfc   %1, psw;                \n\t"
127                 "clrpsw #0x40 -> nop;           \n\t"
128                 DCACHE_CLEAR("%0", "r6", "%2")
129                 "lock   %0, @%2;                \n\t"
130                 "addi   %0, #-1;                \n\t"
131                 "unlock %0, @%2;                \n\t"
132                 "mvtc   %1, psw;                \n\t"
133                 "bltz   %0, 2f;                 \n\t"
134                 LOCK_SECTION_START(".balign 4 \n\t")
135                 ".fillinsn                      \n"
136                 "2:                             \n\t"
137                 "ld     %0, @%2;                \n\t"
138                 "bgtz   %0, 1b;                 \n\t"
139                 "bra    2b;                     \n\t"
140                 LOCK_SECTION_END
141                 : "=&r" (tmp0), "=&r" (tmp1)
142                 : "r" (&lock->slock)
143                 : "memory"
144 #ifdef CONFIG_CHIP_M32700_TS1
145                 , "r6"
146 #endif  /* CONFIG_CHIP_M32700_TS1 */
147         );
148 }
149
/*
 * Release the spinlock.  A plain store of 1 suffices; the mb() before
 * it keeps the critical section's memory accesses from being reordered
 * past the releasing store.
 */
150 static inline void _raw_spin_unlock(spinlock_t *lock)
151 {
152 #ifdef CONFIG_DEBUG_SPINLOCK
153         BUG_ON(lock->magic != SPINLOCK_MAGIC);
        /* Unlocking a lock we do not hold is a bug. */
154         BUG_ON(!spin_is_locked(lock));
155 #endif
156         mb();
157         lock->slock = 1;
158 }
159
160 /*
161  * Read-write spinlocks, allowing multiple readers
162  * but only one writer.
163  *
164  * NOTE! it is quite common to have readers in interrupts
165  * but no interrupt writers. For those circumstances we
166  * can "mix" irq-safe locks - any writer needs to get a
167  * irq-safe write-lock, but readers can get non-irqsafe
168  * read-locks.
169  */
/*
 * Read-write lock type.  lock == RW_LOCK_BIAS means free; each reader
 * subtracts 1, a writer subtracts the whole bias (so only an otherwise
 * uncontended lock reaches exactly 0 for a writer).
 */
170 typedef struct {
171         volatile int lock;
172 #ifdef CONFIG_DEBUG_SPINLOCK
        /* Checked against RWLOCK_MAGIC to catch lock corruption. */
173         unsigned magic;
174 #endif
175 #ifdef CONFIG_PREEMPT
        /* Flag used by the generic preemption code on contended locks. */
176         unsigned int break_lock;
177 #endif
178 } rwlock_t;
179
180 #define RWLOCK_MAGIC    0xdeaf1eed
181
/* Optional ", magic" initializer fragment, empty when debug is off. */
182 #ifdef CONFIG_DEBUG_SPINLOCK
183 #define RWLOCK_MAGIC_INIT       , RWLOCK_MAGIC
184 #else
185 #define RWLOCK_MAGIC_INIT       /* */
186 #endif
187
/* Static initializer: counter at full bias, i.e. unlocked. */
188 #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
189
190 #define rwlock_init(x)  do { *(x) = RW_LOCK_UNLOCKED; } while(0)
191
192 /**
193  * read_can_lock - would read_trylock() succeed?
194  * @lock: the rwlock in question.
195  */
/* Counter still positive: no writer holds (or is draining) the lock. */
196 #define read_can_lock(x) ((int)(x)->lock > 0)
197
198 /**
199  * write_can_lock - would write_trylock() succeed?
200  * @lock: the rwlock in question.
201  */
/* Counter at full bias: no readers and no writer present. */
202 #define write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
203
204 /*
205  * We implement read-write locks as a 32-bit counter
206  * with the high bit (sign) being the "contended" bit.
207  *
208  * The inline assembly is non-obvious. Think about it.
209  *
210  * Changed to use the same technique as rw semaphores.  See
211  * semaphore.h for details.  -ben
212  */
213 /* (On i386 the out-of-line helpers live in arch/i386/kernel/semaphore.c;
214  *  on m32r everything is inlined right here.) */
214
/*
 * Acquire @rw for reading: atomically decrement the counter; if it went
 * negative a writer holds (or is taking) the lock, so undo the decrement
 * and spin until the counter is positive again, then retry.
 */
215 static inline void _raw_read_lock(rwlock_t *rw)
216 {
217         unsigned long tmp0, tmp1;
218
219 #ifdef CONFIG_DEBUG_SPINLOCK
220         BUG_ON(rw->magic != RWLOCK_MAGIC);
221 #endif
222         /*
223          * rw->lock :  >0 : unlock
224          *          : <=0 : lock
225          *
226          * for ( ; ; ) {
227          *   rw->lock -= 1;  <-- need atomic operation
228          *   if (rw->lock >= 0) break;
229          *   rw->lock += 1;  <-- need atomic operation
230          *   for ( ; rw->lock <= 0 ; );
231          * }
232          */
        /*
         * Label 1: atomic decrement (lock/unlock pair, psw bit 0x40
         * cleared around it).  Negative result branches to the slow
         * path: label 2 atomically re-increments to undo our claim,
         * then label 3 spins reading the counter until it goes
         * positive and jumps back to retry at 1.
         */
233         __asm__ __volatile__ (
234                 "# read_lock                    \n\t"
235                 ".fillinsn                      \n"
236                 "1:                             \n\t"
237                 "mvfc   %1, psw;                \n\t"
238                 "clrpsw #0x40 -> nop;           \n\t"
239                 DCACHE_CLEAR("%0", "r6", "%2")
240                 "lock   %0, @%2;                \n\t"
241                 "addi   %0, #-1;                \n\t"
242                 "unlock %0, @%2;                \n\t"
243                 "mvtc   %1, psw;                \n\t"
244                 "bltz   %0, 2f;                 \n\t"
245                 LOCK_SECTION_START(".balign 4 \n\t")
246                 ".fillinsn                      \n"
247                 "2:                             \n\t"
248                 "clrpsw #0x40 -> nop;           \n\t"
249                 DCACHE_CLEAR("%0", "r6", "%2")
250                 "lock   %0, @%2;                \n\t"
251                 "addi   %0, #1;                 \n\t"
252                 "unlock %0, @%2;                \n\t"
253                 "mvtc   %1, psw;                \n\t"
254                 ".fillinsn                      \n"
255                 "3:                             \n\t"
256                 "ld     %0, @%2;                \n\t"
257                 "bgtz   %0, 1b;                 \n\t"
258                 "bra    3b;                     \n\t"
259                 LOCK_SECTION_END
260                 : "=&r" (tmp0), "=&r" (tmp1)
261                 : "r" (&rw->lock)
262                 : "memory"
263 #ifdef CONFIG_CHIP_M32700_TS1
264                 , "r6"
265 #endif  /* CONFIG_CHIP_M32700_TS1 */
266         );
267 }
268
/*
 * Acquire @rw for writing: atomically subtract the full RW_LOCK_BIAS;
 * a zero result means the lock was completely free and is now ours.
 * Otherwise add the bias back and spin until the counter returns to
 * RW_LOCK_BIAS, then retry.
 */
269 static inline void _raw_write_lock(rwlock_t *rw)
270 {
271         unsigned long tmp0, tmp1, tmp2;
272
273 #ifdef CONFIG_DEBUG_SPINLOCK
274         BUG_ON(rw->magic != RWLOCK_MAGIC);
275 #endif
276         /*
277          * rw->lock :  =RW_LOCK_BIAS_STR : unlock
278          *          : !=RW_LOCK_BIAS_STR : lock
279          *
280          * for ( ; ; ) {
281          *   rw->lock -= RW_LOCK_BIAS_STR;  <-- need atomic operation
282          *   if (rw->lock == 0) break;
283          *   rw->lock += RW_LOCK_BIAS_STR;  <-- need atomic operation
284          *   for ( ; rw->lock != RW_LOCK_BIAS_STR ; ) ;
285          * }
286          */
        /*
         * %1 is materialized once as the 32-bit bias (seth/or3).
         * Label 1: atomic subtract of the bias; nonzero result branches
         * to the slow path, where label 2 atomically adds the bias back
         * and label 3 spins reading until the counter equals the bias
         * again, then retries at 1.  Uses r7 (not r6) as the TS1
         * workaround scratch since %1 occupies an extra register.
         */
287         __asm__ __volatile__ (
288                 "# write_lock                                   \n\t"
289                 "seth   %1, #high(" RW_LOCK_BIAS_STR ");        \n\t"
290                 "or3    %1, %1, #low(" RW_LOCK_BIAS_STR ");     \n\t"
291                 ".fillinsn                                      \n"
292                 "1:                                             \n\t"
293                 "mvfc   %2, psw;                                \n\t"
294                 "clrpsw #0x40 -> nop;                           \n\t"
295                 DCACHE_CLEAR("%0", "r7", "%3")
296                 "lock   %0, @%3;                                \n\t"
297                 "sub    %0, %1;                                 \n\t"
298                 "unlock %0, @%3;                                \n\t"
299                 "mvtc   %2, psw;                                \n\t"
300                 "bnez   %0, 2f;                                 \n\t"
301                 LOCK_SECTION_START(".balign 4 \n\t")
302                 ".fillinsn                                      \n"
303                 "2:                                             \n\t"
304                 "clrpsw #0x40 -> nop;                           \n\t"
305                 DCACHE_CLEAR("%0", "r7", "%3")
306                 "lock   %0, @%3;                                \n\t"
307                 "add    %0, %1;                                 \n\t"
308                 "unlock %0, @%3;                                \n\t"
309                 "mvtc   %2, psw;                                \n\t"
310                 ".fillinsn                                      \n"
311                 "3:                                             \n\t"
312                 "ld     %0, @%3;                                \n\t"
313                 "beq    %0, %1, 1b;                             \n\t"
314                 "bra    3b;                                     \n\t"
315                 LOCK_SECTION_END
316                 : "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
317                 : "r" (&rw->lock)
318                 : "memory"
319 #ifdef CONFIG_CHIP_M32700_TS1
320                 , "r7"
321 #endif  /* CONFIG_CHIP_M32700_TS1 */
322         );
323 }
324
/*
 * Drop a read hold on @rw: a single atomic increment of the counter
 * (lock/unlock pair with psw bit 0x40 cleared around it).
 */
325 static inline void _raw_read_unlock(rwlock_t *rw)
326 {
327         unsigned long tmp0, tmp1;
328
329         __asm__ __volatile__ (
330                 "# read_unlock                  \n\t"
331                 "mvfc   %1, psw;                \n\t"
332                 "clrpsw #0x40 -> nop;           \n\t"
333                 DCACHE_CLEAR("%0", "r6", "%2")
334                 "lock   %0, @%2;                \n\t"
335                 "addi   %0, #1;                 \n\t"
336                 "unlock %0, @%2;                \n\t"
337                 "mvtc   %1, psw;                \n\t"
338                 : "=&r" (tmp0), "=&r" (tmp1)
339                 : "r" (&rw->lock)
340                 : "memory"
341 #ifdef CONFIG_CHIP_M32700_TS1
342                 , "r6"
343 #endif  /* CONFIG_CHIP_M32700_TS1 */
344         );
345 }
346
/*
 * Drop the write hold on @rw: atomically add RW_LOCK_BIAS back, which
 * restores the counter to its unlocked value (assuming we held it).
 */
347 static inline void _raw_write_unlock(rwlock_t *rw)
348 {
349         unsigned long tmp0, tmp1, tmp2;
350
351         __asm__ __volatile__ (
352                 "# write_unlock                                 \n\t"
353                 "seth   %1, #high(" RW_LOCK_BIAS_STR ");        \n\t"
354                 "or3    %1, %1, #low(" RW_LOCK_BIAS_STR ");     \n\t"
355                 "mvfc   %2, psw;                                \n\t"
356                 "clrpsw #0x40 -> nop;                           \n\t"
357                 DCACHE_CLEAR("%0", "r7", "%3")
358                 "lock   %0, @%3;                                \n\t"
359                 "add    %0, %1;                                 \n\t"
360                 "unlock %0, @%3;                                \n\t"
361                 "mvtc   %2, psw;                                \n\t"
362                 : "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
363                 : "r" (&rw->lock)
364                 : "memory"
365 #ifdef CONFIG_CHIP_M32700_TS1
366                 , "r7"
367 #endif  /* CONFIG_CHIP_M32700_TS1 */
368         );
369 }
370
/* Readers use the generic C trylock helper; no asm fast path here. */
371 #define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
372
373 static inline int _raw_write_trylock(rwlock_t *lock)
374 {
375         atomic_t *count = (atomic_t *)lock;
376         if (atomic_sub_and_test(RW_LOCK_BIAS, count))
377                 return 1;
378         atomic_add(RW_LOCK_BIAS, count);
379         return 0;
380 }
381
382 #endif  /* _ASM_M32R_SPINLOCK_H */