#ifndef _ASM_M32R_SPINLOCK_H
#define _ASM_M32R_SPINLOCK_H
/*
 *  linux/include/asm-m32r/spinlock.h
 *
 *  Copyright (C) 2001, 2002  Hitoshi Yamamoto
 *  Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
 */
#include <linux/config.h>	/* CONFIG_DEBUG_SPINLOCK, CONFIG_SMP */
#include <linux/compiler.h>
#include <asm/atomic.h>
extern int printk(const char * fmt, ...)
	__attribute__ ((format (printf, 1, 2)));
#define RW_LOCK_BIAS		 0x01000000
#define RW_LOCK_BIAS_STR	"0x01000000"
/* It seems that people are forgetting to
 * initialize their spinlocks properly, tsk tsk.
 * Remember to turn this off in 2.4. -ben
 */
#if defined(CONFIG_DEBUG_SPINLOCK)
#define SPINLOCK_DEBUG	1
#else
#define SPINLOCK_DEBUG	0
#endif
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
	volatile int lock;
#if SPINLOCK_DEBUG
	unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} spinlock_t;
#define SPINLOCK_MAGIC	0xdead4ead

#if SPINLOCK_DEBUG
#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT	/* */
#endif
#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
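
/*
 * Illustrative expansion of the initializer above: with
 * SPINLOCK_DEBUG enabled, SPIN_LOCK_UNLOCKED expands to
 * (spinlock_t) { 1 , SPINLOCK_MAGIC }, initializing both the lock
 * word and the magic member; with debugging disabled the magic
 * member does not exist and the initializer is simply { 1 }.
 */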
/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */
#define spin_is_locked(x)	(*(volatile int *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags)	_raw_spin_lock(lock)
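
/*
 * State encoding, illustrated (with a hypothetical lock "s"):
 *
 *	spinlock_t s = SPIN_LOCK_UNLOCKED;	s.lock == 1   (unlocked)
 *	_raw_spin_lock(&s);			s.lock == 0   (held)
 *	_raw_spin_unlock(&s);			s.lock == 1   (released)
 *
 * Contending CPUs spinning in _raw_spin_lock() decrement the word
 * below zero, which is why spin_is_locked() tests for <= 0 rather
 * than == 0.
 */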
/**
 * _raw_spin_trylock - Try spin lock and return a result
 * @lock: Pointer to the lock variable
 *
 * _raw_spin_trylock() tries to get the lock and returns a result.
 * On the m32r, the result value is 1 (= Success) or 0 (= Failure).
 */
static inline int _raw_spin_trylock(spinlock_t *lock)
{
	int oldval;
	unsigned long tmp1, tmp2;

	/*
	 * lock->lock :  =1 : unlock
	 *            : <=0 : lock
	 *
	 * oldval = lock->lock; <--+ need atomic operation
	 * lock->lock = 0;      <--+
	 */
	__asm__ __volatile__ (
		"# spin_trylock			\n\t"
		"ldi	%1, #0;			\n\t"
		"mvfc	%2, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%3")
		"lock	%0, @%3;		\n\t"
		"unlock	%1, @%3;		\n\t"
		"mvtc	%2, psw;		\n\t"
		: "=&r" (oldval), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&lock->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);

	return (oldval > 0);
}
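
/*
 * Typical use (illustrative sketch only, with a hypothetical lock
 * "my_lock"; real callers go through the generic spin_trylock()
 * wrapper, which also handles preemption):
 *
 *	if (_raw_spin_trylock(&my_lock)) {
 *		... critical section ...
 *		_raw_spin_unlock(&my_lock);
 *	} else {
 *		... lock was contended, fall back ...
 *	}
 */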
static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned long tmp0, tmp1;

#if SPINLOCK_DEBUG
	__label__ here;
here:
	if (lock->magic != SPINLOCK_MAGIC) {
		printk("eip: %p\n", &&here);
		BUG();
	}
#endif
	/*
	 * lock->lock :  =1 : unlock
	 *            : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   lock->lock -= 1;  <-- need atomic operation
	 *   if (lock->lock == 0) break;
	 *   for ( ; lock->lock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# spin_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"ld	%0, @%2;		\n\t"
		"blez	%0, 2b;			\n\t"
		"bra	1b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&lock->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
static inline void _raw_spin_unlock(spinlock_t *lock)
{
#if SPINLOCK_DEBUG
	BUG_ON(lock->magic != SPINLOCK_MAGIC);
	BUG_ON(!spin_is_locked(lock));
#endif
	mb();
	lock->lock = 1;
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
	volatile int lock;
#if SPINLOCK_DEBUG
	unsigned magic;
#endif
#ifdef CONFIG_PREEMPT
	unsigned int break_lock;
#endif
} rwlock_t;
#define RWLOCK_MAGIC	0xdeaf1eed

#if SPINLOCK_DEBUG
#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT	/* */
#endif
#define RW_LOCK_UNLOCKED	(rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

#define rwlock_is_locked(x)	((x)->lock != RW_LOCK_BIAS)
/*
 * Read-write locks are implemented as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 *
 * This scheme was inherited from the i386 port, where the spinlock
 * helpers live in arch/i386/kernel/semaphore.c.
 */
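
/*
 * Worked example of the counter arithmetic (illustrative):
 *
 *	0x01000000	unlocked (RW_LOCK_BIAS)
 *	0x00ffffff	one reader holds the lock   (bias - 1)
 *	0x00fffffe	two readers hold the lock   (bias - 2)
 *	0x00000000	one writer holds the lock   (bias - RW_LOCK_BIAS)
 *	negative	transient: a writer attempted the lock while
 *			readers were active, before adding the bias back
 */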
static inline void _raw_read_lock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

#if SPINLOCK_DEBUG
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	/*
	 * rw->lock :  >0 : unlock
	 *          : <=0 : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= 1;  <-- need atomic operation
	 *   if (rw->lock >= 0) break;
	 *   rw->lock += 1;  <-- need atomic operation
	 *   for ( ; rw->lock <= 0 ; );
	 * }
	 */
	__asm__ __volatile__ (
		"# read_lock			\n\t"
		".fillinsn			\n"
		"1:				\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #-1;		\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		"bltz	%0, 2f;			\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn			\n"
		"2:				\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		".fillinsn			\n"
		"3:				\n\t"
		"ld	%0, @%2;		\n\t"
		"bgtz	%0, 1b;			\n\t"
		"bra	3b;			\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
static inline void _raw_write_lock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

#if SPINLOCK_DEBUG
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	/*
	 * rw->lock :  =RW_LOCK_BIAS_STR : unlock
	 *          : !=RW_LOCK_BIAS_STR : lock
	 *
	 * for ( ; ; ) {
	 *   rw->lock -= RW_LOCK_BIAS_STR;  <-- need atomic operation
	 *   if (rw->lock == 0) break;
	 *   rw->lock += RW_LOCK_BIAS_STR;  <-- need atomic operation
	 *   for ( ; rw->lock != RW_LOCK_BIAS_STR ; ) ;
	 * }
	 */
	__asm__ __volatile__ (
		"# write_lock					\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		".fillinsn					\n"
		"1:						\n\t"
		"mvfc	%2, psw;				\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"sub	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		"bnez	%0, 2f;					\n\t"
		LOCK_SECTION_START(".balign 4 \n\t")
		".fillinsn					\n"
		"2:						\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"add	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		".fillinsn					\n"
		"3:						\n\t"
		"ld	%0, @%3;				\n\t"
		"beq	%0, %1, 1b;				\n\t"
		"bra	3b;					\n\t"
		LOCK_SECTION_END
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
static inline void _raw_read_unlock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1;

	__asm__ __volatile__ (
		"# read_unlock			\n\t"
		"mvfc	%1, psw;		\n\t"
		"clrpsw	#0x40 -> nop;		\n\t"
		DCACHE_CLEAR("%0", "r6", "%2")
		"lock	%0, @%2;		\n\t"
		"addi	%0, #1;			\n\t"
		"unlock	%0, @%2;		\n\t"
		"mvtc	%1, psw;		\n\t"
		: "=&r" (tmp0), "=&r" (tmp1)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r6"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
static inline void _raw_write_unlock(rwlock_t *rw)
{
	unsigned long tmp0, tmp1, tmp2;

	__asm__ __volatile__ (
		"# write_unlock					\n\t"
		"seth	%1, #high(" RW_LOCK_BIAS_STR ");	\n\t"
		"or3	%1, %1, #low(" RW_LOCK_BIAS_STR ");	\n\t"
		"mvfc	%2, psw;				\n\t"
		"clrpsw	#0x40 -> nop;				\n\t"
		DCACHE_CLEAR("%0", "r7", "%3")
		"lock	%0, @%3;				\n\t"
		"add	%0, %1;					\n\t"
		"unlock	%0, @%3;				\n\t"
		"mvtc	%2, psw;				\n\t"
		: "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2)
		: "r" (&rw->lock)
		: "memory"
#ifdef CONFIG_CHIP_M32700_TS1
		, "r7"
#endif	/* CONFIG_CHIP_M32700_TS1 */
	);
}
#define _raw_read_trylock(lock)	generic_raw_read_trylock(lock)
static inline int _raw_write_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
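
/*
 * Note (illustrative): atomic_sub_and_test() returns true only when
 * the subtraction leaves the counter at exactly zero, i.e. the lock
 * was at RW_LOCK_BIAS with no readers or writer active.  Any active
 * reader leaves a nonzero (negative) result, so the bias is added
 * back and the trylock fails without spinning.
 */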
#endif	/* _ASM_M32R_SPINLOCK_H */