/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/war.h>	/* R10000_LLSC_WAR */

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
	volatile unsigned int lock;
} spinlock_t;

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }

#define spin_lock_init(x)	do { (x)->lock = 0; } while (0)

#define spin_is_locked(x)	((x)->lock != 0)
#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

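/*
 * Illustrative only, not part of this header: typical use of the
 * helpers above.
 *
 *	spinlock_t l = SPIN_LOCK_UNLOCKED;
 *
 *	spin_lock_init(&l);		// equivalent to the static initializer
 *	if (spin_is_locked(&l))
 *		spin_unlock_wait(&l);	// spin until the holder drops it
 */
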
/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */

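/*
 * A hedged usage sketch of the two variants (generic kernel API, not
 * defined in this file): code that can also race with interrupt context
 * takes the IRQ-clearing variant, everything else the plain one.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&l, flags);	// variant that clears IRQs
 *	...critical section...
 *	spin_unlock_irqrestore(&l, flags);
 */
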
static inline void _raw_spin_lock(spinlock_t *lock)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		/* R10000 ll/sc errata: retry with branch-likely (beqzl) */
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_spin_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 li	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 sync						\n"
		"	.set	reorder					\n"
		: "=m" (lock->lock), "=&r" (tmp)
		: "m" (lock->lock)
		: "memory");
	}
}

static inline void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
	"	.set	noreorder	# _raw_spin_unlock	\n"
	"	sync						\n"
	"	sw	$0, %0					\n"
	"	.set	reorder					\n"
	: "=m" (lock->lock)
	: "m" (lock->lock)
	: "memory");
}

static inline unsigned int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned int temp, res;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	 nop						\n"
		"	andi	%2, %0, 1				\n"
		"	sync						\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_spin_trylock	\n"
		"1:	ll	%0, %3					\n"
		"	ori	%2, %0, 1				\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 andi	%2, %0, 1				\n"
		"	sync						\n"
		"	.set	reorder"
		: "=&r" (temp), "=m" (lock->lock), "=&r" (res)
		: "m" (lock->lock)
		: "memory");
	}

	return res == 0;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

typedef struct {
	volatile unsigned int lock;
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while (0)

#define rwlock_is_locked(x)	((x)->lock)

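/*
 * Encoding of the lock word as used by the assembly below; a reading
 * aid inferred from the code, not normative:
 *
 *	0x00000000	free
 *	0x0000000n	n active readers
 *	0x80000000	held by one writer (lui %1, 0x8000)
 *
 * The "mix" from the NOTE above then looks like (generic kernel API):
 *
 *	write_lock_irqsave(&rw, flags);	// writers must be irq-safe
 *	read_lock(&rw);			// readers may stay non-irqsafe
 */
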
static inline void _raw_read_lock(rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_read_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bltz	%1, 1b					\n"
		"	 addu	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 sync						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

/* Note the use of sub, not subu, which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer.  */
static inline void _raw_read_unlock(rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	ll	%1, %2		# _raw_read_unlock	\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	sync						\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_read_unlock	\n"
		"	sync						\n"
		"1:	ll	%1, %2					\n"
		"	sub	%1, 1					\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

static inline void _raw_write_lock(rwlock_t *rw)
{
	unsigned int tmp;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_write_lock	\n"
		"1:	ll	%1, %2					\n"
		"	bnez	%1, 1b					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 nop						\n"
		"	sync						\n"
		"	.set	reorder					\n"
		: "=m" (rw->lock), "=&r" (tmp)
		: "m" (rw->lock)
		: "memory");
	}
}

static inline void _raw_write_unlock(rwlock_t *rw)
{
	__asm__ __volatile__(
	"	sync			# _raw_write_unlock	\n"
	"	sw	$0, %0					\n"
	: "=m" (rw->lock)
	: "m" (rw->lock)
	: "memory");
}

static inline int _raw_write_trylock(rwlock_t *rw)
{
	unsigned int tmp;
	int ret = 0;

	if (R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqzl	%1, 1b					\n"
		"	 nop						\n"
		"	sync						\n"
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
		"	.set	noreorder	# _raw_write_trylock	\n"
		"	li	%2, 0					\n"
		"1:	ll	%1, %3					\n"
		"	bnez	%1, 2f					\n"
		"	 lui	%1, 0x8000				\n"
		"	sc	%1, %0					\n"
		"	beqz	%1, 1b					\n"
		"	 sync						\n"
		"	li	%2, 1					\n"
		"	.set	reorder					\n"
		"2:							\n"
		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
		: "m" (rw->lock)
		: "memory");
	}

	return ret;
}

#endif /* _ASM_SPINLOCK_H */