2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1996 Linus Torvalds
7 * Copyright (C) 1998, 99, 2000, 01 Ralf Baechle
8 * Copyright (C) 1999, 2000, 01 Silicon Graphics, Inc.
9 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
11 #ifndef _ASM_SEMAPHORE_H
12 #define _ASM_SEMAPHORE_H
14 #include <linux/compiler.h>
15 #include <linux/config.h>
16 #include <linux/spinlock.h>
17 #include <linux/wait.h>
18 #include <linux/rwsem.h>
19 #include <asm/atomic.h>
/* NOTE(review): only the tail of struct semaphore is visible in this chunk —
 * the count/waking members (and any debug fields) referenced throughout the
 * file are declared on lines not shown here. */
29 wait_queue_head_t wait;	/* tasks sleeping in the __down_failed* slow paths */
33 } __attribute__((aligned(8)));	/* 8-byte alignment: the lld/scd fast path below treats count+waking as one 64-bit pair */
/* Debug variant: seeds .__magic with its own address so CHECK_MAGIC can
 * detect a corrupted/uninitialized semaphore (see sema_init below).
 * NOTE(review): the #if WAITQUEUE_DEBUG / #else / #endif lines selecting
 * between these two definitions are not visible in this chunk. */
36 # define __SEM_DEBUG_INIT(name) , .__magic = (long)&(name).__magic
/* Non-debug variant: expands to nothing. */
38 # define __SEM_DEBUG_INIT(name)
/* Static initializer: "_count" free slots, no pending wakeups, empty wait
 * queue, plus the debug magic when WAITQUEUE_DEBUG is enabled.
 * NOTE(review): the macro's closing "}" line is not visible in this chunk. */
41 #define __SEMAPHORE_INITIALIZER(name,_count) { \
42 .count = ATOMIC_INIT(_count), \
43 .waking = ATOMIC_INIT(0), \
44 .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
45 __SEM_DEBUG_INIT(name) \
/* A mutex is simply a semaphore with an initial count of 1. */
48 #define __MUTEX_INITIALIZER(name) __SEMAPHORE_INITIALIZER(name, 1)
/* Define-and-initialize helper; the name is passed through so the
 * initializer can reference the object's own wait queue and magic field. */
50 #define __DECLARE_SEMAPHORE_GENERIC(name,count) \
51 struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)
/* count 1 => unlocked mutex; count 0 => created already locked. */
53 #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name, 1)
54 #define DECLARE_MUTEX_LOCKED(name) __DECLARE_SEMAPHORE_GENERIC(name,0)
/* Runtime initializer, mirroring __SEMAPHORE_INITIALIZER: val free slots,
 * zero pending wakeups, empty wait queue.
 * NOTE(review): the function braces and the #if WAITQUEUE_DEBUG guard
 * around the __magic assignment are not visible in this chunk. */
56 static inline void sema_init (struct semaphore *sem, int val)
58 atomic_set(&sem->count, val);
59 atomic_set(&sem->waking, 0);
60 init_waitqueue_head(&sem->wait);
/* Debug-only: magic holds its own address, checked later by CHECK_MAGIC. */
62 sem->__magic = (long)&sem->__magic;
/* NOTE(review): only the signatures are visible here; the bodies
 * (presumably sema_init(sem, 1) and sema_init(sem, 0) respectively —
 * TODO confirm against the full file) fall on lines not shown. */
66 static inline void init_MUTEX (struct semaphore *sem)
71 static inline void init_MUTEX_LOCKED (struct semaphore *sem)
76 #ifndef CONFIG_CPU_HAS_LLDSCD
/* NOTE(review): the opening and closing comment-delimiter lines of this
 * block comment are not visible in this chunk. */
78 * On machines without lld/scd we need a spinlock to make the manipulation of
79 * sem->count and sem->waking atomic.
81 extern spinlock_t semaphore_lock;
/* Out-of-line slow paths — presumably defined in the arch's semaphore.c
 * (not visible here): block the caller, block interruptibly (returning
 * -EINTR or 0), and wake a sleeper after up(), respectively. */
84 extern void __down_failed(struct semaphore * sem);
85 extern int __down_failed_interruptible(struct semaphore * sem);
86 extern void __up_wakeup(struct semaphore * sem);
/* Acquire the semaphore, sleeping if necessary.
 * NOTE(review): the function braces, the declaration of "count", and the
 * slow-path call taken when the decrement goes negative (presumably
 * __down_failed(sem) — TODO confirm) are on lines not visible here. */
88 static inline void down(struct semaphore * sem)
93 CHECK_MAGIC(sem->__magic);
/* Fast path: a non-negative result means we took a free slot. */
96 count = atomic_dec_return(&sem->count);
97 if (unlikely(count < 0))
/* NOTE(review): the comment delimiters, function braces, declaration of
 * "count", and the final "return 0" success path are not visible here. */
102 * Interruptible try to acquire a semaphore. If we obtained
103 * it, return zero. If we were interrupted, returns -EINTR
105 static inline int down_interruptible(struct semaphore * sem)
110 CHECK_MAGIC(sem->__magic);
/* Same fast path as down(); the slow path may return -EINTR on a signal. */
113 count = atomic_dec_return(&sem->count);
114 if (unlikely(count < 0))
115 return __down_failed_interruptible(sem);
120 #ifdef CONFIG_CPU_HAS_LLDSCD
/* NOTE(review): comment delimiters and several pseudocode lines of the
 * algorithm description are missing from this chunk. */
123 * down_trylock returns 0 on success, 1 if we failed to get the lock.
125 * We must manipulate count and waking simultaneously and atomically.
126 * Here, we do this by using lld/scd on the pair of 32-bit words.
130 * Decrement(sem->count)
131 * If(sem->count >=0) {
132 * Return(SUCCESS) // resource is free
134 * If(sem->waking <= 0) { // if no wakeup pending
135 * Increment(sem->count) // undo decrement
138 * Decrement(sem->waking) // otherwise "steal" wakeup
143 static inline int down_trylock(struct semaphore * sem)
145 long ret, tmp, tmp2, sub;
148 CHECK_MAGIC(sem->__magic);
/* NOTE(review): the asm below is incomplete in this chunk — the lld/scd
 * load-linked/store-conditional pair, the retry label, the "2:" success
 * label, the .set pops, and the input/clobber operand lists all fall on
 * lines not shown. Comments below describe only the visible instructions. */
151 __asm__ __volatile__(
152 " .set mips3 # down_trylock \n"
154 " dli %3, 0x0000000100000000 # count -= 1 \n"
156 " li %0, 0 # ret = 0 \n"
157 " bgez %1, 2f # if count >= 0 \n"
158 " sll %2, %1, 0 # extract waking \n"
159 " blez %2, 1f # if waking < 0 -> 1f \n"
160 " daddiu %1, %1, -1 # waking -= 1 \n"
162 "1: daddu %1, %1, %3 # count += 1 \n"
163 " li %0, 1 # ret = 1 \n"
168 : "=&r"(ret), "=&r"(tmp), "=&r"(tmp2), "=&r"(sub)
/* NOTE(review): comment delimiters are missing from this chunk. */
176 * Note! This is subtle. We jump to wake people up only if
177 * the semaphore was negative (== somebody was waiting on it).
179 static inline void up(struct semaphore * sem)
/* NOTE(review): the declaration of "count" (used below) is on a line not
 * visible here. */
181 unsigned long tmp, tmp2;
185 CHECK_MAGIC(sem->__magic);
188 * We must manipulate count and waking simultaneously and atomically.
189 * Otherwise we have races between up and __down_failed_interruptible
190 * waking up on a signal.
/* NOTE(review): the asm is incomplete in this chunk — the lld/scd pair,
 * the retry branch, and the input operand list are on lines not shown.
 * Visible part: split the 64-bit pair, bump count, conditionally bump
 * waking when a sleeper exists, and reassemble the pair. */
193 __asm__ __volatile__(
197 " dsra32 %0, %1, 0 # extract count to %0 \n"
198 " daddiu %0, 1 # count += 1 \n"
199 " slti %2, %0, 1 # %3 = (%0 <= 0) \n"
200 " daddu %1, %2 # waking += %3 \n"
201 " dsll32 %1, %1, 0 # zero-extend %1 \n"
202 " dsrl32 %1, %1, 0 \n"
203 " dsll32 %2, %0, 0 # Reassemble union \n"
204 " or %1, %2 # from count and waking \n"
208 : "=&r"(count), "=&r"(tmp), "=&r"(tmp2), "+m"(*sem)
/* Someone was waiting (count was <= 0): wake them via the slow path. */
212 if (unlikely(count <= 0))
/* Spinlock-protected fallback for CPUs without lld/scd: same algorithm as
 * the asm variant above, with semaphore_lock making count+waking atomic.
 * NOTE(review): comment delimiters, function braces, local declarations,
 * and the lines setting/returning "ret" are not visible in this chunk. */
219 * Non-blockingly attempt to down() a semaphore.
220 * Returns zero if we acquired it
222 static inline int down_trylock(struct semaphore * sem)
229 CHECK_MAGIC(sem->__magic);
232 spin_lock_irqsave(&semaphore_lock, flags);
233 count = atomic_read(&sem->count) - 1;
234 atomic_set(&sem->count, count);
235 if (unlikely(count < 0)) {
236 waking = atomic_read(&sem->waking);
/* No pending wakeup to steal: undo the decrement (acquire failed). */
238 atomic_set(&sem->count, count + 1);
/* Otherwise consume ("steal") the pending wakeup. */
241 atomic_set(&sem->waking, waking - 1);
245 spin_unlock_irqrestore(&semaphore_lock, flags);
/* NOTE(review): comment delimiters, function braces, local declarations,
 * and the line(s) adjusting "waking" when sleepers exist are not visible
 * in this chunk. */
251 * Note! This is subtle. We jump to wake people up only if
252 * the semaphore was negative (== somebody was waiting on it).
254 static inline void up(struct semaphore * sem)
260 CHECK_MAGIC(sem->__magic);
263 * We must manipulate count and waking simultaneously and atomically.
264 * Otherwise we have races between up and __down_failed_interruptible
265 * waking up on a signal.
268 spin_lock_irqsave(&semaphore_lock, flags);
269 count = atomic_read(&sem->count) + 1;
270 waking = atomic_read(&sem->waking);
273 atomic_set(&sem->count, count);
274 atomic_set(&sem->waking, waking);
275 spin_unlock_irqrestore(&semaphore_lock, flags);
/* Someone was waiting (count was <= 0): wake them via the slow path
 * (presumably __up_wakeup(sem) — the call line is not visible here). */
277 if (unlikely(count <= 0))
281 #endif /* CONFIG_CPU_HAS_LLDSCD */
283 #endif /* _ASM_SEMAPHORE_H */