/*
 * Copyright (C) 1999, 2001, 02, 03 Ralf Baechle
 *
 * Heavily inspired by the Alpha implementation
 */
6 #include <linux/config.h>
7 #include <linux/errno.h>
8 #include <linux/module.h>
9 #include <linux/init.h>
10 #include <linux/sched.h>
12 #ifdef CONFIG_CPU_HAS_LLDSCD
/*
 * NOTE(review): the comment below describes the lock as needed on machines
 * WITHOUT lld/scd, yet the guard above reads #ifdef CONFIG_CPU_HAS_LLDSCD.
 * This extraction is lossy — verify the polarity of this conditional
 * against the complete source file.
 */
14 * On machines without lld/scd we need a spinlock to make the manipulation of
15 * sem->count and sem->waking atomic. Scalability isn't an issue because
16 * this lock is used on UP only so it's just an empty variable.
/*
 * Global lock serializing updates to sem->count and sem->waking in the
 * fallback (non-ll/sc) waking_non_zero* paths below; exported so modules
 * using the out-of-line semaphore paths can link against it.
 */
18 spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
20 EXPORT_SYMBOL(semaphore_lock);
/*
 * Semaphores are implemented using a two-way counter: The "count" variable is
 * decremented for each process that tries to sleep, while the "waking" variable
 * is incremented when the "up()" code goes to wake up waiting processes.
 *
 * Notably, the inline "up()" and "down()" functions can efficiently test if
 * they need to do any extra work (up needs to do something only if count was
 * negative before the increment operation).
 *
 * waking_non_zero() must execute atomically.
 *
 * When __up() is called, the count was negative before incrementing it, and we
 * need to wake up somebody.
 *
 * This routine adds one to the count of processes that need to wake up and
 * exit. ALL waiting processes actually wake up but only the one that gets to
 * the "waking" field first will gate through and acquire the semaphore. The
 * others will go back to sleep.
 *
 * Note that these functions are only called when there is contention on the
 * lock, and as such all this is the "non-critical" part of the whole semaphore
 * business. The critical part is the inline stuff in <asm/semaphore.h> where
 * we want to avoid any extra jumps and calls.
 */
/*
 * __up_wakeup - out-of-line slow path of up(), entered only when the
 * count was negative (i.e. there are sleepers to wake).
 *
 * NOTE(review): the function body is missing from this extraction — only
 * the signature and export are visible. Presumably it bumps the "waking"
 * count and wakes a task on sem->wait; confirm against the full source.
 */
47 void __up_wakeup(struct semaphore *sem)
52 EXPORT_SYMBOL(__up_wakeup);
54 #ifdef CONFIG_CPU_HAS_LLSC

/*
 * waking_non_zero - atomically consume one "waking" token, if available.
 *
 * ll/sc variant: a load-linked/store-conditional sequence retries until a
 * decrement of sem->waking commits without interference from another CPU
 * or an interrupt.
 *
 * NOTE(review): most of the asm body (branch/sc/return path) is missing
 * from this extraction — confirm the sequence against the full source.
 */
56 static inline int waking_non_zero(struct semaphore *sem)
61 "1: ll %1, %2 # waking_non_zero \n"
67 : "=r" (ret), "=r" (tmp), "+m" (sem->waking)

73 #else /* !CONFIG_CPU_HAS_LLSC */

/*
 * Fallback variant for CPUs without ll/sc: take semaphore_lock with
 * interrupts disabled so the read-test-decrement of sem->waking is atomic.
 */
75 static inline int waking_non_zero(struct semaphore *sem)
80 spin_lock_irqsave(&semaphore_lock, flags);
81 waking = atomic_read(&sem->waking);
/* Visible code only decrements; the guarding "if (waking > 0)" line is
 * missing from this extraction — confirm against the full source. */
83 atomic_set(&sem->waking, waking - 1);
86 spin_unlock_irqrestore(&semaphore_lock, flags);

91 #endif /* !CONFIG_CPU_HAS_LLSC */
/*
 * Perform the "down" function. Return zero for semaphore acquired, return
 * negative for signalled out of the function.
 *
 * If called from down, the return is ignored and the wait loop is not
 * interruptible. This means that a task waiting on a semaphore using "down()"
 * cannot be killed until someone does an "up()" on the semaphore.
 *
 * If called from down_interruptible, the return value gets checked upon return.
 * If the return value is negative then the task continues with the negative
 * value in the return register (it can be tested by the caller).
 *
 * Either form may be used in conjunction with "up()".
 */
/*
 * __down_failed - slow path of down(): sleep (uninterruptibly) until a
 * "waking" token can be consumed via waking_non_zero().
 *
 * NOTE(review): this extraction is lossy — the wait-queue declaration, the
 * loop construct around waking_non_zero(), the schedule() call and the
 * closing braces are not visible. Confirm the loop structure against the
 * complete source before relying on this listing.
 */
108 void __sched __down_failed(struct semaphore * sem)
110 struct task_struct *tsk = current;
/* Queue ourselves exclusively so up() wakes only one contender at a time. */
113 init_waitqueue_entry(&wait, tsk);
114 __set_current_state(TASK_UNINTERRUPTIBLE);
115 add_wait_queue_exclusive(&sem->wait, &wait);
118 * Ok, we're set up. sem->count is known to be less than zero
121 * We can let go the lock for purposes of waiting.
122 * We re-acquire it after awaking so as to protect
123 * all semaphore operations.
125 * If "up()" is called before we call waking_non_zero() then
126 * we will catch it right away. If it is called later then
127 * we will have to go through a wakeup cycle to catch it.
129 * Multiple waiters contend for the semaphore lock to see
130 * who gets to gate through and who has to wait some more.
/* Won the race for a waking token: we own the semaphore, stop looping. */
133 if (waking_non_zero(sem))
/* Re-arm the sleep state before going around the (not shown) loop again. */
136 __set_current_state(TASK_UNINTERRUPTIBLE);
138 __set_current_state(TASK_RUNNING);
139 remove_wait_queue(&sem->wait, &wait);
142 EXPORT_SYMBOL(__down_failed);
144 #ifdef CONFIG_CPU_HAS_LLDSCD
147 * waking_non_zero_interruptible:
152 * We must undo the sem->count down_interruptible decrement
153 * simultaneously and atomically with the sem->waking adjustment,
154 * otherwise we can race with wake_one_more.
156 * This is accomplished by doing a 64-bit lld/scd on the 2 32-bit words.
158 * This is crazy. Normally it's strictly forbidden to use 64-bit operations
159 * in the 32-bit MIPS kernel. In this case it's however ok because if an
160 * interrupt has destroyed the upper half of registers sc will fail.
161 * Note also that this will not work for MIPS32 CPUs!
165 * If(sem->waking > 0) {
166 * Decrement(sem->waking)
168 * } else If(signal_pending(tsk)) {
169 * Increment(sem->count)
/*
 * lld/scd variant: operates on count and waking as one 64-bit quantity so
 * both words change atomically.  NOTE(review): the return type, locals,
 * most of the asm body and the return statement are missing from this
 * extraction — confirm against the complete source.
 */
177 waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
181 __asm__ __volatile__(
182 " .set push # waking_non_zero_interruptible \n"
/* Consume one waking token (low word of the 64-bit pair). */
189 " daddiu %1, %1, -1 \n"
/* Signal pending: add 1 to the upper 32 bits, i.e. undo the count
 * decrement done by down_interruptible's fast path. */
194 " dli $1, 0x0000000100000000 \n"
195 " daddu %1, %1, $1 \n"
199 : "=&r" (ret), "=&r" (tmp), "=m" (*sem)
200 : "r" (signal_pending(tsk)), "i" (-EINTR));

205 #else /* !CONFIG_CPU_HAS_LLDSCD */

/*
 * Fallback variant: semaphore_lock (IRQs off) makes the waking/count
 * adjustment pair atomic.  Returns 0 to keep waiting; visible code implies
 * nonzero/-EINTR results on the (not shown) success/signal return paths —
 * confirm against the complete source.
 */
207 static inline int waking_non_zero_interruptible(struct semaphore *sem,
208 struct task_struct *tsk)
210 int waking, pending, ret = 0;
/* Sample the signal state once, outside the lock. */
213 pending = signal_pending(tsk);
215 spin_lock_irqsave(&semaphore_lock, flags);
216 waking = atomic_read(&sem->waking);
/* Token available: consume it (guarding "if (waking > 0)" not visible). */
218 atomic_set(&sem->waking, waking - 1);
/* No token but a signal: give back the count we took in the fast path. */
220 } else if (pending) {
221 atomic_set(&sem->count, atomic_read(&sem->count) + 1);
224 spin_unlock_irqrestore(&semaphore_lock, flags);

229 #endif /* !CONFIG_CPU_HAS_LLDSCD */
/*
 * __down_failed_interruptible - slow path of down_interruptible(): sleep
 * until a waking token is consumed or a signal arrives.  Returns 0 on
 * acquisition, negative (-EINTR) when signalled out.
 *
 * NOTE(review): as with __down_failed, the wait-queue declaration, the
 * surrounding loop, schedule() and the return of ret are missing from this
 * extraction — confirm against the complete source.
 */
231 int __sched __down_failed_interruptible(struct semaphore * sem)
233 struct task_struct *tsk = current;
/* Exclusive wait: up() wakes at most one of us per token. */
237 init_waitqueue_entry(&wait, tsk);
238 __set_current_state(TASK_INTERRUPTIBLE);
239 add_wait_queue_exclusive(&sem->wait, &wait);
242 * Ok, we're set up. sem->count is known to be less than zero
245 * We can let go the lock for purposes of waiting.
246 * We re-acquire it after awaking so as to protect
247 * all semaphore operations.
249 * If "up()" is called before we call waking_non_zero() then
250 * we will catch it right away. If it is called later then
251 * we will have to go through a wakeup cycle to catch it.
253 * Multiple waiters contend for the semaphore lock to see
254 * who gets to gate through and who has to wait some more.
/* Nonzero ret means either acquired (token) or signalled (-EINTR). */
257 ret = waking_non_zero_interruptible(sem, tsk);
260 /* ret != 0 only if we get interrupted -arca */
/* Neither acquired nor signalled: re-arm and go around again. */
265 __set_current_state(TASK_INTERRUPTIBLE);
267 __set_current_state(TASK_RUNNING);
268 remove_wait_queue(&sem->wait, &wait);
273 EXPORT_SYMBOL(__down_failed_interruptible);