/*
- * Copyright (C) 1999, 2001, 02, 03 Ralf Baechle
+ * MIPS-specific semaphore code.
*
- * Heavily inspired by the Alpha implementation
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
+ * to eliminate the SMP races in the old version between the updates
+ * of `count' and `waking'. Now we use negative `count' values to
+ * indicate that some process(es) are waiting for the semaphore.
*/
+
#include <linux/config.h>
-#include <linux/errno.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/init.h>
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+#include <asm/errno.h>
-#ifndef CONFIG_CPU_HAS_LLDSCD
-/*
- * On machines without lld/scd we need a spinlock to make the manipulation of
- * sem->count and sem->waking atomic. Scalability isn't an issue because
- * this lock is used on UP only so it's just an empty variable.
- */
-spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
-
-EXPORT_SYMBOL(semaphore_lock);
-#endif
+#ifdef CONFIG_CPU_HAS_LLSC
/*
- * Semaphores are implemented using a two-way counter: The "count" variable is
- * decremented for each process that tries to sleep, while the "waking" variable
- * is incremented when the "up()" code goes to wake up waiting processes.
- *
- * Notably, the inline "up()" and "down()" functions can efficiently test if
- * they need to do any extra work (up needs to do something only if count was
- * negative before the increment operation.
- *
- * waking_non_zero() must execute atomically.
+ * Atomically update sem->count.
+ * This does the equivalent of the following:
*
- * When __up() is called, the count was negative before incrementing it, and we
- * need to wake up somebody.
- *
- * This routine adds one to the count of processes that need to wake up and
- * exit. ALL waiting processes actually wake up but only the one that gets to
- * the "waking" field first will gate through and acquire the semaphore. The
- * others will go back to sleep.
- *
- * Note that these functions are only called when there is contention on the
- * lock, and as such all this is the "non-critical" part of the whole semaphore
- * business. The critical part is the inline stuff in <asm/semaphore.h> where
- * we want to avoid any extra jumps and calls.
+ * old_count = sem->count;
+ * tmp = MAX(old_count, 0) + incr;
+ * sem->count = tmp;
+ * return old_count;
*/
-void __up_wakeup(struct semaphore *sem)
+static inline int __sem_update_count(struct semaphore *sem, int incr)
{
- wake_up(&sem->wait);
-}
-
-EXPORT_SYMBOL(__up_wakeup);
-
-#ifdef CONFIG_CPU_HAS_LLSC
-
-static inline int waking_non_zero(struct semaphore *sem)
-{
- int ret, tmp;
+ int old_count, tmp;
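+	/*
+	 * The branch-free ll/sc sequence below computes MAX(old_count, 0)
+	 * with a sign mask (sra/not/and zero out a negative old_count),
+	 * adds incr, and stores the result with sc, retrying on failure.
+	 */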
__asm__ __volatile__(
- "1: ll %1, %2 # waking_non_zero \n"
- " blez %1, 2f \n"
- " subu %0, %1, 1 \n"
- " sc %0, %2 \n"
- " beqz %0, 1b \n"
- "2: \n"
- : "=r" (ret), "=r" (tmp), "+m" (sem->waking)
- : "0" (0));
-
- return ret;
+ "1: ll %0, %2 \n"
+ " sra %1, %0, 31 \n"
+ " not %1 \n"
+ " and %1, %0, %1 \n"
+ " add %1, %1, %3 \n"
+ " sc %1, %2 \n"
+ " beqz %1, 1b \n"
+ : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
+ : "r" (incr), "m" (sem->count));
+
+ return old_count;
}
-#else /* !CONFIG_CPU_HAS_LLSC */
+#else
-static inline int waking_non_zero(struct semaphore *sem)
+/*
+ * On machines without ll/sc we need a spinlock to make the update of
+ * sem->count atomic.  Scalability isn't an issue because this lock is
+ * used on UP only so it's just an empty variable.
+ */
+static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
+
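+/*
+ * Same MAX(count, 0) + incr update as the ll/sc version above, made
+ * atomic by semaphore_lock instead.
+ */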
+static inline int __sem_update_count(struct semaphore *sem, int incr)
{
unsigned long flags;
- int waking, ret = 0;
+ int old_count, tmp;
spin_lock_irqsave(&semaphore_lock, flags);
- waking = atomic_read(&sem->waking);
- if (waking > 0) {
- atomic_set(&sem->waking, waking - 1);
- ret = 1;
- }
+ old_count = atomic_read(&sem->count);
+ tmp = max_t(int, old_count, 0) + incr;
+ atomic_set(&sem->count, tmp);
spin_unlock_irqrestore(&semaphore_lock, flags);
- return ret;
+ return old_count;
}
-#endif /* !CONFIG_CPU_HAS_LLSC */
-
-/*
- * Perform the "down" function. Return zero for semaphore acquired, return
- * negative for signalled out of the function.
- *
- * If called from down, the return is ignored and the wait loop is not
- * interruptible. This means that a task waiting on a semaphore using "down()"
- * cannot be killed until someone does an "up()" on the semaphore.
- *
- * If called from down_interruptible, the return value gets checked upon return.
- * If the return value is negative then the task continues with the negative
- * value in the return register (it can be tested by the caller).
- *
- * Either form may be used in conjunction with "up()".
- */
+#endif
-void __sched __down_failed(struct semaphore * sem)
+void __up(struct semaphore *sem)
{
- struct task_struct *tsk = current;
- wait_queue_t wait;
-
- init_waitqueue_entry(&wait, tsk);
- __set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue_exclusive(&sem->wait, &wait);
-
/*
- * Ok, we're set up. sem->count is known to be less than zero
- * so we must wait.
- *
- * We can let go the lock for purposes of waiting.
- * We re-acquire it after awaking so as to protect
- * all semaphore operations.
- *
- * If "up()" is called before we call waking_non_zero() then
- * we will catch it right away. If it is called later then
- * we will have to go through a wakeup cycle to catch it.
- *
- * Multiple waiters contend for the semaphore lock to see
- * who gets to gate through and who has to wait some more.
+ * Note that we incremented count in up() before we came here,
+ * but that was ineffective since the result was <= 0, and
+ * any negative value of count is equivalent to 0.
+ * This ends up setting count to 1, unless count is now > 0
+ * (i.e. because some other cpu has called up() in the meantime),
+ * in which case we just increment count.
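+	 *
+	 * For example, with a single sleeper count is -1; up() raised it
+	 * to 0 before calling us, we set it to 1 here, and the sleeper we
+	 * wake below takes it back to 0 in __sem_update_count(sem, -1).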
*/
- for (;;) {
- if (waking_non_zero(sem))
- break;
- schedule();
- __set_current_state(TASK_UNINTERRUPTIBLE);
- }
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(&sem->wait, &wait);
+ __sem_update_count(sem, 1);
+ wake_up(&sem->wait);
}
-EXPORT_SYMBOL(__down_failed);
-
-#ifdef CONFIG_CPU_HAS_LLDSCD
+EXPORT_SYMBOL(__up);
/*
- * waking_non_zero_interruptible:
- * 1 got the lock
- * 0 go to sleep
- * -EINTR interrupted
- *
- * We must undo the sem->count down_interruptible decrement
- * simultaneously and atomically with the sem->waking adjustment,
- * otherwise we can race with wake_one_more.
- *
- * This is accomplished by doing a 64-bit lld/scd on the 2 32-bit words.
- *
- * This is crazy. Normally it's strictly forbidden to use 64-bit operations
- * in the 32-bit MIPS kernel. In this case it's however ok because if an
- * interrupt has destroyed the upper half of registers sc will fail.
- * Note also that this will not work for MIPS32 CPUs!
- *
- * Pseudocode:
- *
- * If(sem->waking > 0) {
- * Decrement(sem->waking)
- * Return(SUCCESS)
- * } else If(signal_pending(tsk)) {
- * Increment(sem->count)
- * Return(-EINTR)
- * } else {
- * Return(SLEEP)
- * }
+ * Note that when we come in to __down or __down_interruptible,
+ * we have already decremented count, but that decrement was
+ * ineffective since the result was < 0, and any negative value
+ * of count is equivalent to 0.
+ * Thus it is only when we decrement count from some value > 0
+ * that we have actually got the semaphore.
*/
-
-static inline int
-waking_non_zero_interruptible(struct semaphore *sem, struct task_struct *tsk)
-{
- long ret, tmp;
-
- __asm__ __volatile__(
- " .set push # waking_non_zero_interruptible \n"
- " .set mips3 \n"
- " .set noat \n"
- "0: lld %1, %2 \n"
- " li %0, 0 \n"
- " sll $1, %1, 0 \n"
- " blez $1, 1f \n"
- " daddiu %1, %1, -1 \n"
- " li %0, 1 \n"
- " b 2f \n"
- "1: beqz %3, 2f \n"
- " li %0, %4 \n"
- " dli $1, 0x0000000100000000 \n"
- " daddu %1, %1, $1 \n"
- "2: scd %1, %2 \n"
- " beqz %1, 0b \n"
- " .set pop \n"
- : "=&r" (ret), "=&r" (tmp), "=m" (*sem)
- : "r" (signal_pending(tsk)), "i" (-EINTR));
-
- return ret;
-}
-
-#else /* !CONFIG_CPU_HAS_LLDSCD */
-
-static inline int waking_non_zero_interruptible(struct semaphore *sem,
- struct task_struct *tsk)
+void __sched __down(struct semaphore *sem)
{
- int waking, pending, ret = 0;
- unsigned long flags;
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
- pending = signal_pending(tsk);
+ __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ add_wait_queue_exclusive(&sem->wait, &wait);
- spin_lock_irqsave(&semaphore_lock, flags);
- waking = atomic_read(&sem->waking);
- if (waking > 0) {
- atomic_set(&sem->waking, waking - 1);
- ret = 1;
- } else if (pending) {
- atomic_set(&sem->count, atomic_read(&sem->count) + 1);
- ret = -EINTR;
+ /*
+ * Try to get the semaphore. If the count is > 0, then we've
+ * got the semaphore; we decrement count and exit the loop.
+ * If the count is 0 or negative, we set it to -1, indicating
+ * that we are asleep, and then sleep.
+ */
+ while (__sem_update_count(sem, -1) <= 0) {
+ schedule();
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
- spin_unlock_irqrestore(&semaphore_lock, flags);
+ remove_wait_queue(&sem->wait, &wait);
+ __set_task_state(tsk, TASK_RUNNING);
- return ret;
+ /*
+ * If there are any more sleepers, wake one of them up so
+ * that it can either get the semaphore, or set count to -1
+ * indicating that there are still processes sleeping.
+ */
+ wake_up(&sem->wait);
}
-#endif /* !CONFIG_CPU_HAS_LLDSCD */
+EXPORT_SYMBOL(__down);
-int __sched __down_failed_interruptible(struct semaphore * sem)
+int __sched __down_interruptible(struct semaphore * sem)
{
+ int retval = 0;
struct task_struct *tsk = current;
- wait_queue_t wait;
- int ret = 0;
+ DECLARE_WAITQUEUE(wait, tsk);
- init_waitqueue_entry(&wait, tsk);
- __set_current_state(TASK_INTERRUPTIBLE);
+ __set_task_state(tsk, TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
- /*
- * Ok, we're set up. sem->count is known to be less than zero
- * so we must wait.
- *
- * We can let go the lock for purposes of waiting.
- * We re-acquire it after awaking so as to protect
- * all semaphore operations.
- *
- * If "up()" is called before we call waking_non_zero() then
- * we will catch it right away. If it is called later then
- * we will have to go through a wakeup cycle to catch it.
- *
- * Multiple waiters contend for the semaphore lock to see
- * who gets to gate through and who has to wait some more.
- */
- for (;;) {
- ret = waking_non_zero_interruptible(sem, tsk);
- if (ret) {
- if (ret == 1)
- /* ret != 0 only if we get interrupted -arca */
- ret = 0;
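+	/*
+	 * Same loop as in __down(), except that we give up and return
+	 * -EINTR if a signal arrives while we are waiting.
+	 */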
+ while (__sem_update_count(sem, -1) <= 0) {
+ if (signal_pending(current)) {
+ /*
+ * A signal is pending - give up trying.
+ * Set sem->count to 0 if it is negative,
+ * since we are no longer sleeping.
+ */
+ __sem_update_count(sem, 0);
+ retval = -EINTR;
break;
}
schedule();
- __set_current_state(TASK_INTERRUPTIBLE);
+ set_task_state(tsk, TASK_INTERRUPTIBLE);
}
- __set_current_state(TASK_RUNNING);
remove_wait_queue(&sem->wait, &wait);
+ __set_task_state(tsk, TASK_RUNNING);
- return ret;
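+	/*
+	 * As in __down(): pass the wakeup on so that another sleeper can
+	 * either get the semaphore or set count back to -1.
+	 */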
+ wake_up(&sem->wait);
+ return retval;
}
-EXPORT_SYMBOL(__down_failed_interruptible);
+EXPORT_SYMBOL(__down_interruptible);