/*
 * MIPS-specific semaphore code.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
 * to eliminate the SMP races in the old version between the updates
 * of `count' and `waking'.  Now we use negative `count' values to
 * indicate that some process(es) are waiting for the semaphore.
 */
18 #include <linux/config.h>
19 #include <linux/module.h>
20 #include <linux/sched.h>
21 #include <linux/init.h>
22 #include <asm/atomic.h>
23 #include <asm/semaphore.h>
24 #include <asm/errno.h>
26 #ifdef CONFIG_CPU_HAS_LLSC
29 * Atomically update sem->count.
30 * This does the equivalent of the following:
32 * old_count = sem->count;
33 * tmp = MAX(old_count, 0) + incr;
37 static inline int __sem_update_count(struct semaphore *sem, int incr)
49 : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
50 : "r" (incr), "m" (sem->count));
58 * On machines without lld/scd we need a spinlock to make the manipulation of
59 * sem->count and sem->waking atomic. Scalability isn't an issue because
60 * this lock is used on UP only so it's just an empty variable.
62 static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;
64 static inline int __sem_update_count(struct semaphore *sem, int incr)
69 spin_lock_irqsave(&semaphore_lock, flags);
70 old_count = atomic_read(&sem->count);
71 tmp = max_t(int, old_count, 0) + incr;
72 atomic_set(&sem->count, tmp);
73 spin_unlock_irqrestore(&semaphore_lock, flags);
80 void __up(struct semaphore *sem)
83 * Note that we incremented count in up() before we came here,
84 * but that was ineffective since the result was <= 0, and
85 * any negative value of count is equivalent to 0.
86 * This ends up setting count to 1, unless count is now > 0
87 * (i.e. because some other cpu has called up() in the meantime),
88 * in which case we just increment count.
90 __sem_update_count(sem, 1);
97 * Note that when we come in to __down or __down_interruptible,
98 * we have already decremented count, but that decrement was
99 * ineffective since the result was < 0, and any negative value
100 * of count is equivalent to 0.
101 * Thus it is only when we decrement count from some value > 0
102 * that we have actually got the semaphore.
104 void __sched __down(struct semaphore *sem)
106 struct task_struct *tsk = current;
107 DECLARE_WAITQUEUE(wait, tsk);
109 __set_task_state(tsk, TASK_UNINTERRUPTIBLE);
110 add_wait_queue_exclusive(&sem->wait, &wait);
113 * Try to get the semaphore. If the count is > 0, then we've
114 * got the semaphore; we decrement count and exit the loop.
115 * If the count is 0 or negative, we set it to -1, indicating
116 * that we are asleep, and then sleep.
118 while (__sem_update_count(sem, -1) <= 0) {
120 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
122 remove_wait_queue(&sem->wait, &wait);
123 __set_task_state(tsk, TASK_RUNNING);
126 * If there are any more sleepers, wake one of them up so
127 * that it can either get the semaphore, or set count to -1
128 * indicating that there are still processes sleeping.
133 EXPORT_SYMBOL(__down);
135 int __sched __down_interruptible(struct semaphore * sem)
138 struct task_struct *tsk = current;
139 DECLARE_WAITQUEUE(wait, tsk);
141 __set_task_state(tsk, TASK_INTERRUPTIBLE);
142 add_wait_queue_exclusive(&sem->wait, &wait);
144 while (__sem_update_count(sem, -1) <= 0) {
145 if (signal_pending(current)) {
147 * A signal is pending - give up trying.
148 * Set sem->count to 0 if it is negative,
149 * since we are no longer sleeping.
151 __sem_update_count(sem, 0);
156 set_task_state(tsk, TASK_INTERRUPTIBLE);
158 remove_wait_queue(&sem->wait, &wait);
159 __set_task_state(tsk, TASK_RUNNING);
165 EXPORT_SYMBOL(__down_interruptible);