/*
 *  ARM semaphore implementation, taken from
 *
 *  i386 semaphore implementation.
 *
 *  (C) Copyright 1999 Linus Torvalds
 *
 *  Modified for ARM by Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/init.h>

#include <asm/semaphore.h>
/*
 * Semaphores are implemented using a two-way counter:
 * the "count" variable is decremented for each process
 * that tries to acquire the semaphore, while the "sleepers"
 * variable is a count of such acquires.
 *
 * Notably, the inline "up()" and "down()" functions can
 * efficiently test if they need to do any extra work (up
 * needs to do something only if count was negative before
 * the increment operation).
 *
 * "sleepers" and the contention routine ordering are
 * protected by the semaphore spinlock.
 *
 * Note that these functions are only called when there is
 * contention on the lock, and as such all this is the
 * "non-critical" part of the whole semaphore business. The
 * critical part is the inline stuff in <asm/semaphore.h>
 * where we want to avoid any extra jumps and calls.
 */
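
/*
 * Worked example (illustrative sketch, not from the original source):
 * assume a semaphore initialised to count = 1, sleepers = 0, and two
 * tasks A and B:
 *
 *	A: down()	count 1 -> 0	fast path, A holds the semaphore
 *	B: down()	count 0 -> -1	went negative: B enters __down()
 *	B: __down()	sleepers -> 1; atomic_add_negative(1 - 1, &count)
 *			leaves count at -1, so B sets sleepers = 1 and sleeps
 *	A: up()		count -1 -> 0	result <= 0: __up() wakes B
 *	B: __down()	atomic_add_negative(1 - 1, &count) sees count 0,
 *			non-negative: sleepers = 0, B now holds the semaphore
 */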
/*
 * Logic:
 *  - only on a boundary condition do we need to care. When we go
 *    from a negative count to a non-negative, we wake people up.
 *  - when we go from a non-negative count to a negative do we
 *    (a) synchronize with the "sleepers" count and (b) make sure
 *    that we're on the wakeup list before we synchronize so that
 *    we cannot lose wakeup events.
 */
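
/*
 * Concretely, that ordering is why __down() below queues the task on
 * sem->wait before it synchronizes the counts under the spinlock: if
 * the order were reversed, an up() arriving between the two steps
 * would call wake_up() on a queue we are not yet on, and the wakeup
 * would be lost.
 */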
void __up(struct semaphore *sem)
{
	wake_up(&sem->wait);
}

static spinlock_t semaphore_lock = SPIN_LOCK_UNLOCKED;

void __sched __down(struct semaphore * sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_UNINTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_UNINTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	remove_wait_queue(&sem->wait, &wait);
	tsk->state = TASK_RUNNING;
	wake_up(&sem->wait);
}
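
/*
 * Illustrative usage (assumed, not part of the original file): this
 * slow path is only reached through the inline down() when the count
 * goes negative, e.g.:
 *
 *	static DECLARE_MUTEX(lock);	   count starts at 1
 *
 *	down(&lock);		may sleep in __down() under contention
 *	... critical section ...
 *	up(&lock);
 */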
int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	tsk->state = TASK_INTERRUPTIBLE;
	add_wait_queue_exclusive(&sem->wait, &wait);

	spin_lock_irq(&semaphore_lock);
	sem->sleepers++;
	for (;;) {
		int sleepers = sem->sleepers;

		/*
		 * With signals pending, this turns into
		 * the trylock failure case - we won't be
		 * sleeping, and we can't get the lock as
		 * it has contention. Just correct the count
		 * and exit.
		 */
		if (signal_pending(current)) {
			retval = -EINTR;
			sem->sleepers = 0;
			atomic_add(sleepers, &sem->count);
			break;
		}

		/*
		 * Add "everybody else" into it. They aren't
		 * playing, because we own the spinlock. The
		 * "-1" is because we're still hoping to get
		 * the lock.
		 */
		if (!atomic_add_negative(sleepers - 1, &sem->count)) {
			sem->sleepers = 0;
			break;
		}
		sem->sleepers = 1;	/* us - see -1 above */
		spin_unlock_irq(&semaphore_lock);

		schedule();
		tsk->state = TASK_INTERRUPTIBLE;
		spin_lock_irq(&semaphore_lock);
	}
	spin_unlock_irq(&semaphore_lock);
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&sem->wait, &wait);
	wake_up(&sem->wait);
	return retval;
}
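
/*
 * Illustrative usage (assumed): down_interruptible() propagates the
 * -EINTR result from above, so callers must check the return value:
 *
 *	if (down_interruptible(&lock))
 *		return -ERESTARTSYS;	interrupted by a signal
 *	... critical section ...
 *	up(&lock);
 */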
/*
 * Trylock failed - make sure we correct for
 * having decremented the count.
 *
 * We could have done the trylock with a
 * single "cmpxchg" without failure cases,
 * but then it wouldn't work on a 386.
 */
int __down_trylock(struct semaphore * sem)
{
	int sleepers;
	unsigned long flags;

	spin_lock_irqsave(&semaphore_lock, flags);
	sleepers = sem->sleepers + 1;
	sem->sleepers = 0;
	/*
	 * Add "everybody else" and us into it. They aren't
	 * playing, because we own the spinlock.
	 */
	if (!atomic_add_negative(sleepers, &sem->count))
		wake_up(&sem->wait);
	spin_unlock_irqrestore(&semaphore_lock, flags);
	return 1;
}
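
/*
 * Illustrative usage (assumed): down_trylock() returns 0 on success
 * and nonzero on failure (note the inverted sense relative to
 * spin_trylock), so a typical caller is:
 *
 *	if (down_trylock(&lock))
 *		return -EBUSY;		semaphore not acquired
 *	... critical section ...
 *	up(&lock);
 */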
/*
 * The semaphore operations have a special calling sequence that
 * allows us to do a simpler in-line version of them. These routines
 * need to convert that sequence back into the C sequence when
 * there is contention on the semaphore.
 *
 * ip contains the semaphore pointer on entry. Save the C-clobbered
 * registers (r0 to r3 and lr), but not ip, as we use it as a return
 * value in some cases.
 */
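
/*
 * Sketch of that calling sequence (assumed from the comment above and
 * the stubs below, not copied from <asm/semaphore.h>): the inline
 * down() decrements sem->count and only calls out on the slow path,
 * roughly:
 *
 *	movmi	ip, <sem>	@ semaphore pointer travels in ip
 *	blmi	__down_failed	@ conditional call; r0-r3/lr not saved
 *
 * which is why the stubs preserve r0 - r3 and lr themselves and hand
 * any C return value back in ip.
 */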
asm("	.section .sched.text			\n\
	.align	5				\n\
	.globl	__down_failed			\n\
__down_failed:					\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__down				\n\
	ldmfd	sp!, {r0 - r3, pc}		\n\
	.align	5				\n\
	.globl	__down_interruptible_failed	\n\
__down_interruptible_failed:			\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__down_interruptible		\n\
	mov	ip, r0				\n\
	ldmfd	sp!, {r0 - r3, pc}		\n\
	.align	5				\n\
	.globl	__down_trylock_failed		\n\
__down_trylock_failed:				\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__down_trylock			\n\
	mov	ip, r0				\n\
	ldmfd	sp!, {r0 - r3, pc}		\n\
	.align	5				\n\
	.globl	__up_wakeup			\n\
__up_wakeup:					\n\
	stmfd	sp!, {r0 - r3, lr}		\n\
	mov	r0, ip				\n\
	bl	__up				\n\
	ldmfd	sp!, {r0 - r3, pc}		\n\
	");