 * Debugging versions of SMP locking primitives.
 *
 * Copyright (C) 2004 Thibaut VARENE <varenet@esiee.fr>
 *
 * Some code stolen from alpha & sparc64 ;)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <linux/config.h>
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
27 #include <linux/spinlock.h>
28 #include <linux/hardirq.h> /* in_interrupt() */
29 #include <asm/system.h>
/*
 * Spin budget before a lock is reported as "stuck".  Parenthesized so
 * the macro expands safely inside larger expressions: without the
 * parentheses, e.g. `INIT_STUCK + 1' would expand to `1L << 30 + 1'
 * == `1L << 31' because `+' binds tighter than `<<'.
 */
#define INIT_STUCK (1L << 30)
34 #ifdef CONFIG_DEBUG_SPINLOCK
/*
 * _dbg_spin_lock - debugging version of spin_lock().
 *
 * Spins on the LDCW-aligned lock word until acquired.  If the spin
 * budget (`stuck', starting at INIT_STUCK) runs out, prints a "stuck"
 * diagnostic naming both the blocked caller (base_file/line_no, caller
 * PC, CPU) and the recorded owner (lock->bfile/bline/task/previous/
 * oncpu).  On success, records the new owner's identity in the lock
 * for future diagnostics.
 *
 * NOTE(review): this is an incomplete extract -- the opening brace,
 * the `printed' flag declaration, the retry label, the printk()
 * openers and the closing braces are elided; comments below annotate
 * only the code that is visible.
 */
void _dbg_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
	volatile unsigned int *a;			/* aligned LDCW lock word */
	long stuck = INIT_STUCK;			/* spin budget before complaining */
	void *inline_pc = __builtin_return_address(0);	/* caller PC for reports */
	unsigned long started = jiffies;		/* to report how long we waited */
	int cpu = smp_processor_id();

	/* Do the actual locking */
	/* <T-Bone> ggg: we can't get stuck on the outter loop?
	 * <ggg> T-Bone: We can hit the outer loop
	 *	alot if multiple CPUs are constantly racing for a lock
	 *	and the backplane is NOT fair about which CPU sees
	 *	the update first. But it won't hang since every failed
	 *	attempt will drop us back into the inner loop and
	 * <ggg> K-class and some of the others are NOT fair in the HW
	 *	implementation so we could see false positives.
	 *	But fixing the lock contention is easier than
	 *	fixing the HW to be fair.
	 * <tausq> __ldcw() returns 1 if we get the lock; otherwise we
	 *	spin until the value of the lock changes, or we time out.
	 */
	a = __ldcw_align(lock);
	/* Outer loop: atomic acquire attempt via LDCW.  Inner loop:
	 * read-only spin on the lock word (cheaper than hammering the
	 * bus with LDCW) until it changes or the budget runs out. */
	while (stuck && (__ldcw(a) == 0))
		while ((*a == 0) && --stuck);

	if (unlikely(stuck <= 0)) {
		/* Timed out: report the blocked caller and the owner
		 * recorded at acquire time.  (printk opener elided.) */
			"%s:%d: spin_lock(%s/%p) stuck in %s at %p(%d)"
			" owned by %s:%d in %s at %p(%d)\n",
			base_file, line_no, lock->module, lock,
			current->comm, inline_pc, cpu,
			lock->bfile, lock->bline, lock->task->comm,
			lock->previous, lock->oncpu);

	/* Exiting. Got the lock. */
	/* Record the new owner so a future "stuck" report can name us. */
	lock->previous = inline_pc;
	lock->bfile = (char *)base_file;
	lock->bline = line_no;

	/* If we complained above, confirm the lock was finally grabbed
	 * and report the wait in ticks.  (`printed' is presumably set in
	 * the elided stuck-handling path -- its declaration is not
	 * visible here.) */
	if (unlikely(printed)) {
		"%s:%d: spin_lock grabbed in %s at %p(%d) %ld ticks\n",
		base_file, line_no, current->comm, inline_pc,
		cpu, jiffies - started);
/*
 * _dbg_spin_unlock - debugging version of spin_unlock().
 *
 * Warns when unlocking a lock that is not actually held: with LDCW
 * semantics a non-zero lock word means the lock is free, so *a != 0
 * here indicates an unbalanced unlock.  Only locks with lock->babble
 * set complain.
 *
 * NOTE(review): incomplete extract -- the opening brace, the actual
 * unlocking store, the printk() opener and the closing braces are
 * not visible here.
 */
void _dbg_spin_unlock(spinlock_t * lock, const char *base_file, int line_no)
	volatile unsigned int *a = __ldcw_align(lock);
	/* *a != 0 => lock word already free: caller never held it. */
	if (unlikely((*a != 0) && lock->babble)) {
			"%s:%d: spin_unlock(%s:%p) not locked\n",
			base_file, line_no, lock->module, lock);
/*
 * _dbg_spin_trylock - debugging version of spin_trylock().
 *
 * Makes a single LDCW acquire attempt (no spinning).  On success,
 * records the owner's identity (CPU, caller PC, task, file, line) in
 * the lock so later "stuck" diagnostics can name us.
 *
 * NOTE(review): incomplete extract -- the opening brace, the `ret'
 * declaration, the return statement and closing braces are not
 * visible here; presumably returns non-zero on success like the
 * generic trylock contract -- confirm against the full file.
 */
int _dbg_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
	volatile unsigned int *a = __ldcw_align(lock);
	/* __ldcw() != 0 means this one attempt acquired the lock. */
	if ((ret = (__ldcw(a) != 0))) {
		/* Record owner info for future diagnostics. */
		lock->oncpu = smp_processor_id();
		lock->previous = __builtin_return_address(0);
		lock->task = current;
		lock->bfile = (char *)base_file;
		lock->bline = line_no;
122 #endif /* CONFIG_DEBUG_SPINLOCK */
124 #ifdef CONFIG_DEBUG_RWLOCK
/* Interrupts trouble detailed explanation, thx Grant:
 *
 * o writer (wants to modify data) attempts to acquire the rwlock
 * o He gets the write lock.
 * o Interrupts are still enabled, we take an interrupt with the
 *   writer still holding the lock.
 * o interrupt handler tries to acquire the rwlock for read.
 * o deadlock since the writer can't release it at this point.
 *
 * In general, any use of spinlocks that competes between "base"
 * level and interrupt level code will risk deadlock. Interrupts
 * need to be disabled in the base level routines to avoid it.
 * Or more precisely, only the IRQ the base level routine
 * is competing with for the lock. But it's more efficient/faster
 * to just disable all interrupts on that CPU to guarantee
 * once it gets the lock it can release it quickly too.
/*
 * _dbg_write_lock - debugging version of write_lock().
 *
 * Acquires the rwlock for writing: takes the rwlock's internal
 * spinlock, and if readers or a writer are present (rw->counter != 0)
 * drops it and spin-waits, printing a "stuck on writer" (counter < 0)
 * or "stuck on reader" diagnostic when the spin budget runs out.
 * Marks the lock write-held with counter = -1 and deliberately leaves
 * the internal spinlock held (released by write_unlock).  Warns when
 * called from interrupt context -- see the deadlock explanation above
 * this function.
 *
 * NOTE(review): incomplete extract -- the opening brace, the
 * `printed' flag declaration, the retry label/goto structure, the
 * printk() openers and closing braces are elided.
 */
void _dbg_write_lock(rwlock_t *rw, const char *bfile, int bline)
	void *inline_pc = __builtin_return_address(0);	/* caller PC for reports */
	unsigned long started = jiffies;		/* to report wait duration */
	long stuck = INIT_STUCK;			/* spin budget before complaining */
	int cpu = smp_processor_id();

	if(unlikely(in_interrupt())) {	/* acquiring write lock in interrupt context, bad idea */
		printk(KERN_WARNING "write_lock caller: %s:%d, IRQs enabled,\n", bfile, bline);

	/* Note: if interrupts are disabled (which is most likely), the printk
	   will never show on the console. We might need a polling method to flush
	   the dmesg buffer anyhow. */

	/* The rwlock's counter is protected by its internal spinlock. */
	_raw_spin_lock(&rw->lock);

	if(rw->counter != 0) {
		/* Contended: release the internal spinlock before waiting. */
		/* this basically never happens */
		_raw_spin_unlock(&rw->lock);

	/* counter < 0 => another writer holds the lock. */
	if ((unlikely(stuck <= 0)) && (rw->counter < 0)) {
			"%s:%d: write_lock stuck on writer"
			" in %s at %p(%d) %ld ticks\n",
			bfile, bline, current->comm, inline_pc,
			cpu, jiffies - started);
	/* otherwise (counter > 0) we are stuck behind readers. */
	else if (unlikely(stuck <= 0)) {
			"%s:%d: write_lock stuck on reader"
			" in %s at %p(%d) %ld ticks\n",
			bfile, bline, current->comm, inline_pc,
			cpu, jiffies - started);

	/* Busy-wait (internal spinlock released above) until the lock
	 * drains, then presumably retry via elided goto. */
	while(rw->counter != 0);

	/* got it. now leave without unlocking */
	rw->counter = -1; /* remember we are locked */

	/* If we complained earlier, confirm acquisition and report the
	 * wait.  (`printed' is declared/set in elided code.) */
	if (unlikely(printed)) {
		"%s:%d: write_lock grabbed in %s at %p(%d) %ld ticks\n",
		bfile, bline, current->comm, inline_pc,
		cpu, jiffies - started);
/*
 * _dbg_read_lock - debugging version of read_lock().
 *
 * Disables local interrupts and takes the rwlock's internal spinlock
 * around the reader accounting (the counter update itself is in
 * elided code), then releases the spinlock and restores interrupts.
 * IRQs are masked to avoid the base-level vs. interrupt-level
 * deadlock described in the comment block above _dbg_write_lock.
 *
 * NOTE(review): incomplete extract -- the opening brace, the `flags'
 * declaration, the reader-counter update, the printk() opener (and
 * whatever babble condition guards it) and the closing brace are not
 * visible here.
 */
void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline)
	void *inline_pc = __builtin_return_address(0);	/* caller PC for reports */
	unsigned long started = jiffies;		/* to report wait duration */
	int cpu = smp_processor_id();

	/* Mask IRQs on this CPU while holding the internal spinlock. */
	local_irq_save(flags);
	_raw_spin_lock(&rw->lock);

	/* Diagnostic reporting the grab (printk opener elided). */
		"%s:%d: read_lock grabbed in %s at %p(%d) %ld ticks\n",
		bfile, bline, current->comm, inline_pc,
		cpu, jiffies - started);

	_raw_spin_unlock(&rw->lock);
	local_irq_restore(flags);
227 #endif /* CONFIG_DEBUG_RWLOCK */