1 #ifndef __ASM_SPINLOCK_H
2 #define __ASM_SPINLOCK_H
4 #include <asm/system.h>
/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
 * since it only has load-and-zero.  Moreover, at least on some PA processors,
 * the semaphore address has to be 16-byte aligned.
 */
11 #ifndef CONFIG_DEBUG_SPINLOCK
/* Unlocked is all-ones: ldcw needs a 16-byte-aligned word, so four words
 * are kept and __ldcw_align() picks the aligned one at run time. */
#define __SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 } }
#undef SPIN_LOCK_UNLOCKED
#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
19 static inline int spin_is_locked(spinlock_t *x)
21 volatile unsigned int *a = __ldcw_align(x);
/* Spin until the lock is released; barrier() keeps the compiler from
 * caching the lock word across iterations. */
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))

/* Flags are not needed to take the lock on PA-RISC. */
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
28 static inline void _raw_spin_lock(spinlock_t *x)
30 volatile unsigned int *a = __ldcw_align(x);
31 while (__ldcw(a) == 0)
35 static inline void _raw_spin_unlock(spinlock_t *x)
37 volatile unsigned int *a = __ldcw_align(x);
41 static inline int _raw_spin_trylock(spinlock_t *x)
43 volatile unsigned int *a = __ldcw_align(x);
44 return __ldcw(a) != 0;
47 #define spin_lock_own(LOCK, LOCATION) ((void)0)
49 #else /* !(CONFIG_DEBUG_SPINLOCK) */
/* Debug variant: the lock carries a magic cookie, a babble budget for
 * rate-limiting warnings, and last-owner bookkeeping. */
#define SPINLOCK_MAGIC	0x1D244B3C

#define __SPIN_LOCK_UNLOCKED	{ { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL }
#undef SPIN_LOCK_UNLOCKED
#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
/* Complain if the lock was never run through spin_lock_init(). */
#define CHECK_LOCK(x)							\
	do {								\
		if (unlikely((x)->magic != SPINLOCK_MAGIC)) {		\
			printk(KERN_ERR "%s:%d: spin_is_locked"		\
			" on uninitialized spinlock %p.\n",		\
				__FILE__, __LINE__, (x));		\
		}							\
	} while(0)
/* Debug spin_is_locked: evaluates to non-zero when held, and (while the
 * babble budget lasts) reports who took the lock last.
 * Fix: KERN_WARNING belongs outside the format string (it is a prefix
 * macro, not text), otherwise the level is printed literally. */
#define spin_is_locked(x)						\
	({								\
	CHECK_LOCK(x);							\
	volatile unsigned int *a = __ldcw_align(x);			\
	if (unlikely((*a == 0) && (x)->babble)) {			\
		(x)->babble--;						\
		printk(KERN_WARNING					\
			"%s:%d: spin_is_locked(%s/%p) already"		\
			" locked by %s:%d in %s at %p(%d)\n",		\
			__FILE__, __LINE__, (x)->module, (x),		\
			(x)->bfile, (x)->bline, (x)->task->comm,	\
			(x)->previous, (x)->oncpu);			\
	}								\
	*a == 0;							\
	})
/* Debug spin_unlock_wait: warn (budget permitting) about the current
 * owner, then spin until the lock word reads non-zero (free).
 * Fix: KERN_WARNING moved out of the format-string literal. */
#define spin_unlock_wait(x)						\
	do {								\
		CHECK_LOCK(x);						\
		{							\
		volatile unsigned int *a = __ldcw_align(x);		\
		if (unlikely((*a == 0) && (x)->babble)) {		\
			(x)->babble--;					\
			printk(KERN_WARNING				\
				"%s:%d: spin_unlock_wait(%s/%p)"	\
				" owned by %s:%d in %s at %p(%d)\n",	\
				__FILE__, __LINE__, (x)->module, (x),	\
				(x)->bfile, (x)->bline, (x)->task->comm,\
				(x)->previous, (x)->oncpu);		\
		}							\
		barrier();						\
		}							\
	} while (*((volatile unsigned char *)(__ldcw_align(x))) == 0)
100 extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no);
101 extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int);
102 extern int _dbg_spin_trylock(spinlock_t * lock, const char *, int);
104 #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
106 #define _raw_spin_unlock(lock) _dbg_spin_unlock(lock, __FILE__, __LINE__)
107 #define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__)
108 #define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__)
/* Assert that the current CPU holds LOCK; warn otherwise.
 * Fixes: KERN_WARNING moved out of the string literal, and the field is
 * `oncpu` (as used everywhere else here), not `on_cpu`. */
#define spin_lock_own(LOCK, LOCATION)					\
	do {								\
		volatile unsigned int *a = __ldcw_align(LOCK);		\
		if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id()))) \
			printk(KERN_WARNING				\
				"%s: called on %d from %p but lock %s on %d\n", \
				LOCATION, smp_processor_id(),		\
				__builtin_return_address(0),		\
				(*a == 0) ? "taken" : "freed", (LOCK)->oncpu); \
	} while (0)
122 #endif /* !(CONFIG_DEBUG_SPINLOCK) */
125 * Read-write spinlocks, allowing multiple readers
126 * but only one writer.
130 volatile int counter;
133 #define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 }
135 #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
137 #define rwlock_is_locked(lp) ((lp)->counter != 0)
/* read_lock, read_unlock are pretty straightforward.  Of course it somehow
 * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
142 #ifdef CONFIG_DEBUG_RWLOCK
143 extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline);
144 #define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__)
146 static __inline__ void _raw_read_lock(rwlock_t *rw)
149 local_irq_save(flags);
150 _raw_spin_lock(&rw->lock);
154 _raw_spin_unlock(&rw->lock);
155 local_irq_restore(flags);
157 #endif /* CONFIG_DEBUG_RWLOCK */
159 static __inline__ void _raw_read_unlock(rwlock_t *rw)
162 local_irq_save(flags);
163 _raw_spin_lock(&rw->lock);
167 _raw_spin_unlock(&rw->lock);
168 local_irq_restore(flags);
/* write_lock is less trivial.  We optimistically grab the lock and check
 * if we surprised any readers.  If so we release the lock and wait till
 * they're all gone before trying again.
 *
 * Also note that we don't use the _irqsave / _irqrestore suffixes here.
 * If we're called with interrupts enabled and we've got readers (or other
 * writers) in interrupt handlers someone fucked up and we'd dead-lock
 * sooner or later anyway.   prumpf */
180 #ifdef CONFIG_DEBUG_RWLOCK
181 extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline);
182 #define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__)
184 static __inline__ void _raw_write_lock(rwlock_t *rw)
187 _raw_spin_lock(&rw->lock);
189 if(rw->counter != 0) {
190 /* this basically never happens */
191 _raw_spin_unlock(&rw->lock);
193 while(rw->counter != 0);
198 /* got it. now leave without unlocking */
199 rw->counter = -1; /* remember we are locked */
201 #endif /* CONFIG_DEBUG_RWLOCK */
203 /* write_unlock is absolutely trivial - we don't have to wait for anything */
205 static __inline__ void _raw_write_unlock(rwlock_t *rw)
208 _raw_spin_unlock(&rw->lock);
211 static __inline__ int is_read_locked(rwlock_t *rw)
213 return rw->counter > 0;
216 static __inline__ int is_write_locked(rwlock_t *rw)
218 return rw->counter < 0;
221 #endif /* __ASM_SPINLOCK_H */