VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
linux-2.6.git: include/asm-parisc/spinlock.h
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/system.h>

/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
 * since it only has load-and-zero.  Moreover, at least on some PA processors,
 * the semaphore address has to be 16-byte aligned.
 */
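
/*
 * For illustration only: __ldcw() and __ldcw_align() are provided by
 * <asm/system.h>, not by this header, and the lock[] member name is assumed
 * from the { { 1, 1, 1, 1 } } initializer used below.  The sketch (guarded
 * out so it never compiles) shows the usual idea: the lock word array is
 * large enough that one of its words is 16-byte aligned, __ldcw_align()
 * rounds the address up to that word, and __ldcw() is the atomic
 * load-and-zero on it.  Treat this as an assumption, not the real
 * <asm/system.h> implementation.
 */
#if 0
#define __LDCW_ALIGNMENT        16
#define __ldcw_align(a) ({                                      \
        unsigned long __ret = (unsigned long) &(a)->lock[0];    \
        __ret = (__ret + __LDCW_ALIGNMENT - 1)                  \
                        & ~(__LDCW_ALIGNMENT - 1);              \
        (volatile unsigned int *) __ret;                        \
})

#define __ldcw(a) ({                                            \
        unsigned int __ret;                                     \
        __asm__ __volatile__("ldcw 0(%1),%0"                    \
                : "=r" (__ret) : "r" (a) : "memory");           \
        __ret;                                                  \
})
#endif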

#ifndef CONFIG_DEBUG_SPINLOCK

#define __SPIN_LOCK_UNLOCKED    { { 1, 1, 1, 1 } }
#undef SPIN_LOCK_UNLOCKED
#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED

#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

static inline int spin_is_locked(spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);
        return *a == 0;
}

#define spin_unlock_wait(x)     do { barrier(); } while(spin_is_locked(x))
#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

static inline void _raw_spin_lock(spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);
        /* ldcw returns the old value and stores zero: an old value of 0
         * means somebody else holds the lock.  Spin with plain reads until
         * it looks free, then retry the atomic load-and-clear. */
        while (__ldcw(a) == 0)
                while (*a == 0);
}

static inline void _raw_spin_unlock(spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);
        *a = 1;
}

static inline int _raw_spin_trylock(spinlock_t *x)
{
        volatile unsigned int *a = __ldcw_align(x);
        return __ldcw(a) != 0;
}
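
/*
 * Not part of this header: a minimal usage sketch.  The _raw_* primitives
 * above are normally reached through the generic wrappers in
 * <linux/spinlock.h> (spin_lock(), spin_trylock(), spin_unlock(), ...); the
 * function and variable names below are purely illustrative, so the block
 * is guarded out.
 */
#if 0
static spinlock_t example_lock = SPIN_LOCK_UNLOCKED;
static int example_count;

static void example_increment(void)
{
        spin_lock(&example_lock);       /* ends up in _raw_spin_lock() */
        example_count++;
        spin_unlock(&example_lock);     /* ends up in _raw_spin_unlock() */
}

static int example_try_increment(void)
{
        if (!spin_trylock(&example_lock))       /* _raw_spin_trylock() */
                return 0;
        example_count++;
        spin_unlock(&example_lock);
        return 1;
}
#endif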

#define spin_lock_own(LOCK, LOCATION)   ((void)0)

#else /* !(CONFIG_DEBUG_SPINLOCK) */

#define SPINLOCK_MAGIC  0x1D244B3C

#define __SPIN_LOCK_UNLOCKED    { { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__, NULL, 0, -1, NULL, NULL }
#undef SPIN_LOCK_UNLOCKED
#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED

#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

#define CHECK_LOCK(x)                                                   \
        do {                                                            \
                if (unlikely((x)->magic != SPINLOCK_MAGIC)) {           \
                        printk(KERN_ERR "%s:%d: spin_is_locked"         \
                                " on uninitialized spinlock %p.\n",     \
                                __FILE__, __LINE__, (x));               \
                }                                                       \
        } while(0)

#define spin_is_locked(x)                                               \
        ({                                                              \
                CHECK_LOCK(x);                                          \
                volatile unsigned int *a = __ldcw_align(x);             \
                if (unlikely((*a == 0) && (x)->babble)) {               \
                        (x)->babble--;                                  \
                        printk(KERN_WARNING                             \
                                "%s:%d: spin_is_locked(%s/%p) already"  \
                                " locked by %s:%d in %s at %p(%d)\n",   \
                                __FILE__, __LINE__, (x)->module, (x),   \
                                (x)->bfile, (x)->bline, (x)->task->comm,\
                                (x)->previous, (x)->oncpu);             \
                }                                                       \
                *a == 0;                                                \
        })

#define spin_unlock_wait(x)                                             \
        do {                                                            \
                CHECK_LOCK(x);                                          \
                volatile unsigned int *a = __ldcw_align(x);             \
                if (unlikely((*a == 0) && (x)->babble)) {               \
                        (x)->babble--;                                  \
                        printk(KERN_WARNING                             \
                                "%s:%d: spin_unlock_wait(%s/%p)"        \
                                " owned by %s:%d in %s at %p(%d)\n",    \
                                __FILE__, __LINE__, (x)->module, (x),   \
                                (x)->bfile, (x)->bline, (x)->task->comm,\
                                (x)->previous, (x)->oncpu);             \
                }                                                       \
                barrier();                                              \
        } while (*(volatile unsigned int *)__ldcw_align(x) == 0)

extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no);
extern void _dbg_spin_unlock(spinlock_t *lock, const char *base_file, int line_no);
extern int _dbg_spin_trylock(spinlock_t *lock, const char *base_file, int line_no);

#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)

#define _raw_spin_unlock(lock)  _dbg_spin_unlock(lock, __FILE__, __LINE__)
#define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__)
#define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__)

/* just in case we need it */
#define spin_lock_own(LOCK, LOCATION)                                   \
do {                                                                    \
        volatile unsigned int *a = __ldcw_align(LOCK);                  \
        if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id())))      \
                printk(KERN_WARNING                                     \
                        "%s: called on %d from %p but lock %s on %d\n", \
                        LOCATION, smp_processor_id(),                   \
                        __builtin_return_address(0),                    \
                        (*a == 0) ? "taken" : "freed", (LOCK)->oncpu);  \
} while (0)

#endif /* !(CONFIG_DEBUG_SPINLOCK) */

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 */
typedef struct {
        spinlock_t lock;
        volatile int counter;
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 }

#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0)

#define rwlock_is_locked(lp) ((lp)->counter != 0)

/* read_lock, read_unlock are pretty straightforward.  Of course it somewhat
 * sucks that we end up saving/restoring flags twice for read_lock_irqsave
 * and so on. */

#ifdef CONFIG_DEBUG_RWLOCK
extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline);
#define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__)
#else
static __inline__ void _raw_read_lock(rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        _raw_spin_lock(&rw->lock);

        rw->counter++;

        _raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}
#endif  /* CONFIG_DEBUG_RWLOCK */

static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
        unsigned long flags;
        local_irq_save(flags);
        _raw_spin_lock(&rw->lock);

        rw->counter--;

        _raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
}

/* write_lock is less trivial.  We optimistically grab the lock and check
 * if we surprised any readers.  If so we release the lock and wait till
 * they're all gone before trying again.
 *
 * Also note that we don't use the _irqsave / _irqrestore suffixes here.
 * If we're called with interrupts enabled and we've got readers (or other
 * writers) in interrupt handlers someone fucked up and we'd dead-lock
 * sooner or later anyway.   prumpf */

#ifdef CONFIG_DEBUG_RWLOCK
extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline);
#define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__)
#else
static __inline__ void _raw_write_lock(rwlock_t *rw)
{
retry:
        _raw_spin_lock(&rw->lock);

        if (rw->counter != 0) {
                /* this basically never happens */
                _raw_spin_unlock(&rw->lock);

                while (rw->counter != 0);

                goto retry;
        }

        /* got it.  now leave without unlocking */
        rw->counter = -1; /* remember we are locked */
}
#endif /* CONFIG_DEBUG_RWLOCK */

/* write_unlock is absolutely trivial - we don't have to wait for anything */

static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
        rw->counter = 0;
        _raw_spin_unlock(&rw->lock);
}

static __inline__ int is_read_locked(rwlock_t *rw)
{
        return rw->counter > 0;
}

static __inline__ int is_write_locked(rwlock_t *rw)
{
        return rw->counter < 0;
}
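
/*
 * Not part of this header: a minimal sketch of how the counter field encodes
 * the rwlock state (counter > 0: that many readers, counter == -1: a writer,
 * counter == 0: free).  The rwlock is normally used through the generic
 * read_lock()/write_lock() wrappers in <linux/spinlock.h>; the names below
 * are purely illustrative, so the block is guarded out.
 */
#if 0
static rwlock_t example_rwlock = RW_LOCK_UNLOCKED;
static int example_value;

static int example_read(void)
{
        int v;

        read_lock(&example_rwlock);     /* counter goes from N to N + 1 */
        v = example_value;
        read_unlock(&example_rwlock);   /* counter drops back to N */
        return v;
}

static void example_write(int v)
{
        write_lock(&example_rwlock);    /* waits for counter == 0, then sets -1 */
        example_value = v;
        write_unlock(&example_rwlock);  /* counter back to 0 */
}
#endif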

#endif /* __ASM_SPINLOCK_H */