linux-2.6.git: include/asm-i386/spinlock.h
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <linux/config.h>
#include <linux/compiler.h>

asmlinkage int printk(const char * fmt, ...)
	__attribute__ ((format (printf, 1, 2)));

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned magic;
#endif
} spinlock_t;

#define SPINLOCK_MAGIC	0xdead4ead

#ifdef CONFIG_DEBUG_SPINLOCK
#define SPINLOCK_MAGIC_INIT	, SPINLOCK_MAGIC
#else
#define SPINLOCK_MAGIC_INIT	/* */
#endif

#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 SPINLOCK_MAGIC_INIT }

#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 */

#define spin_is_locked(x)	(*(volatile signed char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x)	do { barrier(); } while(spin_is_locked(x))

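/*
 * The lock is a single byte: SPIN_LOCK_UNLOCKED leaves it at 1, so a
 * positive value means "free" and zero or negative means "held" (which
 * is what spin_is_locked() above tests).  spin_lock_string acquires it
 * with a locked "decb"; if the result is still non-negative (jns) we own
 * the lock, otherwise we spin with "rep;nop" (the pause hint) until the
 * byte turns positive and then retry the decrement.
 */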
#define spin_lock_string \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"jns 3f\n" \
	"2:\t" \
	"rep;nop\n\t" \
	"cmpb $0,%0\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	"3:\n\t"

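/*
 * spin_lock_string_flags is the variant used when the caller has saved
 * its interrupt state in %1: while waiting for the lock it tests bit
 * 0x200 (the IF flag) of the saved flags and, if interrupts were enabled
 * there, re-enables them with "sti" for the duration of the spin, then
 * disables them again with "cli" before retrying the acquire.
 */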
#define spin_lock_string_flags \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"jns 4f\n\t" \
	"2:\t" \
	"testl $0x200, %1\n\t" \
	"jz 3f\n\t" \
	"sti\n\t" \
	"3:\t" \
	"rep;nop\n\t" \
	"cmpb $0, %0\n\t" \
	"jle 3b\n\t" \
	"cli\n\t" \
	"jmp 1b\n" \
	"4:\n\t"

/*
 * Releasing the lock with a plain byte store works, despite all the
 * confusion, except on PPro SMP or if we are using OOSTORE
 * (PPro errata 66, 92): in those cases the unlock must use a locked
 * xchg instead, as in the #else branch below.
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

#define spin_unlock_string \
	"movb $1,%0" \
		:"=m" (lock->lock) : : "memory"


static inline void _raw_spin_unlock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(lock->magic != SPINLOCK_MAGIC);
	BUG_ON(!spin_is_locked(lock));
#endif
	__asm__ __volatile__(
		spin_unlock_string
	);
}

#else

#define spin_unlock_string \
	"xchgb %b0, %1" \
		:"=q" (oldval), "=m" (lock->lock) \
		:"0" (oldval) : "memory"

static inline void _raw_spin_unlock(spinlock_t *lock)
{
	char oldval = 1;
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(lock->magic != SPINLOCK_MAGIC);
	BUG_ON(!spin_is_locked(lock));
#endif
	__asm__ __volatile__(
		spin_unlock_string
	);
}

#endif

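/*
 * Trylock: atomically exchange 0 into the lock byte.  A positive old
 * value means the lock was free and is now ours; zero or negative means
 * it was already held, and the byte we wrote still reads as "locked".
 */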
static inline int _raw_spin_trylock(spinlock_t *lock)
{
	char oldval;
	__asm__ __volatile__(
		"xchgb %b0,%1"
		:"=q" (oldval), "=m" (lock->lock)
		:"0" (0) : "memory");
	return oldval > 0;
}

static inline void _raw_spin_lock(spinlock_t *lock)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
		printk("eip: %p\n", __builtin_return_address(0));
		BUG();
	}
#endif
	__asm__ __volatile__(
		spin_lock_string
		:"=m" (lock->lock) : : "memory");
}

static inline void _raw_spin_lock_flags (spinlock_t *lock, unsigned long flags)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	if (unlikely(lock->magic != SPINLOCK_MAGIC)) {
		printk("eip: %p\n", __builtin_return_address(0));
		BUG();
	}
#endif
	__asm__ __volatile__(
		spin_lock_string_flags
		:"=m" (lock->lock) : "r" (flags) : "memory");
}

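/*
 * Usage sketch (illustrative only, not part of this header): kernel code
 * does not call the _raw_* functions directly.  On SMP builds the generic
 * wrappers in <linux/spinlock.h> expand to them, roughly:
 *
 *	static spinlock_t my_lock = SPIN_LOCK_UNLOCKED;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);	<- may reach _raw_spin_lock_flags()
 *	... critical section ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 *
 * "my_lock" is just a made-up example name.
 */
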
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
typedef struct {
	volatile unsigned int lock;
#ifdef CONFIG_DEBUG_SPINLOCK
	unsigned magic;
#endif
} rwlock_t;

#define RWLOCK_MAGIC	0xdeaf1eed

#ifdef CONFIG_DEBUG_SPINLOCK
#define RWLOCK_MAGIC_INIT	, RWLOCK_MAGIC
#else
#define RWLOCK_MAGIC_INIT	/* */
#endif

#define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }

#define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)

/*
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious. Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 */
/* the spinlock helpers are in arch/i386/kernel/semaphore.c */

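/*
 * The counter starts out at RW_LOCK_BIAS.  A reader subtracts 1 and only
 * takes the slow path (__read_lock_failed) if the result goes negative,
 * i.e. a writer is present; a writer subtracts the whole bias and only
 * owns the lock if the result is exactly zero, i.e. no readers and no
 * other writer, as _raw_write_trylock below spells out.
 */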
static inline void _raw_read_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	__build_read_lock(rw, "__read_lock_failed");
}

static inline void _raw_write_lock(rwlock_t *rw)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	BUG_ON(rw->magic != RWLOCK_MAGIC);
#endif
	__build_write_lock(rw, "__write_lock_failed");
}

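/* Unlock reverses the lock-time subtraction: readers add 1 back, writers add the full bias back. */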
#define _raw_read_unlock(rw)	asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
#define _raw_write_unlock(rw)	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")

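/*
 * Write-trylock: optimistically subtract the bias; if the counter hits
 * zero there were no readers or writers and the write lock is ours.
 * Otherwise undo the subtraction and report failure.
 */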
static inline int _raw_write_trylock(rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

#endif /* __ASM_SPINLOCK_H */