VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
linux-2.6.git: include/asm-ppc64/spinlock.h
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * A 32-bit int is used for the lock word, as a full 64-bit word is
 * not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <asm/paca.h>

typedef struct {
        volatile unsigned int lock;
} spinlock_t;

#ifdef __KERNEL__
#define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0 }

#define spin_is_locked(x)       ((x)->lock != 0)
#define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

static __inline__ void _raw_spin_unlock(spinlock_t *lock)
{
        __asm__ __volatile__("lwsync    # spin_unlock": : :"memory");
        lock->lock = 0;
}

/*
 * Normally we use the spinlock functions in arch/ppc64/lib/locks.c.
 * For special applications such as profiling, we can have the
 * spinlock functions inline by defining CONFIG_SPINLINE.
 * This is not recommended on partitioned systems with shared
 * processors, since the inline spinlock functions don't include
 * the code for yielding the CPU to the lock holder.
 */
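/*
 * Illustrative sketch, not part of the original header: the out-of-line
 * lock routines in arch/ppc64/lib/locks.c have roughly the shape below.
 * A held lock contains the holder's lock token (loaded from the paca
 * through r13 in the inline asm further down), which is what lets the
 * slow path work out which virtual processor should be given our cycles
 * on a shared-processor partition.  yield_to_lock_holder() is a made-up
 * name standing in for that hypervisor confer/yield; the real helpers
 * live in locks.c.
 *
 *	void _raw_spin_lock(spinlock_t *lock)
 *	{
 *		while (!_raw_spin_trylock(lock)) {
 *			do {
 *				yield_to_lock_holder(lock);
 *			} while (spin_is_locked(lock));
 *		}
 *	}
 */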

#ifndef CONFIG_SPINLINE
extern int _raw_spin_trylock(spinlock_t *lock);
extern void _raw_spin_lock(spinlock_t *lock);
extern void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags);
extern void spin_unlock_wait(spinlock_t *lock);

#else

static __inline__ int _raw_spin_trylock(spinlock_t *lock)
{
        unsigned int tmp, tmp2;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%2         # spin_trylock\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n\
        lwz             %1,%3(13)\n\
        stwcx.          %1,0,%2\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r"(tmp), "=&r"(tmp2)
        : "r"(&lock->lock), "i"(offsetof(struct paca_struct, lock_token))
        : "cr0", "memory");

        return tmp == 0;
}

static __inline__ void _raw_spin_lock(spinlock_t *lock)
{
        unsigned int tmp;

        __asm__ __volatile__(
        "b              2f              # spin_lock\n\
1:"
        HMT_LOW
"       lwzx            %0,0,%1\n\
        cmpwi           0,%0,0\n\
        bne+            1b\n"
        HMT_MEDIUM
"2:     lwarx           %0,0,%1\n\
        cmpwi           0,%0,0\n\
        bne-            1b\n\
        lwz             %0,%2(13)\n\
        stwcx.          %0,0,%1\n\
        bne-            2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&lock->lock), "i"(offsetof(struct paca_struct, lock_token))
        : "cr0", "memory");
}
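/*
 * Illustrative sketch, not part of the original header: in C terms the
 * lwarx/stwcx. loop above behaves roughly like the code below, where
 * cmpxchg() stands in for the reservation pair, get_paca() stands for
 * the per-cpu paca that r13 points at, and the HMT_LOW/HMT_MEDIUM
 * priority hints are omitted.  The trailing isync is the acquire barrier.
 *
 *	static __inline__ void spin_lock_sketch(spinlock_t *lock)
 *	{
 *		unsigned int token = get_paca()->lock_token;
 *
 *		for (;;) {
 *			if (cmpxchg(&lock->lock, 0, token) == 0)
 *				return;
 *			while (lock->lock != 0)
 *				cpu_relax();
 *		}
 *	}
 */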

/*
 * Note: if we ever want to inline the spinlocks on iSeries,
 * we will have to change the irq enable/disable stuff in here.
 */
static __inline__ void _raw_spin_lock_flags(spinlock_t *lock,
                                            unsigned long flags)
{
        unsigned int tmp;
        unsigned long tmp2;

        __asm__ __volatile__(
        "b              3f              # spin_lock\n\
1:      mfmsr           %1\n\
        mtmsrd          %3,1\n\
2:"     HMT_LOW
"       lwzx            %0,0,%2\n\
        cmpwi           0,%0,0\n\
        bne+            2b\n"
        HMT_MEDIUM
"       mtmsrd          %1,1\n\
3:      lwarx           %0,0,%2\n\
        cmpwi           0,%0,0\n\
        bne-            1b\n\
        lwz             %1,%4(13)\n\
        stwcx.          %1,0,%2\n\
        bne-            3b\n\
        isync"
        : "=&r"(tmp), "=&r"(tmp2)
        : "r"(&lock->lock), "r"(flags),
          "i" (offsetof(struct paca_struct, lock_token))
        : "cr0", "memory");
}
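/*
 * Usage note, not part of the original header: spin_lock_irqsave() ends
 * up doing a local_irq_save() and then handing the saved flags down to
 * _raw_spin_lock_flags().  While the lock is busy, the code above writes
 * the caller's saved MSR back (mtmsrd %3,1) so that interrupts which were
 * enabled before the irqsave can still be serviced during the wait, and
 * disables them again before retrying the acquire.  A typical caller,
 * with my_lock being a made-up name:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	... critical section, safe against local interrupts ...
 *	spin_unlock_irqrestore(&my_lock, flags);
 *
 * On iSeries, interrupt disabling is done largely in software rather than
 * through the MSR, which is why inlining this would need the irq
 * enable/disable handling changed, as the comment before this function
 * says.
 */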

#define spin_unlock_wait(x)     do { cpu_relax(); } while (spin_is_locked(x))

#endif /* CONFIG_SPINLINE */

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
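/*
 * Illustrative example, not part of the original header: with readers in
 * interrupt context and all writers in process context, only the writers
 * need to be irq-safe.  my_list_lock and the two functions are made-up
 * names.
 *
 *	static rwlock_t my_list_lock = RW_LOCK_UNLOCKED;
 *
 *	static void my_irq_reader(void)		(interrupt context)
 *	{
 *		read_lock(&my_list_lock);
 *		... walk the data ...
 *		read_unlock(&my_list_lock);
 *	}
 *
 *	static void my_writer(void)		(process context)
 *	{
 *		unsigned long flags;
 *
 *		write_lock_irqsave(&my_list_lock, flags);
 *		... modify the data ...
 *		write_unlock_irqrestore(&my_list_lock, flags);
 *	}
 */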
typedef struct {
        volatile signed int lock;
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }

#define rwlock_init(x)          do { *(x) = RW_LOCK_UNLOCKED; } while(0)
#define rwlock_is_locked(x)     ((x)->lock)

static __inline__ int is_read_locked(rwlock_t *rw)
{
        return rw->lock > 0;
}

static __inline__ int is_write_locked(rwlock_t *rw)
{
        return rw->lock < 0;
}
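/*
 * Illustrative note, not part of the original header: the rwlock word is
 * a signed count.  Zero means unlocked, a positive value is the number of
 * active readers, and -1 (stored by the write-lock paths below) means a
 * writer holds the lock.  In C terms the read-trylock logic is roughly
 * the sketch below, with cmpxchg() standing in for the lwarx/stwcx. pair:
 *
 *	static __inline__ int read_trylock_sketch(rwlock_t *rw)
 *	{
 *		int old = rw->lock;
 *
 *		if (old < 0)
 *			return 0;
 *		return cmpxchg(&rw->lock, old, old + 1) == old;
 *	}
 */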

static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
        __asm__ __volatile__("lwsync            # write_unlock": : :"memory");
        rw->lock = 0;
}

#ifndef CONFIG_SPINLINE
extern int _raw_read_trylock(rwlock_t *rw);
extern void _raw_read_lock(rwlock_t *rw);
extern void _raw_read_unlock(rwlock_t *rw);
extern int _raw_write_trylock(rwlock_t *rw);
extern void _raw_write_lock(rwlock_t *rw);
extern void _raw_write_unlock(rwlock_t *rw);

#else
static __inline__ int _raw_read_trylock(rwlock_t *rw)
{
        unsigned int tmp;
        unsigned int ret;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%2         # read_trylock\n\
        li              %1,0\n\
        extsw           %0,%0\n\
        addic.          %0,%0,1\n\
        ble-            2f\n\
        stwcx.          %0,0,%2\n\
        bne-            1b\n\
        li              %1,1\n\
        isync\n\
2:"     : "=&r"(tmp), "=&r"(ret)
        : "r"(&rw->lock)
        : "cr0", "memory");

        return ret;
}

static __inline__ void _raw_read_lock(rwlock_t *rw)
{
        unsigned int tmp;

        __asm__ __volatile__(
        "b              2f              # read_lock\n\
1:"
        HMT_LOW
"       lwax            %0,0,%1\n\
        cmpwi           0,%0,0\n\
        blt+            1b\n"
        HMT_MEDIUM
"2:     lwarx           %0,0,%1\n\
        extsw           %0,%0\n\
        addic.          %0,%0,1\n\
        ble-            1b\n\
        stwcx.          %0,0,%1\n\
        bne-            2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
}

static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
        unsigned int tmp;

        __asm__ __volatile__(
        "lwsync                         # read_unlock\n\
1:      lwarx           %0,0,%1\n\
        addic           %0,%0,-1\n\
        stwcx.          %0,0,%1\n\
        bne-            1b"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
}

static __inline__ int _raw_write_trylock(rwlock_t *rw)
{
        unsigned int tmp;
        unsigned int ret;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%2         # write_trylock\n\
        cmpwi           0,%0,0\n\
        li              %1,0\n\
        bne-            2f\n\
        stwcx.          %3,0,%2\n\
        bne-            1b\n\
        li              %1,1\n\
        isync\n\
2:"     : "=&r"(tmp), "=&r"(ret)
        : "r"(&rw->lock), "r"(-1)
        : "cr0", "memory");

        return ret;
}

static __inline__ void _raw_write_lock(rwlock_t *rw)
{
        unsigned int tmp;

        __asm__ __volatile__(
        "b              2f              # write_lock\n\
1:"
        HMT_LOW
        "lwax           %0,0,%1\n\
        cmpwi           0,%0,0\n\
        bne+            1b\n"
        HMT_MEDIUM
"2:     lwarx           %0,0,%1\n\
        cmpwi           0,%0,0\n\
        bne-            1b\n\
        stwcx.          %2,0,%1\n\
        bne-            2b\n\
        isync"
        : "=&r"(tmp)
        : "r"(&rw->lock), "r"(-1)
        : "cr0", "memory");
}
#endif /* CONFIG_SPINLINE */

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */