include/asm-ppc64/spinlock.h
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * An unsigned int is used for the lock word, as a full 64-bit word
 * is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>

typedef struct {
	volatile unsigned int lock;
} spinlock_t;

#ifdef __KERNEL__
#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }

#define spin_is_locked(x)	((x)->lock != 0)
#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

static __inline__ void _raw_spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__("lwsync	# spin_unlock": : :"memory");
	lock->lock = 0;
}
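
/*
 * The lwsync above is sufficient for release semantics: it orders all
 * loads and stores in the critical section before the plain store of
 * 0 that frees the lock, without paying for a full sync.
 */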

/*
 * Normally we use the spinlock functions in arch/ppc64/lib/locks.c.
 * For special applications such as profiling, we can have the
 * spinlock functions inlined by defining CONFIG_SPINLINE.
 * This is not recommended on partitioned systems with shared
 * processors, since the inline spinlock functions don't include
 * the code for yielding the CPU to the lock holder.
 */

#ifndef CONFIG_SPINLINE
extern int _raw_spin_trylock(spinlock_t *lock);
extern void _raw_spin_lock(spinlock_t *lock);
extern void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags);
extern void spin_unlock_wait(spinlock_t *lock);
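
/*
 * For reference, the out-of-line slow path in arch/ppc64/lib/locks.c
 * spins with low SMT priority and, on a shared-processor LPAR, asks
 * the hypervisor to run the cpu that holds the lock.  A paraphrased
 * sketch of its shape (not the verbatim locks.c source):
 *
 *	void _raw_spin_lock(spinlock_t *lock)
 *	{
 *		while (!_raw_spin_trylock(lock)) {
 *			do {
 *				HMT_low();
 *				__spin_yield(lock);
 *			} while (lock->lock != 0);
 *			HMT_medium();
 *		}
 *	}
 */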

#else

static __inline__ int _raw_spin_trylock(spinlock_t *lock)
{
	unsigned int tmp, tmp2;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%2		# spin_trylock\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	lwz		%1,24(13)\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r"(tmp), "=&r"(tmp2)
	: "r"(&lock->lock)
	: "cr0", "memory");

	return tmp == 0;
}
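
/*
 * The lwarx/stwcx. pair above is the classic load-reserve/
 * store-conditional sequence: the store succeeds only if nothing
 * else has touched the lock word since the load.  The value stored,
 * loaded from offset 24 off r13 (the paca pointer), is this cpu's
 * nonzero lock token, so a held lock identifies its holder and the
 * out-of-line code can yield to that cpu on shared-processor LPARs.
 * The trailing isync keeps the critical section from starting before
 * the lock is actually held.
 */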

static __inline__ void _raw_spin_lock(spinlock_t *lock)
{
	unsigned int tmp;

	__asm__ __volatile__(
	"b		2f		# spin_lock\n\
1:"
	HMT_LOW
"	lwzx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	bne+		1b\n"
	HMT_MEDIUM
"2:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	bne-		1b\n\
	lwz		%0,24(13)\n\
	stwcx.		%0,0,%1\n\
	bne-		2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&lock->lock)
	: "cr0", "memory");
}
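
/*
 * _raw_spin_lock spins in two phases: the lwarx/stwcx. loop at 2:
 * attempts the acquisition, while the plain lwzx loop at 1: watches
 * the lock without taking a reservation.  HMT_LOW drops this thread's
 * SMT priority while busy-waiting, so a sibling thread gets the
 * execution resources, and HMT_MEDIUM restores it before retrying.
 */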

/*
 * Note: if we ever want to inline the spinlocks on iSeries,
 * we will have to change the irq enable/disable stuff in here.
 */
static __inline__ void _raw_spin_lock_flags(spinlock_t *lock,
					    unsigned long flags)
{
	unsigned int tmp;
	unsigned long tmp2;

	__asm__ __volatile__(
	"b		3f		# spin_lock\n\
1:	mfmsr		%1\n\
	mtmsrd		%3,1\n\
2:"	HMT_LOW
"	lwzx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne+		2b\n"
	HMT_MEDIUM
"	mtmsrd		%1,1\n\
3:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		1b\n\
	lwz		%1,24(13)\n\
	stwcx.		%1,0,%2\n\
	bne-		3b\n\
	isync"
	: "=&r"(tmp), "=&r"(tmp2)
	: "r"(&lock->lock), "r"(flags)
	: "cr0", "memory");
}
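
/*
 * While the lock is contended, _raw_spin_lock_flags re-enables
 * interrupts by loading the caller's saved MSR (flags) with mtmsrd,
 * so pending interrupts can be serviced during the wait, then puts
 * the interrupt-disabled MSR back before retrying the lwarx.  The
 * ",1" form of mtmsrd updates only the EE/RI bits.
 */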

#define spin_unlock_wait(x)	do { cpu_relax(); } while (spin_is_locked(x))

#endif /* CONFIG_SPINLINE */

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks, as in the sketch below.
 */
typedef struct {
	volatile signed int lock;
} rwlock_t;

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }

#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
#define rwlock_is_locked(x)	((x)->lock)
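
/*
 * An illustrative sketch of the mixed pattern mentioned above.  It is
 * not part of this header; "stats" and the two functions are made-up
 * names, and the lock/unlock wrappers come from linux/spinlock.h:
 */
#if 0
static rwlock_t stats_lock = RW_LOCK_UNLOCKED;
static unsigned long stats;

static irqreturn_t stats_irq_handler(int irq, void *dev_id,
				     struct pt_regs *regs)
{
	unsigned long snapshot;

	read_lock(&stats_lock);		/* reader in irq: no irq-disable */
	snapshot = stats;
	read_unlock(&stats_lock);
	/* ... use snapshot ... */
	return IRQ_HANDLED;
}

static void stats_update(unsigned long delta)
{
	unsigned long flags;

	/* the writer must be irq-safe, or a reader in an interrupt
	 * on this cpu could deadlock against it */
	write_lock_irqsave(&stats_lock, flags);
	stats += delta;
	write_unlock_irqrestore(&stats_lock, flags);
}
#endif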

static __inline__ int is_read_locked(rwlock_t *rw)
{
	return rw->lock > 0;
}

static __inline__ int is_write_locked(rwlock_t *rw)
{
	return rw->lock < 0;
}
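
/*
 * The signed lock word encodes the state: 0 means unlocked, a
 * positive value is the number of readers holding the lock, and -1
 * means a writer holds it.  That is why the two tests above only
 * need the sign.
 */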

static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
	__asm__ __volatile__("lwsync		# write_unlock": : :"memory");
	rw->lock = 0;
}

#ifndef CONFIG_SPINLINE
extern int _raw_read_trylock(rwlock_t *rw);
extern void _raw_read_lock(rwlock_t *rw);
extern void _raw_read_unlock(rwlock_t *rw);
extern int _raw_write_trylock(rwlock_t *rw);
extern void _raw_write_lock(rwlock_t *rw);

#else
static __inline__ int _raw_read_trylock(rwlock_t *rw)
{
	unsigned int tmp;
	unsigned int ret;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%2		# read_trylock\n\
	li		%1,0\n\
	extsw		%0,%0\n\
	addic.		%0,%0,1\n\
	ble-		2f\n\
	stwcx.		%0,0,%2\n\
	bne-		1b\n\
	li		%1,1\n\
	isync\n\
2:"	: "=&r"(tmp), "=&r"(ret)
	: "r"(&rw->lock)
	: "cr0", "memory");

	return ret;
}
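
/*
 * read_trylock sign-extends the 32-bit lock word (extsw) so that the
 * 64-bit add-and-record (addic.) sees -1 for a write-locked word; the
 * incremented value is then <= 0 and ble- bails out with %1 still 0.
 * Otherwise the new reader count is stored conditionally and %1
 * reports success.
 */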

static __inline__ void _raw_read_lock(rwlock_t *rw)
{
	unsigned int tmp;

	__asm__ __volatile__(
	"b		2f		# read_lock\n\
1:"
	HMT_LOW
"	lwax		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	blt+		1b\n"
	HMT_MEDIUM
"2:	lwarx		%0,0,%1\n\
	extsw		%0,%0\n\
	addic.		%0,%0,1\n\
	ble-		1b\n\
	stwcx.		%0,0,%1\n\
	bne-		2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
	unsigned int tmp;

	__asm__ __volatile__(
	"lwsync				# read_unlock\n\
1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}
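
/*
 * Unlike write_unlock, read_unlock cannot simply store 0: other
 * readers may hold the lock and be updating the count concurrently,
 * so the decrement has to go through its own lwarx/stwcx. loop.  The
 * leading lwsync still provides the release ordering.
 */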

static __inline__ int _raw_write_trylock(rwlock_t *rw)
{
	unsigned int tmp;
	unsigned int ret;

	__asm__ __volatile__(
"1:	lwarx		%0,0,%2		# write_trylock\n\
	cmpwi		0,%0,0\n\
	li		%1,0\n\
	bne-		2f\n\
	stwcx.		%3,0,%2\n\
	bne-		1b\n\
	li		%1,1\n\
	isync\n\
2:"	: "=&r"(tmp), "=&r"(ret)
	: "r"(&rw->lock), "r"(-1)
	: "cr0", "memory");

	return ret;
}

static __inline__ void _raw_write_lock(rwlock_t *rw)
{
	unsigned int tmp;

	__asm__ __volatile__(
	"b		2f		# write_lock\n\
1:"
	HMT_LOW
"	lwax		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	bne+		1b\n"
	HMT_MEDIUM
"2:	lwarx		%0,0,%1\n\
	cmpwi		0,%0,0\n\
	bne-		1b\n\
	stwcx.		%2,0,%1\n\
	bne-		2b\n\
	isync"
	: "=&r"(tmp)
	: "r"(&rw->lock), "r"(-1)
	: "cr0", "memory");
}
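
/*
 * Both write paths acquire by storing the "r"(-1) operand into the
 * lock word, so a writer can only get in once the word reads 0, i.e.
 * after every reader has dropped out; the relaxed lwax loop above
 * waits for that before competing with lwarx again.
 */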
#endif /* CONFIG_SPINLINE */

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */