vserver 1.9.3
[linux-2.6.git] / include / asm-ppc64 / spinlock.h
1 #ifndef __ASM_SPINLOCK_H
2 #define __ASM_SPINLOCK_H
3
4 /*
5  * Simple spin lock operations.  
6  *
7  * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
8  * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
9  * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
10  *      Rework to support virtual processors
11  *
12  * Type of int is used as a full 64b word is not necessary.
13  *
14  * This program is free software; you can redistribute it and/or
15  * modify it under the terms of the GNU General Public License
16  * as published by the Free Software Foundation; either version
17  * 2 of the License, or (at your option) any later version.
18  */
19 #include <linux/config.h>
20 #include <asm/paca.h>
21 #include <asm/hvcall.h>
22 #include <asm/iSeries/HvCall.h>
23
24 typedef struct {
25         volatile unsigned int lock;
26 } spinlock_t;
27
28 typedef struct {
29         volatile signed int lock;
30 } rwlock_t;
31
32 #ifdef __KERNEL__
33 #define SPIN_LOCK_UNLOCKED      (spinlock_t) { 0 }
34
35 #define spin_is_locked(x)       ((x)->lock != 0)
36 #define spin_lock_init(x)       do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
37
38 static __inline__ void _raw_spin_unlock(spinlock_t *lock)
39 {
40         __asm__ __volatile__("lwsync    # spin_unlock": : :"memory");
41         lock->lock = 0;
42 }
43
44 /*
45  * On a system with shared processors (that is, where a physical
46  * processor is multiplexed between several virtual processors),
47  * there is no point spinning on a lock if the holder of the lock
48  * isn't currently scheduled on a physical processor.  Instead
49  * we detect this situation and ask the hypervisor to give the
50  * rest of our timeslice to the lock holder.
51  *
52  * So that we can tell which virtual processor is holding a lock,
53  * we put 0x80000000 | smp_processor_id() in the lock when it is
54  * held.  Conveniently, we have a word in the paca that holds this
55  * value.
56  */
57
58 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
59 /* We only yield to the hypervisor if we are in shared processor mode */
60 #define SHARED_PROCESSOR (get_paca()->lppaca.xSharedProc)
61 extern void __spin_yield(spinlock_t *lock);
62 extern void __rw_yield(rwlock_t *lock);
63 #else /* SPLPAR || ISERIES */
64 #define __spin_yield(x) barrier()
65 #define __rw_yield(x)   barrier()
66 #define SHARED_PROCESSOR        0
67 #endif
68 extern void spin_unlock_wait(spinlock_t *lock);
69
70 /*
71  * This returns the old value in the lock, so we succeeded
72  * in getting the lock if the return value is 0.
73  */
74 static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
75 {
76         unsigned long tmp, tmp2;
77
78         __asm__ __volatile__(
79 "       lwz             %1,%3(13)               # __spin_trylock\n\
80 1:      lwarx           %0,0,%2\n\
81         cmpwi           0,%0,0\n\
82         bne-            2f\n\
83         stwcx.          %1,0,%2\n\
84         bne-            1b\n\
85         isync\n\
86 2:"     : "=&r" (tmp), "=&r" (tmp2)
87         : "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token))
88         : "cr0", "memory");
89
90         return tmp;
91 }
92
93 static int __inline__ _raw_spin_trylock(spinlock_t *lock)
94 {
95         return __spin_trylock(lock) == 0;
96 }
97
98 static void __inline__ _raw_spin_lock(spinlock_t *lock)
99 {
100         while (1) {
101                 if (likely(__spin_trylock(lock) == 0))
102                         break;
103                 do {
104                         HMT_low();
105                         if (SHARED_PROCESSOR)
106                                 __spin_yield(lock);
107                 } while (likely(lock->lock != 0));
108                 HMT_medium();
109         }
110 }
111
112 static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
113 {
114         unsigned long flags_dis;
115
116         while (1) {
117                 if (likely(__spin_trylock(lock) == 0))
118                         break;
119                 local_save_flags(flags_dis);
120                 local_irq_restore(flags);
121                 do {
122                         HMT_low();
123                         if (SHARED_PROCESSOR)
124                                 __spin_yield(lock);
125                 } while (likely(lock->lock != 0));
126                 HMT_medium();
127                 local_irq_restore(flags_dis);
128         }
129 }
130
131 /*
132  * Read-write spinlocks, allowing multiple readers
133  * but only one writer.
134  *
135  * NOTE! it is quite common to have readers in interrupts
136  * but no interrupt writers. For those circumstances we
137  * can "mix" irq-safe locks - any writer needs to get a
138  * irq-safe write-lock, but readers can get non-irqsafe
139  * read-locks.
140  */
141 #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
142
143 #define rwlock_init(x)          do { *(x) = RW_LOCK_UNLOCKED; } while(0)
144 #define rwlock_is_locked(x)     ((x)->lock)
145
146 static __inline__ int is_read_locked(rwlock_t *rw)
147 {
148         return rw->lock > 0;
149 }
150
151 static __inline__ int is_write_locked(rwlock_t *rw)
152 {
153         return rw->lock < 0;
154 }
155
156 static __inline__ void _raw_write_unlock(rwlock_t *rw)
157 {
158         __asm__ __volatile__("lwsync            # write_unlock": : :"memory");
159         rw->lock = 0;
160 }
161
162 /*
163  * This returns the old value in the lock + 1,
164  * so we got a read lock if the return value is > 0.
165  */
166 static long __inline__ __read_trylock(rwlock_t *rw)
167 {
168         long tmp;
169
170         __asm__ __volatile__(
171 "1:     lwarx           %0,0,%1         # read_trylock\n\
172         extsw           %0,%0\n\
173         addic.          %0,%0,1\n\
174         ble-            2f\n\
175         stwcx.          %0,0,%1\n\
176         bne-            1b\n\
177         isync\n\
178 2:"     : "=&r" (tmp)
179         : "r" (&rw->lock)
180         : "cr0", "xer", "memory");
181
182         return tmp;
183 }
184
185 static int __inline__ _raw_read_trylock(rwlock_t *rw)
186 {
187         return __read_trylock(rw) > 0;
188 }
189
190 static void __inline__ _raw_read_lock(rwlock_t *rw)
191 {
192         while (1) {
193                 if (likely(__read_trylock(rw) > 0))
194                         break;
195                 do {
196                         HMT_low();
197                         if (SHARED_PROCESSOR)
198                                 __rw_yield(rw);
199                 } while (likely(rw->lock < 0));
200                 HMT_medium();
201         }
202 }
203
204 static void __inline__ _raw_read_unlock(rwlock_t *rw)
205 {
206         long tmp;
207
208         __asm__ __volatile__(
209         "eieio                          # read_unlock\n\
210 1:      lwarx           %0,0,%1\n\
211         addic           %0,%0,-1\n\
212         stwcx.          %0,0,%1\n\
213         bne-            1b"
214         : "=&r"(tmp)
215         : "r"(&rw->lock)
216         : "cr0", "memory");
217 }
218
219 /*
220  * This returns the old value in the lock,
221  * so we got the write lock if the return value is 0.
222  */
223 static __inline__ long __write_trylock(rwlock_t *rw)
224 {
225         long tmp, tmp2;
226
227         __asm__ __volatile__(
228 "       lwz             %1,%3(13)       # write_trylock\n\
229 1:      lwarx           %0,0,%2\n\
230         cmpwi           0,%0,0\n\
231         bne-            2f\n\
232         stwcx.          %1,0,%2\n\
233         bne-            1b\n\
234         isync\n\
235 2:"     : "=&r" (tmp), "=&r" (tmp2)
236         : "r" (&rw->lock), "i" (offsetof(struct paca_struct, lock_token))
237         : "cr0", "memory");
238
239         return tmp;
240 }
241
242 static int __inline__ _raw_write_trylock(rwlock_t *rw)
243 {
244         return __write_trylock(rw) == 0;
245 }
246
247 static void __inline__ _raw_write_lock(rwlock_t *rw)
248 {
249         while (1) {
250                 if (likely(__write_trylock(rw) == 0))
251                         break;
252                 do {
253                         HMT_low();
254                         if (SHARED_PROCESSOR)
255                                 __rw_yield(rw);
256                 } while (likely(rw->lock != 0));
257                 HMT_medium();
258         }
259 }
260
261 #endif /* __KERNEL__ */
262 #endif /* __ASM_SPINLOCK_H */