VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
arch/ppc64/lib/locks.c
/*
 * Spin and read/write lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *   Rework to support virtual processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <asm/hvcall.h>
#include <asm/iSeries/HvCall.h>

#ifndef CONFIG_SPINLINE

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
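
/*
 * For illustration (hypothetical values): on virtual CPU 3 the paca
 * lock_token is 0x80000000 | 3 == 0x80000003, so a held lock reads
 * back as 0x80000003 and __spin_yield() below recovers the holder
 * with (lock_value & 0xffff) == 3.
 */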

/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)

/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_paca()->lppaca.xSharedProc)

void __spin_yield(spinlock_t *lock)
{
        unsigned int lock_value, holder_cpu, yield_count;
        struct paca_struct *holder_paca;

        lock_value = lock->lock;
        if (lock_value == 0)
                return;
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
        holder_paca = &paca[holder_cpu];
        yield_count = holder_paca->lppaca.xYieldCount;
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
        if (lock->lock != lock_value)
                return;         /* something has changed */
#ifdef CONFIG_PPC_ISERIES
        HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
                ((u64)holder_cpu << 32) | yield_count);
#else
        plpar_hcall_norets(H_CONFER, holder_cpu, yield_count);
#endif
}

#else /* SPLPAR || ISERIES */
#define __spin_yield(x) barrier()
#define SHARED_PROCESSOR        0
#endif

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
{
        unsigned long tmp, tmp2;

        __asm__ __volatile__(
"       lwz             %1,%3(13)               # __spin_trylock\n\
1:      lwarx           %0,0,%2\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n\
        stwcx.          %1,0,%2\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token))
        : "cr0", "memory");

        return tmp;
}
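
/*
 * Roughly equivalent C, for illustration only (the lwarx/stwcx.
 * reservation loop is modelled as a single compare-and-swap and the
 * isync acquire barrier is left implicit):
 *
 *      unsigned int token = get_paca()->lock_token; // 0x80000000 | cpu
 *      unsigned int old = __sync_val_compare_and_swap(&lock->lock, 0, token);
 *      return old;                                  // 0 => lock acquired
 */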

int _raw_spin_trylock(spinlock_t *lock)
{
        return __spin_trylock(lock) == 0;
}

EXPORT_SYMBOL(_raw_spin_trylock);

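/*
 * The lock loops below spin on a plain read of the lock word (and
 * optionally yield to the hypervisor) before retrying the atomic
 * trylock, so the reservation is only contended once the lock looks
 * free.  HMT_low()/HMT_medium() lower and restore the SMT hardware
 * thread priority while busy-waiting.
 */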
void _raw_spin_lock(spinlock_t *lock)
{
        while (1) {
                if (likely(__spin_trylock(lock) == 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
                } while (likely(lock->lock != 0));
                HMT_medium();
        }
}

EXPORT_SYMBOL(_raw_spin_lock);

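/*
 * Like _raw_spin_lock(), but while spinning we restore the caller's
 * saved interrupt state (flags) so interrupts can be serviced, after
 * stashing the current state in flags_dis; that state is put back
 * before the trylock is retried.
 */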
void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
        unsigned long flags_dis;

        while (1) {
                if (likely(__spin_trylock(lock) == 0))
                        break;
                local_save_flags(flags_dis);
                local_irq_restore(flags);
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
                } while (likely(lock->lock != 0));
                HMT_medium();
                local_irq_restore(flags_dis);
        }
}

EXPORT_SYMBOL(_raw_spin_lock_flags);

void spin_unlock_wait(spinlock_t *lock)
{
        while (lock->lock) {
                HMT_low();
                if (SHARED_PROCESSOR)
                        __spin_yield(lock);
        }
        HMT_medium();
}

EXPORT_SYMBOL(spin_unlock_wait);

/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
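
/*
 * For reference, the rwlock word as used below: a positive value is
 * the number of readers holding the lock, 0 means unlocked, and a
 * negative value is the write holder's token (0x80000000 | cpu, taken
 * from the paca by __write_trylock()), which is why only a write
 * holder can be identified here.
 */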
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
void __rw_yield(rwlock_t *rw)
{
        int lock_value;
        unsigned int holder_cpu, yield_count;
        struct paca_struct *holder_paca;

        lock_value = rw->lock;
        if (lock_value >= 0)
                return;         /* no write lock at present */
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
        holder_paca = &paca[holder_cpu];
        yield_count = holder_paca->lppaca.xYieldCount;
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
        if (rw->lock != lock_value)
                return;         /* something has changed */
#ifdef CONFIG_PPC_ISERIES
        HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
                ((u64)holder_cpu << 32) | yield_count);
#else
        plpar_hcall_norets(H_CONFER, holder_cpu, yield_count);
#endif
}

#else /* SPLPAR || ISERIES */
#define __rw_yield(x)   barrier()
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static __inline__ long __read_trylock(rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%1         # read_trylock\n\
        extsw           %0,%0\n\
        addic.          %0,%0,1\n\
        ble-            2f\n\
        stwcx.          %0,0,%1\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r" (tmp)
        : "r" (&rw->lock)
        : "cr0", "xer", "memory");

        return tmp;
}
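
/*
 * Roughly equivalent C, for illustration only (the lwarx/stwcx.
 * reservation loop is modelled as an atomic update that only takes
 * effect when no writer holds the lock):
 *
 *      long old = (int)rw->lock;       // sign-extended, as extsw does
 *      if (old + 1 > 0)                // fails while write-locked (old < 0)
 *              rw->lock = old + 1;     // done atomically, then isync
 *      return old + 1;                 // > 0 means we got a read lock
 */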

int _raw_read_trylock(rwlock_t *rw)
{
        return __read_trylock(rw) > 0;
}

EXPORT_SYMBOL(_raw_read_trylock);

void _raw_read_lock(rwlock_t *rw)
{
        while (1) {
                if (likely(__read_trylock(rw) > 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __rw_yield(rw);
                } while (likely(rw->lock < 0));
                HMT_medium();
        }
}

EXPORT_SYMBOL(_raw_read_lock);

void _raw_read_unlock(rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
        "eieio                          # read_unlock\n\
1:      lwarx           %0,0,%1\n\
        addic           %0,%0,-1\n\
        stwcx.          %0,0,%1\n\
        bne-            1b"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
}

EXPORT_SYMBOL(_raw_read_unlock);

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static __inline__ long __write_trylock(rwlock_t *rw)
{
        long tmp, tmp2;

        __asm__ __volatile__(
"       lwz             %1,%3(13)       # write_trylock\n\
1:      lwarx           %0,0,%2\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n\
        stwcx.          %1,0,%2\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock), "i" (offsetof(struct paca_struct, lock_token))
        : "cr0", "memory");

        return tmp;
}

int _raw_write_trylock(rwlock_t *rw)
{
        return __write_trylock(rw) == 0;
}

EXPORT_SYMBOL(_raw_write_trylock);

void _raw_write_lock(rwlock_t *rw)
{
        while (1) {
                if (likely(__write_trylock(rw) == 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __rw_yield(rw);
                } while (likely(rw->lock != 0));
                HMT_medium();
        }
}

EXPORT_SYMBOL(_raw_write_lock);

#endif /* CONFIG_SPINLINE */