arch/ppc64/lib/locks.c
/*
 * Spin and read/write lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *   Rework to support virtual processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/hvcall.h>
#include <asm/iSeries/HvCall.h>

#ifndef CONFIG_SPINLINE

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
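
/*
 * Illustration (an editorial addition, not in the original sources):
 * with NR_CPUS well below 0x10000, a lock held by virtual CPU 3
 * contains 0x80000003, so the holder is recovered with
 *
 *      holder_cpu = lock_value & 0xffff;       -> 3
 *
 * while a free lock is simply 0, so "held" and "free" can never be
 * confused.
 */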

/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
void __spin_yield(spinlock_t *lock)
{
        unsigned int lock_value, holder_cpu, yield_count;
        struct paca_struct *holder_paca;

        lock_value = lock->lock;
        if (lock_value == 0)
                return;
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
        holder_paca = &paca[holder_cpu];
        yield_count = holder_paca->xLpPaca.xYieldCount;
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
        if (lock->lock != lock_value)
                return;         /* something has changed */
#ifdef CONFIG_PPC_ISERIES
        HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
                ((u64)holder_cpu << 32) | yield_count);
#else
        plpar_hcall_norets(H_CONFER, holder_cpu, yield_count);
#endif
}

#else /* SPLPAR || ISERIES */
#define __spin_yield(x) barrier()
#endif
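
/*
 * Note on the protocol assumed above (an editorial reading of the
 * code, not from the original comments): the hypervisor bumps the
 * holder's yield count on each dispatch and each preemption, so an
 * even value means the virtual cpu is currently running and an odd
 * value means it has been preempted.  Passing the observed count to
 * H_CONFER/HvCall_YieldToProc lets the hypervisor discard the yield
 * if the holder has been dispatched again in the meantime; the rmb()
 * and the re-check of lock->lock guard against conferring our
 * timeslice to a stale holder.
 */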

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
{
        unsigned long tmp, tmp2;

        /* r13 is the paca pointer; the word at 24(r13) holds the
           0x80000000 | smp_processor_id() token described above */
        __asm__ __volatile__(
"       lwz             %1,24(13)               # __spin_trylock\n\
1:      lwarx           %0,0,%2\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n\
        stwcx.          %1,0,%2\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&lock->lock)
        : "cr0", "memory");

        return tmp;
}
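
/*
 * Illustrative user-space analogue of __spin_trylock (an editorial
 * addition, compiled out; "token" stands for the paca word loaded
 * from 24(r13)).  The lwarx/stwcx. pair above is a hand-rolled
 * compare-and-swap with acquire semantics (the trailing isync).
 */
#if 0
static unsigned int spin_trylock_sketch(volatile unsigned int *lck,
                                        unsigned int token)
{
        unsigned int old = 0;

        /* on failure, old is updated to the current holder's token */
        __atomic_compare_exchange_n(lck, &old, token, 0,
                                    __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
        return old;             /* 0 => lock acquired, as above */
}
#endif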

int _raw_spin_trylock(spinlock_t *lock)
{
        return __spin_trylock(lock) == 0;
}

EXPORT_SYMBOL(_raw_spin_trylock);

void _raw_spin_lock(spinlock_t *lock)
{
        while (1) {
                if (likely(__spin_trylock(lock) == 0))
                        break;
                do {
                        HMT_low();      /* lower SMT thread priority while spinning */
                        __spin_yield(lock);
                } while (likely(lock->lock != 0));
                HMT_medium();   /* back to normal priority before retrying */
        }
}

EXPORT_SYMBOL(_raw_spin_lock);

void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
        unsigned long flags_dis;

        while (1) {
                if (likely(__spin_trylock(lock) == 0))
                        break;
                /* restore the caller's interrupt state while we spin,
                   so interrupts aren't held off for the whole wait */
                local_save_flags(flags_dis);
                local_irq_restore(flags);
                do {
                        HMT_low();
                        __spin_yield(lock);
                } while (likely(lock->lock != 0));
                HMT_medium();
                local_irq_restore(flags_dis);
        }
}

EXPORT_SYMBOL(_raw_spin_lock_flags);

void spin_unlock_wait(spinlock_t *lock)
{
        while (lock->lock)
                __spin_yield(lock);
}

EXPORT_SYMBOL(spin_unlock_wait);

/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
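
/*
 * Layout note (inferred from the code below, not from the original
 * comments): the rwlock word is treated as a signed value.  A positive
 * value counts the readers holding the lock; the write-lock token
 * 0x80000000 | smp_processor_id() is negative.  That is why a negative
 * word identifies the one holder we can yield to, while read holders
 * are anonymous.
 */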
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
void __rw_yield(rwlock_t *rw)
{
        int lock_value;
        unsigned int holder_cpu, yield_count;
        struct paca_struct *holder_paca;

        lock_value = rw->lock;
        if (lock_value >= 0)
                return;         /* no write lock at present */
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
        holder_paca = &paca[holder_cpu];
        yield_count = holder_paca->xLpPaca.xYieldCount;
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
        if (rw->lock != lock_value)
                return;         /* something has changed */
#ifdef CONFIG_PPC_ISERIES
        HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
                ((u64)holder_cpu << 32) | yield_count);
#else
        plpar_hcall_norets(H_CONFER, holder_cpu, yield_count);
#endif
}

#else /* SPLPAR || ISERIES */
#define __rw_yield(x)   barrier()
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static __inline__ long __read_trylock(rwlock_t *rw)
{
        long tmp;

        /* extsw treats the 32-bit lock word as signed (negative means
           write-locked); addic. sets XER[CA], hence the "xer" clobber */
        __asm__ __volatile__(
"1:     lwarx           %0,0,%1         # read_trylock\n\
        extsw           %0,%0\n\
        addic.          %0,%0,1\n\
        ble-            2f\n\
        stwcx.          %0,0,%1\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r" (tmp)
        : "r" (&rw->lock)
        : "cr0", "xer", "memory");

        return tmp;
}
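
/*
 * Illustrative user-space analogue (an editorial addition, compiled
 * out): increment the signed reader count unless the word is
 * negative, i.e. write-locked.
 */
#if 0
static long read_trylock_sketch(volatile int *lck)
{
        int old = __atomic_load_n(lck, __ATOMIC_RELAXED);

        while (old >= 0) {
                if (__atomic_compare_exchange_n(lck, &old, old + 1, 0,
                                                __ATOMIC_ACQUIRE,
                                                __ATOMIC_RELAXED))
                        break;  /* old is left unchanged on success */
        }
        return (long)old + 1;   /* > 0 => got the read lock, as above */
}
#endif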

int _raw_read_trylock(rwlock_t *rw)
{
        return __read_trylock(rw) > 0;
}

EXPORT_SYMBOL(_raw_read_trylock);

void _raw_read_lock(rwlock_t *rw)
{
        while (1) {
                if (likely(__read_trylock(rw) > 0))
                        break;
                do {
                        HMT_low();
                        __rw_yield(rw);
                } while (likely(rw->lock < 0));
                HMT_medium();
        }
}

EXPORT_SYMBOL(_raw_read_lock);

void _raw_read_unlock(rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
        "eieio                          # read_unlock\n\
1:      lwarx           %0,0,%1\n\
        addic           %0,%0,-1\n\
        stwcx.          %0,0,%1\n\
        bne-            1b"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
}

EXPORT_SYMBOL(_raw_read_unlock);
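
/*
 * Note (an editorial addition): the eieio in _raw_read_unlock is the
 * store-side release barrier: it orders the critical section's stores
 * ahead of the lwarx/stwcx. loop that atomically decrements the
 * reader count, and is cheaper than a full sync.
 */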

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static __inline__ long __write_trylock(rwlock_t *rw)
{
        long tmp, tmp2;

        /* store the same 0x80000000|cpu paca token as __spin_trylock,
           which is how __rw_yield() can identify the writer */
        __asm__ __volatile__(
"       lwz             %1,24(13)               # write_trylock\n\
1:      lwarx           %0,0,%2\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n\
        stwcx.          %1,0,%2\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock)
        : "cr0", "memory");

        return tmp;
}

int _raw_write_trylock(rwlock_t *rw)
{
        return __write_trylock(rw) == 0;
}

EXPORT_SYMBOL(_raw_write_trylock);

void _raw_write_lock(rwlock_t *rw)
{
        while (1) {
                if (likely(__write_trylock(rw) == 0))
                        break;
                do {
                        HMT_low();
                        __rw_yield(rw);
                } while (likely(rw->lock != 0));
                HMT_medium();
        }
}

EXPORT_SYMBOL(_raw_write_lock);

#endif /* CONFIG_SPINLINE */