/*
 * Spin and read/write lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *   Rework to support virtual processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/hvcall.h>
#include <asm/iSeries/HvCall.h>

#ifndef CONFIG_SPINLINE
/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
/* waiting for a spinlock... */
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
40 void __spin_yield(spinlock_t *lock)
42 unsigned int lock_value, holder_cpu, yield_count;
43 struct paca_struct *holder_paca;
45 lock_value = lock->lock;
48 holder_cpu = lock_value & 0xffff;
49 BUG_ON(holder_cpu >= NR_CPUS);
50 holder_paca = &paca[holder_cpu];
51 yield_count = holder_paca->xLpPaca.xYieldCount;
52 if ((yield_count & 1) == 0)
53 return; /* virtual cpu is currently running */
55 if (lock->lock != lock_value)
56 return; /* something has changed */
57 #ifdef CONFIG_PPC_ISERIES
58 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
59 ((u64)holder_cpu << 32) | yield_count);
61 plpar_hcall_norets(H_CONFER, holder_cpu, yield_count);
65 #else /* SPLPAR || ISERIES */
66 #define __spin_yield(x) barrier()
70 * This returns the old value in the lock, so we succeeded
71 * in getting the lock if the return value is 0.
73 static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
75 unsigned long tmp, tmp2;
78 " lwz %1,24(13) # __spin_trylock\n\
85 2:" : "=&r" (tmp), "=&r" (tmp2)
92 int _raw_spin_trylock(spinlock_t *lock)
94 return __spin_trylock(lock) == 0;
97 EXPORT_SYMBOL(_raw_spin_trylock);
99 void _raw_spin_lock(spinlock_t *lock)
102 if (likely(__spin_trylock(lock) == 0))
107 } while (likely(lock->lock != 0));
112 EXPORT_SYMBOL(_raw_spin_lock);
114 void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
116 unsigned long flags_dis;
119 if (likely(__spin_trylock(lock) == 0))
121 local_save_flags(flags_dis);
122 local_irq_restore(flags);
126 } while (likely(lock->lock != 0));
128 local_irq_restore(flags_dis);
132 EXPORT_SYMBOL(_raw_spin_lock_flags);
134 void spin_unlock_wait(spinlock_t *lock)
140 EXPORT_SYMBOL(spin_unlock_wait);
/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
148 void __rw_yield(rwlock_t *rw)
151 unsigned int holder_cpu, yield_count;
152 struct paca_struct *holder_paca;
154 lock_value = rw->lock;
156 return; /* no write lock at present */
157 holder_cpu = lock_value & 0xffff;
158 BUG_ON(holder_cpu >= NR_CPUS);
159 holder_paca = &paca[holder_cpu];
160 yield_count = holder_paca->xLpPaca.xYieldCount;
161 if ((yield_count & 1) == 0)
162 return; /* virtual cpu is currently running */
164 if (rw->lock != lock_value)
165 return; /* something has changed */
166 #ifdef CONFIG_PPC_ISERIES
167 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
168 ((u64)holder_cpu << 32) | yield_count);
170 plpar_hcall_norets(H_CONFER, holder_cpu, yield_count);
174 #else /* SPLPAR || ISERIES */
175 #define __rw_yield(x) barrier()
179 * This returns the old value in the lock + 1,
180 * so we got a read lock if the return value is > 0.
182 static __inline__ long __read_trylock(rwlock_t *rw)
186 __asm__ __volatile__(
187 "1: lwarx %0,0,%1 # read_trylock\n\
196 : "cr0", "xer", "memory");
201 int _raw_read_trylock(rwlock_t *rw)
203 return __read_trylock(rw) > 0;
206 EXPORT_SYMBOL(_raw_read_trylock);
208 void _raw_read_lock(rwlock_t *rw)
211 if (likely(__read_trylock(rw) > 0))
216 } while (likely(rw->lock < 0));
221 EXPORT_SYMBOL(_raw_read_lock);
223 void _raw_read_unlock(rwlock_t *rw)
227 __asm__ __volatile__(
228 "eieio # read_unlock\n\
238 EXPORT_SYMBOL(_raw_read_unlock);
241 * This returns the old value in the lock,
242 * so we got the write lock if the return value is 0.
244 static __inline__ long __write_trylock(rwlock_t *rw)
248 __asm__ __volatile__(
249 " lwz %1,24(13) # write_trylock\n\
256 2:" : "=&r" (tmp), "=&r" (tmp2)
263 int _raw_write_trylock(rwlock_t *rw)
265 return __write_trylock(rw) == 0;
268 EXPORT_SYMBOL(_raw_write_trylock);
270 void _raw_write_lock(rwlock_t *rw)
273 if (likely(__write_trylock(rw) == 0))
278 } while (likely(rw->lock != 0));
283 EXPORT_SYMBOL(_raw_write_lock);
#endif /* CONFIG_SPINLINE */