/*
 * Spin and read/write lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *   Rework to support virtual processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
15 #include <linux/config.h>
16 #include <linux/kernel.h>
17 #include <linux/spinlock.h>
18 #include <linux/module.h>
19 #include <linux/stringify.h>
20 #include <asm/hvcall.h>
21 #include <asm/iSeries/HvCall.h>
23 #ifndef CONFIG_SPINLINE
/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */
39 /* waiting for a spinlock... */
40 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
42 /* We only yield to the hypervisor if we are in shared processor mode */
43 #define SHARED_PROCESSOR (get_paca()->lppaca.xSharedProc)
45 void __spin_yield(spinlock_t *lock)
47 unsigned int lock_value, holder_cpu, yield_count;
48 struct paca_struct *holder_paca;
50 lock_value = lock->lock;
53 holder_cpu = lock_value & 0xffff;
54 BUG_ON(holder_cpu >= NR_CPUS);
55 holder_paca = &paca[holder_cpu];
56 yield_count = holder_paca->lppaca.xYieldCount;
57 if ((yield_count & 1) == 0)
58 return; /* virtual cpu is currently running */
60 if (lock->lock != lock_value)
61 return; /* something has changed */
62 #ifdef CONFIG_PPC_ISERIES
63 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
64 ((u64)holder_cpu << 32) | yield_count);
66 plpar_hcall_norets(H_CONFER, holder_cpu, yield_count);
70 #else /* SPLPAR || ISERIES */
71 #define __spin_yield(x) barrier()
72 #define SHARED_PROCESSOR 0
/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
79 static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
81 unsigned long tmp, tmp2;
84 " lwz %1,%3(13) # __spin_trylock\n\
91 2:" : "=&r" (tmp), "=&r" (tmp2)
92 : "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token))
98 int _raw_spin_trylock(spinlock_t *lock)
100 return __spin_trylock(lock) == 0;
103 EXPORT_SYMBOL(_raw_spin_trylock);
105 void _raw_spin_lock(spinlock_t *lock)
108 if (likely(__spin_trylock(lock) == 0))
112 if (SHARED_PROCESSOR)
114 } while (likely(lock->lock != 0));
119 EXPORT_SYMBOL(_raw_spin_lock);
121 void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
123 unsigned long flags_dis;
126 if (likely(__spin_trylock(lock) == 0))
128 local_save_flags(flags_dis);
129 local_irq_restore(flags);
132 if (SHARED_PROCESSOR)
134 } while (likely(lock->lock != 0));
136 local_irq_restore(flags_dis);
140 EXPORT_SYMBOL(_raw_spin_lock_flags);
142 void spin_unlock_wait(spinlock_t *lock)
146 if (SHARED_PROCESSOR)
152 EXPORT_SYMBOL(spin_unlock_wait);
/*
 * Waiting for a read lock or a write lock on a rwlock...
 * This turns out to be the same for read and write locks, since
 * we only know the holder if it is write-locked.
 */
159 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
160 void __rw_yield(rwlock_t *rw)
163 unsigned int holder_cpu, yield_count;
164 struct paca_struct *holder_paca;
166 lock_value = rw->lock;
168 return; /* no write lock at present */
169 holder_cpu = lock_value & 0xffff;
170 BUG_ON(holder_cpu >= NR_CPUS);
171 holder_paca = &paca[holder_cpu];
172 yield_count = holder_paca->lppaca.xYieldCount;
173 if ((yield_count & 1) == 0)
174 return; /* virtual cpu is currently running */
176 if (rw->lock != lock_value)
177 return; /* something has changed */
178 #ifdef CONFIG_PPC_ISERIES
179 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc,
180 ((u64)holder_cpu << 32) | yield_count);
182 plpar_hcall_norets(H_CONFER, holder_cpu, yield_count);
186 #else /* SPLPAR || ISERIES */
187 #define __rw_yield(x) barrier()
/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
194 static __inline__ long __read_trylock(rwlock_t *rw)
198 __asm__ __volatile__(
199 "1: lwarx %0,0,%1 # read_trylock\n\
208 : "cr0", "xer", "memory");
213 int _raw_read_trylock(rwlock_t *rw)
215 return __read_trylock(rw) > 0;
218 EXPORT_SYMBOL(_raw_read_trylock);
220 void _raw_read_lock(rwlock_t *rw)
223 if (likely(__read_trylock(rw) > 0))
227 if (SHARED_PROCESSOR)
229 } while (likely(rw->lock < 0));
234 EXPORT_SYMBOL(_raw_read_lock);
236 void _raw_read_unlock(rwlock_t *rw)
240 __asm__ __volatile__(
241 "eieio # read_unlock\n\
251 EXPORT_SYMBOL(_raw_read_unlock);
/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
257 static __inline__ long __write_trylock(rwlock_t *rw)
261 __asm__ __volatile__(
262 " lwz %1,%3(13) # write_trylock\n\
269 2:" : "=&r" (tmp), "=&r" (tmp2)
270 : "r" (&rw->lock), "i" (offsetof(struct paca_struct, lock_token))
276 int _raw_write_trylock(rwlock_t *rw)
278 return __write_trylock(rw) == 0;
281 EXPORT_SYMBOL(_raw_write_trylock);
283 void _raw_write_lock(rwlock_t *rw)
286 if (likely(__write_trylock(rw) == 0))
290 if (SHARED_PROCESSOR)
292 } while (likely(rw->lock != 0));
297 EXPORT_SYMBOL(_raw_write_lock);
299 #endif /* CONFIG_SPINLINE */