#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used for the lock word, as a full 64-bit word is not
 * necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iSeries/HvCall.h>

typedef struct {
	volatile unsigned int lock;
} spinlock_t;

typedef struct {
	volatile signed int lock;
} rwlock_t;

#ifdef __KERNEL__
#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }

#define spin_is_locked(x)	((x)->lock != 0)
#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)

static __inline__ void _raw_spin_unlock(spinlock_t *lock)
{
	/* lwsync orders the critical section before the releasing store */
	__asm__ __volatile__("lwsync	# spin_unlock": : :"memory");
	lock->lock = 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_paca()->lppaca.xSharedProc)
extern void __spin_yield(spinlock_t *lock);
extern void __rw_yield(rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif
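
/*
 * __spin_yield() and __rw_yield() are implemented out of line, in
 * arch/ppc64/lib/locks.c.  As a minimal sketch only (the mask and the
 * hypervisor call are simplified assumptions here, not the definitive
 * implementation), the spin variant recovers the holder's virtual
 * processor id from the lock word described above and confers the
 * rest of our timeslice to it:
 *
 *	void __spin_yield(spinlock_t *lock)
 *	{
 *		unsigned int lock_value = lock->lock;
 *		unsigned int holder_cpu;
 *
 *		if (lock_value == 0)
 *			return;			// released meanwhile
 *		// holder's id, since the word is 0x80000000 | id
 *		holder_cpu = lock_value & 0x7fffffff;
 *		// ...hypervisor yield/confer call naming holder_cpu...
 *	}
 */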

extern void spin_unlock_wait(spinlock_t *lock);

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
{
	unsigned long tmp, tmp2;

	/*
	 * Load our lock token from the paca (r13), then try to swap it
	 * into the lock word with a lwarx/stwcx. pair; isync keeps the
	 * critical section from starting before the lock is held.
	 */
	__asm__ __volatile__(
"	lwz		%1,%3(13)		# __spin_trylock\n\
1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token))
	: "cr0", "memory");

	return tmp;
}

static int __inline__ _raw_spin_trylock(spinlock_t *lock)
{
	return __spin_trylock(lock) == 0;
}
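
/*
 * Illustrative use only (mylock is a hypothetical spinlock_t; callers
 * normally go through the generic spin_trylock() wrapper rather than
 * calling _raw_spin_trylock() directly):
 *
 *	if (spin_trylock(&mylock)) {
 *		... critical section ...
 *		spin_unlock(&mylock);
 *	} else {
 *		... lock busy, take the fallback path ...
 *	}
 */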

static void __inline__ _raw_spin_lock(spinlock_t *lock)
{
	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		do {
			/* drop SMT thread priority while busy-waiting */
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->lock != 0));
		HMT_medium();
	}
}

static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	while (1) {
		if (likely(__spin_trylock(lock) == 0))
			break;
		/* remember the irqs-off state, then let the caller's
		 * original interrupt state back in while we spin */
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->lock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
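
/*
 * Illustrative call path only (mylock is hypothetical): this is the
 * slow path behind spin_lock_irqsave(), which saves the caller's
 * interrupt state in `flags' and disables interrupts before calling
 * down here; re-enabling while we spin keeps interrupt latency down.
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&mylock, flags);
 *	... critical section, interrupts off ...
 *	spin_unlock_irqrestore(&mylock, flags);
 */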

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
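
/*
 * Illustrative example of the "mixed" usage above (my_rwlock and both
 * contexts are hypothetical):
 *
 *	// process context: the writer must exclude interrupt readers
 *	// on this cpu, so the write lock has to be irq-safe
 *	write_lock_irq(&my_rwlock);
 *	... update the data ...
 *	write_unlock_irq(&my_rwlock);
 *
 *	// interrupt handler: readers never exclude other readers, so
 *	// a plain, non-irq-safe read lock is enough
 *	read_lock(&my_rwlock);
 *	... look up the data ...
 *	read_unlock(&my_rwlock);
 */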

#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }

#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
#define rwlock_is_locked(x)	((x)->lock)

static __inline__ int is_read_locked(rwlock_t *rw)
{
	/* a positive count == that many readers hold the lock */
	return rw->lock > 0;
}

static __inline__ int is_write_locked(rwlock_t *rw)
{
	/* a writer stores its (negative, as signed) lock token */
	return rw->lock < 0;
}

static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
	__asm__ __volatile__("lwsync		# write_unlock": : :"memory");
	rw->lock = 0;
}

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static long __inline__ __read_trylock(rwlock_t *rw)
{
	long tmp;

	/*
	 * Atomically increment the reader count unless the lock is
	 * write-locked (negative); addic. sets the carry bit, hence
	 * the "xer" clobber.
	 */
	__asm__ __volatile__(
"1:	lwarx		%0,0,%1		# read_trylock\n\
	extsw		%0,%0\n\
	addic.		%0,%0,1\n\
	ble-		2f\n\
	stwcx.		%0,0,%1\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}

static int __inline__ _raw_read_trylock(rwlock_t *rw)
{
	return __read_trylock(rw) > 0;
}
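
/*
 * Worked example of the lock-word encoding (values illustrative):
 * rw->lock == 0 means unlocked, a positive value is the number of
 * active readers, and a negative value means write-locked.  So
 * __read_trylock() returns 0 + 1 = 1 on an unlocked lock (success),
 * 2 + 1 = 3 when two readers already hold it (success), and
 * old + 1 <= 0 when it is write-locked (failure).
 */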

static void __inline__ _raw_read_lock(rwlock_t *rw)
{
	while (1) {
		if (likely(__read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static void __inline__ _raw_read_unlock(rwlock_t *rw)
{
	long tmp;

	/* eieio orders the critical section before the decrement */
	__asm__ __volatile__(
	"eieio				# read_unlock\n\
1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n\
	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "memory");
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static __inline__ long __write_trylock(rwlock_t *rw)
{
	long tmp, tmp2;

	/* same token-swap sequence as __spin_trylock() above */
	__asm__ __volatile__(
"	lwz		%1,%3(13)	# write_trylock\n\
1:	lwarx		%0,0,%2\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n\
	isync\n\
2:"	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock), "i" (offsetof(struct paca_struct, lock_token))
	: "cr0", "memory");

	return tmp;
}

static int __inline__ _raw_write_trylock(rwlock_t *rw)
{
	return __write_trylock(rw) == 0;
}

static void __inline__ _raw_write_lock(rwlock_t *rw)
{
	while (1) {
		if (likely(__write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */