#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

/*
* Simple spin lock operations.
*
- * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM
+ * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
* Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
+ * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
+ * Rework to support virtual processors
*
* Type of int is used as a full 64b word is not necessary.
*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/config.h>
+#include <asm/paca.h>
+#include <asm/hvcall.h>
+#include <asm/iSeries/HvCall.h>
+
typedef struct {
volatile unsigned int lock;
+#ifdef CONFIG_PREEMPT
+ unsigned int break_lock;
+#endif
} spinlock_t;
+typedef struct {
+ volatile signed int lock;
+#ifdef CONFIG_PREEMPT
+ unsigned int break_lock;
+#endif
+} rwlock_t;
+
#ifdef __KERNEL__
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define spin_is_locked(x) ((x)->lock != 0)
+#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+
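+/*
+ * Unlock is a plain store of 0; the preceding lwsync orders all loads
+ * and stores in the critical section before the store that releases
+ * the lock.
+ */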
+static __inline__ void _raw_spin_unlock(spinlock_t *lock)
+{
+ __asm__ __volatile__("lwsync # spin_unlock": : :"memory");
+ lock->lock = 0;
+}
-static __inline__ int _raw_spin_trylock(spinlock_t *lock)
+/*
+ * On a system with shared processors (that is, where a physical
+ * processor is multiplexed between several virtual processors),
+ * there is no point spinning on a lock if the holder of the lock
+ * isn't currently scheduled on a physical processor. Instead
+ * we detect this situation and ask the hypervisor to give the
+ * rest of our timeslice to the lock holder.
+ *
+ * So that we can tell which virtual processor is holding a lock,
+ * we put 0x80000000 | smp_processor_id() in the lock when it is
+ * held. Conveniently, we have a word in the paca that holds this
+ * value.
+ */
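+/*
+ * Illustration only, not part of this patch: given the token scheme
+ * above, a spinner can recover the holding virtual processor from
+ * the lock word itself.  A hypothetical helper could look like the
+ * sketch below; the real yield code is the out-of-line
+ * __spin_yield()/__rw_yield() declared further down.
+ */
+#if 0
+static unsigned int example_lock_holder(spinlock_t *lock)
+{
+	unsigned int lock_value = lock->lock;
+
+	if (lock_value == 0)
+		return NR_CPUS;			/* unlocked: no holder */
+	/* strip the 0x80000000 flag to recover smp_processor_id() */
+	return lock_value & ~0x80000000;
+}
+#endif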
+
+#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
+/* We only yield to the hypervisor if we are in shared processor mode */
+#define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
+extern void __spin_yield(spinlock_t *lock);
+extern void __rw_yield(rwlock_t *lock);
+#else /* SPLPAR || ISERIES */
+#define __spin_yield(x) barrier()
+#define __rw_yield(x) barrier()
+#define SHARED_PROCESSOR 0
+#endif
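+/* now out of line, presumably so its wait loop can use the yield logic above */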
+extern void spin_unlock_wait(spinlock_t *lock);
+
+/*
+ * This returns the old value in the lock, so we succeeded
+ * in getting the lock if the return value is 0.
+ */
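+/*
+ * r13 holds the paca pointer on ppc64, so the lwz below fetches this
+ * cpu's lock token (0x80000000 | smp_processor_id()) straight from
+ * the paca.
+ */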
+static __inline__ unsigned long __spin_trylock(spinlock_t *lock)
{
- unsigned int tmp;
+ unsigned long tmp, tmp2;
__asm__ __volatile__(
-"1: lwarx %0,0,%1 # spin_trylock\n\
+" lwz %1,%3(13) # __spin_trylock\n\
+1: lwarx %0,0,%2\n\
cmpwi 0,%0,0\n\
- li %0,0\n\
bne- 2f\n\
- li %0,1\n\
- stwcx. %0,0,%1\n\
+ stwcx. %1,0,%2\n\
bne- 1b\n\
isync\n\
-2:" : "=&r"(tmp)
- : "r"(&lock->lock)
+2:" : "=&r" (tmp), "=&r" (tmp2)
+ : "r" (&lock->lock), "i" (offsetof(struct paca_struct, lock_token))
: "cr0", "memory");
return tmp;
}
-static __inline__ void _raw_spin_lock(spinlock_t *lock)
+static int __inline__ _raw_spin_trylock(spinlock_t *lock)
{
- unsigned int tmp;
+ return __spin_trylock(lock) == 0;
+}
- __asm__ __volatile__(
- "b 2f # spin_lock\n\
-1:"
- HMT_LOW
-" lwzx %0,0,%1\n\
- cmpwi 0,%0,0\n\
- bne+ 1b\n"
- HMT_MEDIUM
-"2: lwarx %0,0,%1\n\
- cmpwi 0,%0,0\n\
- bne- 1b\n\
- stwcx. %2,0,%1\n\
- bne- 2b\n\
- isync"
- : "=&r"(tmp)
- : "r"(&lock->lock), "r"(1)
- : "cr0", "memory");
+static void __inline__ _raw_spin_lock(spinlock_t *lock)
+{
+ while (1) {
+ if (likely(__spin_trylock(lock) == 0))
+ break;
+ do {
+ HMT_low();
+ if (SHARED_PROCESSOR)
+ __spin_yield(lock);
+ } while (unlikely(lock->lock != 0));
+ HMT_medium();
+ }
}
-static __inline__ void _raw_spin_unlock(spinlock_t *lock)
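+/*
+ * As _raw_spin_lock, but while we spin we restore the caller's saved
+ * interrupt flags (possibly re-enabling interrupts) and disable them
+ * again before each retry, so interrupts are not held off for the
+ * whole wait.
+ */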
+static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags)
{
- __asm__ __volatile__("eieio # spin_unlock": : :"memory");
- lock->lock = 0;
+ unsigned long flags_dis;
+
+ while (1) {
+ if (likely(__spin_trylock(lock) == 0))
+ break;
+ local_save_flags(flags_dis);
+ local_irq_restore(flags);
+ do {
+ HMT_low();
+ if (SHARED_PROCESSOR)
+ __spin_yield(lock);
+ } while (unlikely(lock->lock != 0));
+ HMT_medium();
+ local_irq_restore(flags_dis);
+ }
}
/*
* irq-safe write-lock, but readers can get non-irqsafe
* read-locks.
*/
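+/*
+ * Illustration only (intended usage, not code from this file):
+ * writers always disable interrupts, readers need not, e.g.
+ *
+ *	reader (any context):		writer:
+ *	read_lock(&rw);			write_lock_irqsave(&rw, flags);
+ *	...				...
+ *	read_unlock(&rw);		write_unlock_irqrestore(&rw, flags);
+ */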
-typedef struct {
- volatile signed int lock;
-} rwlock_t;
-
#define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
-static __inline__ int _raw_read_trylock(rwlock_t *rw)
+#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+
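+/*
+ * The rwlock word encodes its state directly: 0 means unlocked, a
+ * positive value is the number of active readers, and a negative
+ * value (a writer's lock token, which has the top bit set) means
+ * write-locked.
+ */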
+#define read_can_lock(rw) ((rw)->lock >= 0)
+#define write_can_lock(rw) (!(rw)->lock)
+
+static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
- unsigned int tmp;
- unsigned int ret;
+ __asm__ __volatile__("lwsync # write_unlock": : :"memory");
+ rw->lock = 0;
+}
+
+/*
+ * This returns the old value in the lock + 1,
+ * so we got a read lock if the return value is > 0.
+ */
+static long __inline__ __read_trylock(rwlock_t *rw)
+{
+ long tmp;
__asm__ __volatile__(
-"1: lwarx %0,0,%2 # read_trylock\n\
- li %1,0\n\
+"1: lwarx %0,0,%1 # read_trylock\n\
extsw %0,%0\n\
addic. %0,%0,1\n\
ble- 2f\n\
- stwcx. %0,0,%2\n\
+ stwcx. %0,0,%1\n\
bne- 1b\n\
- li %1,1\n\
isync\n\
-2:" : "=&r"(tmp), "=&r"(ret)
- : "r"(&rw->lock)
- : "cr0", "memory");
+2:" : "=&r" (tmp)
+ : "r" (&rw->lock)
+ : "cr0", "xer", "memory");
- return ret;
+ return tmp;
}
-static __inline__ void _raw_read_lock(rwlock_t *rw)
+static int __inline__ _raw_read_trylock(rwlock_t *rw)
{
- unsigned int tmp;
+ return __read_trylock(rw) > 0;
+}
- __asm__ __volatile__(
- "b 2f # read_lock\n\
-1:"
- HMT_LOW
-" lwax %0,0,%1\n\
- cmpwi 0,%0,0\n\
- blt+ 1b\n"
- HMT_MEDIUM
-"2: lwarx %0,0,%1\n\
- extsw %0,%0\n\
- addic. %0,%0,1\n\
- ble- 1b\n\
- stwcx. %0,0,%1\n\
- bne- 2b\n\
- isync"
- : "=&r"(tmp)
- : "r"(&rw->lock)
- : "cr0", "memory");
+static void __inline__ _raw_read_lock(rwlock_t *rw)
+{
+ while (1) {
+ if (likely(__read_trylock(rw) > 0))
+ break;
+ do {
+ HMT_low();
+ if (SHARED_PROCESSOR)
+ __rw_yield(rw);
+ } while (unlikely(rw->lock < 0));
+ HMT_medium();
+ }
}
-static __inline__ void _raw_read_unlock(rwlock_t *rw)
+static void __inline__ _raw_read_unlock(rwlock_t *rw)
{
- unsigned int tmp;
+ long tmp;
__asm__ __volatile__(
"eieio # read_unlock\n\
: "cr0", "memory");
}
-static __inline__ int _raw_write_trylock(rwlock_t *rw)
+/*
+ * This returns the old value in the lock,
+ * so we got the write lock if the return value is 0.
+ */
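+/*
+ * The writer stores the same paca lock token that __spin_trylock
+ * uses; since its top bit is set, a write-locked word is negative
+ * and fails the readers' sign checks above.
+ */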
+static __inline__ long __write_trylock(rwlock_t *rw)
{
- unsigned int tmp;
- unsigned int ret;
+ long tmp, tmp2;
__asm__ __volatile__(
-"1: lwarx %0,0,%2 # write_trylock\n\
+" lwz %1,%3(13) # write_trylock\n\
+1: lwarx %0,0,%2\n\
cmpwi 0,%0,0\n\
- li %1,0\n\
bne- 2f\n\
- stwcx. %3,0,%2\n\
+ stwcx. %1,0,%2\n\
bne- 1b\n\
- li %1,1\n\
isync\n\
-2:" : "=&r"(tmp), "=&r"(ret)
- : "r"(&rw->lock), "r"(-1)
+2:" : "=&r" (tmp), "=&r" (tmp2)
+ : "r" (&rw->lock), "i" (offsetof(struct paca_struct, lock_token))
: "cr0", "memory");
- return ret;
-}
-
-static __inline__ void _raw_write_lock(rwlock_t *rw)
-{
- unsigned int tmp;
-
- __asm__ __volatile__(
- "b 2f # write_lock\n\
-1:"
- HMT_LOW
- "lwax %0,0,%1\n\
- cmpwi 0,%0,0\n\
- bne+ 1b\n"
- HMT_MEDIUM
-"2: lwarx %0,0,%1\n\
- cmpwi 0,%0,0\n\
- bne- 1b\n\
- stwcx. %2,0,%1\n\
- bne- 2b\n\
- isync"
- : "=&r"(tmp)
- : "r"(&rw->lock), "r"(-1)
- : "cr0", "memory");
-}
-
-static __inline__ void _raw_write_unlock(rwlock_t *rw)
-{
- __asm__ __volatile__("eieio # write_unlock": : :"memory");
- rw->lock = 0;
+ return tmp;
}
-static __inline__ int is_read_locked(rwlock_t *rw)
+static int __inline__ _raw_write_trylock(rwlock_t *rw)
{
- return rw->lock > 0;
+ return __write_trylock(rw) == 0;
}
-static __inline__ int is_write_locked(rwlock_t *rw)
+static void __inline__ _raw_write_lock(rwlock_t *rw)
{
- return rw->lock < 0;
+ while (1) {
+ if (likely(__write_trylock(rw) == 0))
+ break;
+ do {
+ HMT_low();
+ if (SHARED_PROCESSOR)
+ __rw_yield(rw);
+ } while (unlikely(rw->lock != 0));
+ HMT_medium();
+ }
}
-#define spin_lock_init(x) do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_unlock_wait(x) do { cpu_relax(); } while(spin_is_locked(x))
-
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-#define rwlock_is_locked(x) ((x)->lock)
-
#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */