X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-ppc64%2Fspinlock.h;h=a16b2ff58d4184f22e52f8e77b7a1cee2a4a5859;hb=720b94a4e7548e78be55ab8fd3be4686c57dc808;hp=4181bd232267cfa7a38525dd550fc7bf0a4b6f77;hpb=86090fcac5e27b630656fe3d963a6b80e26dac44;p=linux-2.6.git

diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h
index 4181bd232..a16b2ff58 100644
--- a/include/asm-ppc64/spinlock.h
+++ b/include/asm-ppc64/spinlock.h
@@ -4,7 +4,7 @@
 /*
  * Simple spin lock operations.
  *
- * Copyright (C) 2001 Paul Mackerras , IBM
+ * Copyright (C) 2001-2004 Paul Mackerras , IBM
  * Copyright (C) 2001 Anton Blanchard , IBM
  *
  * Type of int is used as a full 64b word is not necessary.
@@ -14,6 +14,8 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
+#include <linux/config.h>
+
 typedef struct {
 	volatile unsigned int lock;
 } spinlock_t;
@@ -22,25 +24,48 @@ typedef struct {
 
 #define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0 }
 
 #define spin_is_locked(x)	((x)->lock != 0)
+#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
+
+static __inline__ void _raw_spin_unlock(spinlock_t *lock)
+{
+	__asm__ __volatile__("lwsync	# spin_unlock": : :"memory");
+	lock->lock = 0;
+}
+
+/*
+ * Normally we use the spinlock functions in arch/ppc64/lib/locks.c.
+ * For special applications such as profiling, we can have the
+ * spinlock functions inline by defining CONFIG_SPINLINE.
+ * This is not recommended on partitioned systems with shared
+ * processors, since the inline spinlock functions don't include
+ * the code for yielding the CPU to the lock holder.
+ */
+
+#ifndef CONFIG_SPINLINE
+extern int _raw_spin_trylock(spinlock_t *lock);
+extern void _raw_spin_lock(spinlock_t *lock);
+extern void _raw_spin_lock_flags(spinlock_t *lock, unsigned long flags);
+extern void spin_unlock_wait(spinlock_t *lock);
+
+#else
 static __inline__ int _raw_spin_trylock(spinlock_t *lock)
 {
-	unsigned int tmp;
+	unsigned int tmp, tmp2;
 
 	__asm__ __volatile__(
-"1:	lwarx		%0,0,%1		# spin_trylock\n\
+"1:	lwarx		%0,0,%2		# spin_trylock\n\
 	cmpwi		0,%0,0\n\
-	li		%0,0\n\
 	bne-		2f\n\
-	li		%0,1\n\
-	stwcx.		%0,0,%1\n\
+	lwz		%1,24(13)\n\
+	stwcx.		%1,0,%2\n\
 	bne-		1b\n\
 	isync\n\
-2:"	: "=&r"(tmp)
+2:"	: "=&r"(tmp), "=&r"(tmp2)
 	: "r"(&lock->lock)
 	: "cr0", "memory");
 
-	return tmp;
+	return tmp == 0;
 }
 
 static __inline__ void _raw_spin_lock(spinlock_t *lock)
@@ -58,20 +83,51 @@ static __inline__ void _raw_spin_lock(spinlock_t *lock)
 "2:	lwarx		%0,0,%1\n\
 	cmpwi		0,%0,0\n\
 	bne-		1b\n\
-	stwcx.		%2,0,%1\n\
+	lwz		%0,24(13)\n\
+	stwcx.		%0,0,%1\n\
 	bne-		2b\n\
 	isync"
 	: "=&r"(tmp)
-	: "r"(&lock->lock), "r"(1)
+	: "r"(&lock->lock)
 	: "cr0", "memory");
 }
 
-static __inline__ void _raw_spin_unlock(spinlock_t *lock)
+/*
+ * Note: if we ever want to inline the spinlocks on iSeries,
+ * we will have to change the irq enable/disable stuff in here.
+ */
+static __inline__ void _raw_spin_lock_flags(spinlock_t *lock,
+					    unsigned long flags)
 {
-	__asm__ __volatile__("eieio	# spin_unlock": : :"memory");
-	lock->lock = 0;
+	unsigned int tmp;
+	unsigned long tmp2;
+
+	__asm__ __volatile__(
+	"b		3f		# spin_lock\n\
+1:	mfmsr		%1\n\
+	mtmsrd		%3,1\n\
+2:"	HMT_LOW
+"	lwzx		%0,0,%2\n\
+	cmpwi		0,%0,0\n\
+	bne+		2b\n"
+	HMT_MEDIUM
+"	mtmsrd		%1,1\n\
+3:	lwarx		%0,0,%2\n\
+	cmpwi		0,%0,0\n\
+	bne-		1b\n\
+	lwz		%1,24(13)\n\
+	stwcx.		%1,0,%2\n\
+	bne-		3b\n\
+	isync"
+	: "=&r"(tmp), "=&r"(tmp2)
+	: "r"(&lock->lock), "r"(flags)
+	: "cr0", "memory");
 }
 
+#define spin_unlock_wait(x)	do { cpu_relax(); } while (spin_is_locked(x))
+
+#endif /* CONFIG_SPINLINE */
+
 /*
  * Read-write spinlocks, allowing multiple readers
  * but only one writer.
@@ -88,6 +144,34 @@ typedef struct {
 
 #define RW_LOCK_UNLOCKED	(rwlock_t) { 0 }
 
+#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+#define rwlock_is_locked(x)	((x)->lock)
+
+static __inline__ int is_read_locked(rwlock_t *rw)
+{
+	return rw->lock > 0;
+}
+
+static __inline__ int is_write_locked(rwlock_t *rw)
+{
+	return rw->lock < 0;
+}
+
+static __inline__ void _raw_write_unlock(rwlock_t *rw)
+{
+	__asm__ __volatile__("lwsync	# write_unlock": : :"memory");
+	rw->lock = 0;
+}
+
+#ifndef CONFIG_SPINLINE
+extern int _raw_read_trylock(rwlock_t *rw);
+extern void _raw_read_lock(rwlock_t *rw);
+extern void _raw_read_unlock(rwlock_t *rw);
+extern int _raw_write_trylock(rwlock_t *rw);
+extern void _raw_write_lock(rwlock_t *rw);
+extern void _raw_write_unlock(rwlock_t *rw);
+
+#else
 static __inline__ int _raw_read_trylock(rwlock_t *rw)
 {
 	unsigned int tmp;
@@ -139,7 +223,7 @@ static __inline__ void _raw_read_unlock(rwlock_t *rw)
 	unsigned int tmp;
 
 	__asm__ __volatile__(
-	"eieio		# read_unlock\n\
+	"lwsync		# read_unlock\n\
 1:	lwarx		%0,0,%1\n\
 	addic		%0,%0,-1\n\
 	stwcx.		%0,0,%1\n\
@@ -192,29 +276,7 @@ static __inline__ void _raw_write_lock(rwlock_t *rw)
 	: "r"(&rw->lock), "r"(-1)
 	: "cr0", "memory");
 }
-
-static __inline__ void _raw_write_unlock(rwlock_t *rw)
-{
-	__asm__ __volatile__("eieio	# write_unlock": : :"memory");
-	rw->lock = 0;
-}
-
-static __inline__ int is_read_locked(rwlock_t *rw)
-{
-	return rw->lock > 0;
-}
-
-static __inline__ int is_write_locked(rwlock_t *rw)
-{
-	return rw->lock < 0;
-}
-
-#define spin_lock_init(x)	do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-#define spin_unlock_wait(x)	do { cpu_relax(); } while(spin_is_locked(x))
-
-#define rwlock_init(x)		do { *(x) = RW_LOCK_UNLOCKED; } while(0)
-
-#define rwlock_is_locked(x)	((x)->lock)
+#endif /* CONFIG_SPINLINE */
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_SPINLOCK_H */
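
The hunks above change the lock protocol in two ways: a held lock is no longer marked with the constant 1 but with a nonzero per-CPU token read from the paca via r13 ("lwz %1,24(13)"), which is what lets the out-of-line code in arch/ppc64/lib/locks.c identify and yield to the lock holder on shared-processor partitions, and the unlock barrier is strengthened from eieio to lwsync. What follows is a minimal userspace C11 sketch of that acquire/release protocol, not the kernel code itself: the sketch_* names are invented for illustration, the token argument stands in for the paca value, and C11 atomics replace the lwarx/stwcx. retry loop.

#include <stdatomic.h>
#include <stdbool.h>

struct sketch_spinlock {
	_Atomic unsigned int lock;	/* 0 = free, non-zero = holder's token */
};

/* Roughly what the new _raw_spin_trylock() does: claim the lock only if
 * the word is 0, storing the caller's per-CPU token; acquire ordering
 * plays the role of the trailing isync. */
static bool sketch_spin_trylock(struct sketch_spinlock *lp, unsigned int token)
{
	unsigned int expected = 0;

	return atomic_compare_exchange_strong_explicit(&lp->lock, &expected,
			token, memory_order_acquire, memory_order_relaxed);
}

/* Roughly what the new _raw_spin_unlock() does: a release store models
 * the lwsync barrier followed by the plain store of 0. */
static void sketch_spin_unlock(struct sketch_spinlock *lp)
{
	atomic_store_explicit(&lp->lock, 0, memory_order_release);
}

A full lock acquire would simply retry sketch_spin_trylock() in a loop; the out-of-line _raw_spin_lock() in arch/ppc64/lib/locks.c does essentially that, plus the holder-yield logic described by the comment added in the hunk above.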