diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h
index f5b09c7da..0006fe9f8 100644
--- a/include/asm-sparc64/spinlock.h
+++ b/include/asm-sparc64/spinlock.h
@@ -6,7 +6,6 @@
 #ifndef __SPARC64_SPINLOCK_H
 #define __SPARC64_SPINLOCK_H
 
-#include <linux/config.h>
 #include <linux/threads.h>	/* For NR_CPUS */
 
 #ifndef __ASSEMBLY__
@@ -29,149 +28,222 @@
  * must be pre-V9 branches.
  */
 
-#ifndef CONFIG_DEBUG_SPINLOCK
+#define __raw_spin_is_locked(lp)	((lp)->lock != 0)
 
-typedef unsigned char spinlock_t;
-#define SPIN_LOCK_UNLOCKED	0
+#define __raw_spin_unlock_wait(lp)	\
+	do {	rmb();			\
+	} while((lp)->lock)
 
-#define spin_lock_init(lock)	(*((unsigned char *)(lock)) = 0)
-#define spin_is_locked(lock)	(*((volatile unsigned char *)(lock)) != 0)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
+{
+	unsigned long tmp;
 
-#define spin_unlock_wait(lock)	\
-do {	membar("#LoadLoad");	\
-} while(*((volatile unsigned char *)lock))
+	__asm__ __volatile__(
+"1:	ldstub	[%1], %0\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"	brnz,pn	%0, 2f\n"
+"	 nop\n"
+"	.subsection	2\n"
+"2:	ldub	[%1], %0\n"
+"	membar	#LoadLoad\n"
+"	brnz,pt	%0, 2b\n"
+"	 nop\n"
+"	ba,a,pt	%%xcc, 1b\n"
+"	.previous"
+	: "=&r" (tmp)
+	: "r" (lock)
+	: "memory");
+}
 
-static __inline__ void _raw_spin_lock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
+	unsigned long result;
+
 	__asm__ __volatile__(
-"1:	ldstub	[%0], %%g7\n"
-"	brnz,pn	%%g7, 2f\n"
-"	membar	#StoreLoad | #StoreStore\n"
+"	ldstub	[%1], %0\n"
+"	membar	#StoreLoad | #StoreStore"
+	: "=r" (result)
+	: "r" (lock)
+	: "memory");
+
+	return (result == 0UL);
+}
+
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
+{
+	__asm__ __volatile__(
+"	membar	#StoreStore | #LoadStore\n"
+"	stb	%%g0, [%0]"
+	: /* No outputs */
+	: "r" (lock)
+	: "memory");
+}
+
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
+{
+	unsigned long tmp1, tmp2;
+
+	__asm__ __volatile__(
+"1:	ldstub	[%2], %0\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"	brnz,pn	%0, 2f\n"
+"	 nop\n"
 "	.subsection	2\n"
-"2:	ldub	[%0], %%g7\n"
-"	brnz,pt	%%g7, 2b\n"
-"	membar	#LoadLoad\n"
-"	b,a,pt	%%xcc, 1b\n"
-"	.previous\n"
-	: /* no outputs */
+"2:	rdpr	%%pil, %1\n"
+"	wrpr	%3, %%pil\n"
+"3:	ldub	[%2], %0\n"
+"	membar	#LoadLoad\n"
+"	brnz,pt	%0, 3b\n"
+"	 nop\n"
+"	ba,pt	%%xcc, 1b\n"
+"	 wrpr	%1, %%pil\n"
+"	.previous"
+	: "=&r" (tmp1), "=&r" (tmp2)
+	: "r"(lock), "r"(flags)
+	: "memory");
+}
+
+/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
+
+static void inline __read_lock(raw_rwlock_t *lock)
+{
+	unsigned long tmp1, tmp2;
+
+	__asm__ __volatile__ (
+"1:	ldsw	[%2], %0\n"
+"	brlz,pn	%0, 2f\n"
+"4:	 add	%0, 1, %1\n"
+"	cas	[%2], %0, %1\n"
+"	cmp	%0, %1\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"	bne,pn	%%icc, 1b\n"
+"	 nop\n"
+"	.subsection	2\n"
+"2:	ldsw	[%2], %0\n"
+"	membar	#LoadLoad\n"
+"	brlz,pt	%0, 2b\n"
+"	 nop\n"
+"	ba,a,pt	%%xcc, 4b\n"
+"	.previous"
+	: "=&r" (tmp1), "=&r" (tmp2)
 	: "r" (lock)
-	: "g7", "memory");
+	: "memory");
 }
 
-static __inline__ int _raw_spin_trylock(spinlock_t *lock)
+static int inline __read_trylock(raw_rwlock_t *lock)
 {
-	unsigned int result;
-	__asm__ __volatile__("ldstub [%1], %0\n\t"
-			     "membar #StoreLoad | #StoreStore"
-			     : "=r" (result)
-			     : "r" (lock)
-			     : "memory");
-	return (result == 0);
+	int tmp1, tmp2;
+
+	__asm__ __volatile__ (
+"1:	ldsw	[%2], %0\n"
+"	brlz,a,pn	%0, 2f\n"
+"	 mov	0, %0\n"
+"	add	%0, 1, %1\n"
+"	cas	[%2], %0, %1\n"
+"	cmp	%0, %1\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"	bne,pn	%%icc, 1b\n"
+"	 mov	1, %0\n"
+"2:"
+	: "=&r" (tmp1), "=&r" (tmp2)
+	: "r" (lock)
+	: "memory");
+
+	return tmp1;
 }
 
-static __inline__ void _raw_spin_unlock(spinlock_t *lock)
+static void inline __read_unlock(raw_rwlock_t *lock)
 {
-	__asm__ __volatile__("membar	#StoreStore | #LoadStore\n\t"
-			     "stb	%%g0, [%0]"
-			     : /* No outputs */
-			     : "r" (lock)
-			     : "memory");
+	unsigned long tmp1, tmp2;
+
+	__asm__ __volatile__(
+"	membar	#StoreLoad | #LoadLoad\n"
+"1:	lduw	[%2], %0\n"
+"	sub	%0, 1, %1\n"
+"	cas	[%2], %0, %1\n"
+"	cmp	%0, %1\n"
+"	bne,pn	%%xcc, 1b\n"
+"	 nop"
+	: "=&r" (tmp1), "=&r" (tmp2)
+	: "r" (lock)
+	: "memory");
 }
 
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-typedef struct {
-	unsigned char lock;
-	unsigned int owner_pc, owner_cpu;
-} spinlock_t;
-#define SPIN_LOCK_UNLOCKED	(spinlock_t) { 0, 0, 0xff }
-#define spin_lock_init(__lock)	\
-do {	(__lock)->lock = 0; \
-	(__lock)->owner_pc = 0; \
-	(__lock)->owner_cpu = 0xff; \
-} while(0)
-#define spin_is_locked(__lock)	(*((volatile unsigned char *)(&((__lock)->lock))) != 0)
-#define spin_unlock_wait(__lock)	\
-do { \
-	membar("#LoadLoad"); \
-} while(*((volatile unsigned char *)(&((__lock)->lock))))
-
-extern void _do_spin_lock (spinlock_t *lock, char *str);
-extern void _do_spin_unlock (spinlock_t *lock);
-extern int _spin_trylock (spinlock_t *lock);
-
-#define _raw_spin_trylock(lp)	_spin_trylock(lp)
-#define _raw_spin_lock(lock)	_do_spin_lock(lock, "spin_lock")
-#define _raw_spin_unlock(lock)	_do_spin_unlock(lock)
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
+static void inline __write_lock(raw_rwlock_t *lock)
+{
+	unsigned long mask, tmp1, tmp2;
-/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
+
+	mask = 0x80000000UL;
+
+	__asm__ __volatile__(
+"1:	lduw	[%2], %0\n"
+"	brnz,pn	%0, 2f\n"
+"4:	 or	%0, %3, %1\n"
+"	cas	[%2], %0, %1\n"
+"	cmp	%0, %1\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"	bne,pn	%%icc, 1b\n"
+"	 nop\n"
+"	.subsection	2\n"
+"2:	lduw	[%2], %0\n"
+"	membar	#LoadLoad\n"
+"	brnz,pt	%0, 2b\n"
+"	 nop\n"
+"	ba,a,pt	%%xcc, 4b\n"
+"	.previous"
+	: "=&r" (tmp1), "=&r" (tmp2)
+	: "r" (lock), "r" (mask)
+	: "memory");
+}
+
+static void inline __write_unlock(raw_rwlock_t *lock)
+{
+	__asm__ __volatile__(
+"	membar	#LoadStore | #StoreStore\n"
+"	stw	%%g0, [%0]"
+	: /* no outputs */
+	: "r" (lock)
+	: "memory");
+}
+
+static int inline __write_trylock(raw_rwlock_t *lock)
+{
+	unsigned long mask, tmp1, tmp2, result;
+
+	mask = 0x80000000UL;
+
+	__asm__ __volatile__(
+"	mov	0, %2\n"
+"1:	lduw	[%3], %0\n"
+"	brnz,pn	%0, 2f\n"
+"	 or	%0, %4, %1\n"
+"	cas	[%3], %0, %1\n"
+"	cmp	%0, %1\n"
+"	membar	#StoreLoad | #StoreStore\n"
+"	bne,pn	%%icc, 1b\n"
+"	 nop\n"
+"	mov	1, %2\n"
+"2:"
+	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
+	: "r" (lock), "r" (mask)
+	: "memory");
+
+	return result;
+}
+
+#define __raw_read_lock(p)	__read_lock(p)
+#define __raw_read_trylock(p)	__read_trylock(p)
+#define __raw_read_unlock(p)	__read_unlock(p)
+#define __raw_write_lock(p)	__write_lock(p)
+#define __raw_write_unlock(p)	__write_unlock(p)
+#define __raw_write_trylock(p)	__write_trylock(p)
+
+#define __raw_read_can_lock(rw)	(!((rw)->lock & 0x80000000UL))
+#define __raw_write_can_lock(rw)	(!(rw)->lock)
 
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-typedef unsigned int rwlock_t;
-#define RW_LOCK_UNLOCKED	0
-#define rwlock_init(lp)	do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
-#define rwlock_is_locked(x)	(*(x) != RW_LOCK_UNLOCKED)
-
-extern void __read_lock(rwlock_t *);
-extern void __read_unlock(rwlock_t *);
-extern void __write_lock(rwlock_t *);
-extern void __write_unlock(rwlock_t *);
-extern int __write_trylock(rwlock_t *);
-
-#define _raw_read_lock(p)	__read_lock(p)
-#define _raw_read_unlock(p)	__read_unlock(p)
-#define _raw_write_lock(p)	__write_lock(p)
-#define _raw_write_unlock(p)	__write_unlock(p)
-#define _raw_write_trylock(p)	__write_trylock(p)
-
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-typedef struct {
-	unsigned long lock;
-	unsigned int writer_pc, writer_cpu;
-	unsigned int reader_pc[NR_CPUS];
-} rwlock_t;
-#define RW_LOCK_UNLOCKED	(rwlock_t) { 0, 0, 0xff, { } }
-#define rwlock_init(lp)	do { *(lp) = RW_LOCK_UNLOCKED; } while(0)
-#define rwlock_is_locked(x)	((x)->lock != 0)
-
-extern void _do_read_lock(rwlock_t *rw, char *str);
-extern void _do_read_unlock(rwlock_t *rw, char *str);
-extern void _do_write_lock(rwlock_t *rw, char *str);
-extern void _do_write_unlock(rwlock_t *rw);
-
-#define _raw_read_lock(lock) \
-do {	unsigned long flags; \
-	local_irq_save(flags); \
-	_do_read_lock(lock, "read_lock"); \
-	local_irq_restore(flags); \
-} while(0)
-
-#define _raw_read_unlock(lock) \
-do {	unsigned long flags; \
-	local_irq_save(flags); \
-	_do_read_unlock(lock, "read_unlock"); \
-	local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_lock(lock) \
-do {	unsigned long flags; \
-	local_irq_save(flags); \
-	_do_write_lock(lock, "write_lock"); \
-	local_irq_restore(flags); \
-} while(0)
-
-#define _raw_write_unlock(lock) \
-do {	unsigned long flags; \
-	local_irq_save(flags); \
-	_do_write_unlock(lock); \
-	local_irq_restore(flags); \
-} while(0)
-
-#endif /* CONFIG_DEBUG_SPINLOCK */
+#define _raw_spin_relax(lock)	cpu_relax()
+#define _raw_read_relax(lock)	cpu_relax()
+#define _raw_write_relax(lock)	cpu_relax()
 #endif /* !(__ASSEMBLY__) */
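
Note (not part of the patch): the new side implements the spinlock as a byte that ldstub atomically sets to 0xff (lock taken when the old value was 0, with a plain-load spin in ".subsection 2" while contended), and the rwlock as a 32-bit word whose bit 31 is the writer bit and whose low bits count readers, updated with cas. The stand-alone C sketch below models those algorithms with GCC/Clang __atomic builtins so the control flow can be compared with the asm; all demo_* names are invented for illustration, the membar pairs are approximated by acquire/release orderings, and the writer path simply retries the cas instead of spinning on plain loads first. Build with e.g. `gcc -O2 demo_spinlock.c`.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { unsigned char lock; } demo_spinlock_t;  /* 0 = free */
typedef struct { uint32_t lock; } demo_rwlock_t;         /* bit 31 = writer,
                                                            low bits = readers */

/* Models __raw_spin_lock: ldstub returns the old byte and stores 0xff;
 * the lock is ours when the old value was 0.  While contended, spin on
 * plain loads (the ".subsection 2" slow path) to keep the bus quiet. */
static void demo_spin_lock(demo_spinlock_t *lp)
{
	while (__atomic_test_and_set(&lp->lock, __ATOMIC_ACQUIRE))
		while (__atomic_load_n(&lp->lock, __ATOMIC_RELAXED))
			;	/* _raw_spin_relax() / cpu_relax() point */
}

/* Models __raw_spin_unlock: "membar #StoreStore | #LoadStore; stb %g0"
 * is a release store of zero. */
static void demo_spin_unlock(demo_spinlock_t *lp)
{
	__atomic_store_n(&lp->lock, 0, __ATOMIC_RELEASE);
}

/* Models __read_lock: ldsw/brlz spins while the sign (writer) bit is set,
 * then cas-increments the reader count, retrying if the cas lost a race. */
static void demo_read_lock(demo_rwlock_t *rw)
{
	uint32_t old;

	for (;;) {
		old = __atomic_load_n(&rw->lock, __ATOMIC_RELAXED);
		if (old & 0x80000000u)	/* writer holds it: keep spinning */
			continue;
		if (__atomic_compare_exchange_n(&rw->lock, &old, old + 1,
						false, __ATOMIC_ACQUIRE,
						__ATOMIC_RELAXED))
			return;
	}
}

/* Models __read_unlock's lduw/sub/cas loop as a single atomic decrement. */
static void demo_read_unlock(demo_rwlock_t *rw)
{
	__atomic_fetch_sub(&rw->lock, 1, __ATOMIC_RELEASE);
}

/* Models __write_lock: cas 0 -> 0x80000000, so the writer bit goes in only
 * when no readers and no writer are present (simplified: no load-spin). */
static void demo_write_lock(demo_rwlock_t *rw)
{
	uint32_t expected;

	do {
		expected = 0;
	} while (!__atomic_compare_exchange_n(&rw->lock, &expected,
					      0x80000000u, false,
					      __ATOMIC_ACQUIRE,
					      __ATOMIC_RELAXED));
}

/* Models __write_unlock: release store of zero ("stw %g0, [lock]"). */
static void demo_write_unlock(demo_rwlock_t *rw)
{
	__atomic_store_n(&rw->lock, 0, __ATOMIC_RELEASE);
}

int main(void)
{
	demo_spinlock_t s = { 0 };
	demo_rwlock_t rw = { 0 };

	demo_spin_lock(&s);   demo_spin_unlock(&s);
	demo_read_lock(&rw);  demo_read_unlock(&rw);
	demo_write_lock(&rw); demo_write_unlock(&rw);
	puts("single-threaded smoke test passed");
	return 0;
}

The same structure explains the patch's __raw_read_can_lock/__raw_write_can_lock macros: a reader may enter while bit 31 is clear, a writer only while the whole word is zero.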