Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
diff --git a/include/asm-alpha/spinlock.h b/include/asm-alpha/spinlock.h
index 5e35c5d..8197c69 100644
--- a/include/asm-alpha/spinlock.h
+++ b/include/asm-alpha/spinlock.h
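This diff converts the Alpha spinlock header to the generic lock interface from the spinlock consolidation: the arch-private spinlock_t/rwlock_t definitions and their CONFIG_DEBUG_SPINLOCK/CONFIG_DEBUG_RWLOCK variants are removed, and the primitives are renamed from _raw_* to __raw_*, operating on raw_spinlock_t/raw_rwlock_t (defined in asm/spinlock_types.h), with lock debugging handled by generic code.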
@@ -6,7 +6,6 @@
 #include <linux/kernel.h>
 #include <asm/current.h>
 
-
 /*
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
@@ -13,147 +12,96 @@
  * We make no fairness assumptions. They have a cost.
  */
 
-typedef struct {
-       volatile unsigned int lock /*__attribute__((aligned(32))) */;
-#ifdef CONFIG_DEBUG_SPINLOCK
-       int on_cpu;
-       int line_no;
-       void *previous;
-       struct task_struct * task;
-       const char *base_file;
-#endif
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} spinlock_t;
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-#define SPIN_LOCK_UNLOCKED (spinlock_t) {0, -1, 0, NULL, NULL, NULL}
-#define spin_lock_init(x)                                              \
-       ((x)->lock = 0, (x)->on_cpu = -1, (x)->previous = NULL, (x)->task = NULL)
-#else
-#define SPIN_LOCK_UNLOCKED     (spinlock_t) { 0 }
-#define spin_lock_init(x)      ((x)->lock = 0)
-#endif
-
-#define spin_is_locked(x)      ((x)->lock != 0)
-#define spin_unlock_wait(x)    ({ do { barrier(); } while ((x)->lock); })
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#ifdef CONFIG_DEBUG_SPINLOCK
-extern void _raw_spin_unlock(spinlock_t * lock);
-extern void debug_spin_lock(spinlock_t * lock, const char *, int);
-extern int debug_spin_trylock(spinlock_t * lock, const char *, int);
-
-#define _raw_spin_lock(LOCK) debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
-#define _raw_spin_trylock(LOCK) debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
-
-#define spin_lock_own(LOCK, LOCATION)                                  \
-do {                                                                   \
-       if (!((LOCK)->lock && (LOCK)->on_cpu == smp_processor_id()))    \
-               printk("%s: called on %d from %p but lock %s on %d\n",  \
-                      LOCATION, smp_processor_id(),                    \
-                      __builtin_return_address(0),                     \
-                      (LOCK)->lock ? "taken" : "freed", (LOCK)->on_cpu); \
-} while (0)
-#else
-static inline void _raw_spin_unlock(spinlock_t * lock)
+#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_spin_is_locked(x)        ((x)->lock != 0)
+#define __raw_spin_unlock_wait(x) \
+               do { cpu_relax(); } while ((x)->lock)
+
+static inline void __raw_spin_unlock(raw_spinlock_t * lock)
 {
        mb();
        lock->lock = 0;
 }
 
-static inline void _raw_spin_lock(spinlock_t * lock)
+static inline void __raw_spin_lock(raw_spinlock_t * lock)
 {
        long tmp;
 
-       /* Use sub-sections to put the actual loop at the end
-          of this object file's text section so as to perfect
-          branch prediction.  */
        __asm__ __volatile__(
        "1:     ldl_l   %0,%1\n"
-       "       blbs    %0,2f\n"
-       "       or      %0,1,%0\n"
+       "       bne     %0,2f\n"
+       "       lda     %0,1\n"
        "       stl_c   %0,%1\n"
        "       beq     %0,2f\n"
        "       mb\n"
        ".subsection 2\n"
        "2:     ldl     %0,%1\n"
-       "       blbs    %0,2b\n"
+       "       bne     %0,2b\n"
        "       br      1b\n"
        ".previous"
        : "=&r" (tmp), "=m" (lock->lock)
        : "m"(lock->lock) : "memory");
 }
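The lock word is 0 when free and 1 when held. The ldl_l/stl_c pair is Alpha's load-locked/store-conditional: "bne %0,2f" jumps to the out-of-line loop in .subsection 2 when the lock is observed held, and "beq %0,2f" retries when the conditional store loses its reservation; the trailing mb orders the critical section after the acquire. A rough C rendering of this loop, assuming GCC __atomic builtins and a hypothetical helper name (a sketch for illustration, not the kernel's code):

        /* Hypothetical sketch of the acquire loop, not the kernel's code. */
        static inline void spin_lock_sketch(unsigned int *lock)
        {
                unsigned int expected;

                for (;;) {
                        /* the .subsection 2 loop: spin on plain loads (ldl)
                           until the word looks free */
                        while (__atomic_load_n(lock, __ATOMIC_RELAXED) != 0)
                                ;
                        /* ldl_l/stl_c: attempt the 0 -> 1 transition;
                           retry if the store-conditional fails */
                        expected = 0;
                        if (__atomic_compare_exchange_n(lock, &expected, 1, 0,
                                                        __ATOMIC_ACQUIRE,
                                                        __ATOMIC_RELAXED))
                                return; /* the trailing mb orders the section */
                }
        }

Spinning on plain loads instead of repeated ldl_l/stl_c keeps the lock cacheline shared while waiting, which is why the loop is split in two.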
 
-static inline int _raw_spin_trylock(spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
        return !test_and_set_bit(0, &lock->lock);
 }
 
-#define spin_lock_own(LOCK, LOCATION)  ((void)0)
-#endif /* CONFIG_DEBUG_SPINLOCK */
-
 /***********************************************************/
 
-typedef struct {
-       volatile unsigned int write_lock:1, read_counter:31;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} /*__attribute__((aligned(32)))*/ rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+static inline int __raw_read_can_lock(raw_rwlock_t *lock)
+{
+       return (lock->lock & 1) == 0;
+}
 
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+static inline int __raw_write_can_lock(raw_rwlock_t *lock)
+{
+       return lock->lock == 0;
+}
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _raw_write_lock(rwlock_t * lock);
-extern void _raw_read_lock(rwlock_t * lock);
-#else
-static inline void _raw_write_lock(rwlock_t * lock)
+static inline void __raw_read_lock(raw_rwlock_t *lock)
 {
        long regx;
 
        __asm__ __volatile__(
        "1:     ldl_l   %1,%0\n"
-       "       bne     %1,6f\n"
-       "       or      $31,1,%1\n"
+       "       blbs    %1,6f\n"
+       "       subl    %1,2,%1\n"
        "       stl_c   %1,%0\n"
        "       beq     %1,6f\n"
        "       mb\n"
        ".subsection 2\n"
        "6:     ldl     %1,%0\n"
-       "       bne     %1,6b\n"
+       "       blbs    %1,6b\n"
        "       br      1b\n"
        ".previous"
        : "=m" (*lock), "=&r" (regx)
        : "m" (*lock) : "memory");
 }
 
-static inline void _raw_read_lock(rwlock_t * lock)
+static inline void __raw_write_lock(raw_rwlock_t *lock)
 {
        long regx;
 
        __asm__ __volatile__(
        "1:     ldl_l   %1,%0\n"
-       "       blbs    %1,6f\n"
-       "       subl    %1,2,%1\n"
+       "       bne     %1,6f\n"
+       "       lda     %1,1\n"
        "       stl_c   %1,%0\n"
        "       beq     %1,6f\n"
-       "4:     mb\n"
+       "       mb\n"
        ".subsection 2\n"
        "6:     ldl     %1,%0\n"
-       "       blbs    %1,6b\n"
+       "       bne     %1,6b\n"
        "       br      1b\n"
        ".previous"
        : "=m" (*lock), "=&r" (regx)
        : "m" (*lock) : "memory");
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
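The rwlock paths pack a writer flag and a reader count into one 32-bit word: a writer sets bit 0 ("lda %1,1"), while each reader subtracts 2, leaving the low bit clear however many readers hold the lock. Readers therefore spin only on the writer bit ("blbs"), while writers require the whole word to be zero ("bne"), exactly as __raw_read_can_lock and __raw_write_can_lock test above; __raw_read_unlock, whose tail appears in the final hunk, adds the 2 back. A hypothetical C rendering of the reader acquire path (GCC __atomic builtins assumed; a sketch, not the kernel's code):

        /* Hypothetical sketch of the reader fast path, not kernel code. */
        static inline void read_lock_sketch(int *lock)
        {
                int old;

                for (;;) {
                        /* blbs: wait only while the writer bit is set */
                        while ((old = __atomic_load_n(lock,
                                                      __ATOMIC_RELAXED)) & 1)
                                ;
                        /* subl %1,2,%1: each reader subtracts 2, keeping
                           bit 0 free for writers */
                        if (__atomic_compare_exchange_n(lock, &old, old - 2,
                                                        0, __ATOMIC_ACQUIRE,
                                                        __ATOMIC_RELAXED))
                                return;
                }
        }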
 
-static inline int _raw_write_trylock(rwlock_t * lock)
+static inline int __raw_read_trylock(raw_rwlock_t * lock)
 {
        long regx;
        int success;
@@ -161,11 +109,10 @@ static inline int _raw_write_trylock(rwlock_t * lock)
        __asm__ __volatile__(
        "1:     ldl_l   %1,%0\n"
        "       lda     %2,0\n"
-       "       bne     %1,2f\n"
-       "       or      $31,1,%1\n"
-       "       stl_c   %1,%0\n"
-       "       beq     %1,6f\n"
-       "       lda     %2,1\n"
+       "       blbs    %1,2f\n"
+       "       subl    %1,2,%2\n"
+       "       stl_c   %2,%0\n"
+       "       beq     %2,6f\n"
        "2:     mb\n"
        ".subsection 2\n"
        "6:     br      1b\n"
@@ -176,13 +123,29 @@ static inline int _raw_write_trylock(rwlock_t * lock)
        return success;
 }
 
-static inline void _raw_write_unlock(rwlock_t * lock)
+static inline int __raw_write_trylock(raw_rwlock_t * lock)
 {
-       mb();
-       *(volatile int *)lock = 0;
+       long regx;
+       int success;
+
+       __asm__ __volatile__(
+       "1:     ldl_l   %1,%0\n"
+       "       lda     %2,0\n"
+       "       bne     %1,2f\n"
+       "       lda     %2,1\n"
+       "       stl_c   %2,%0\n"
+       "       beq     %2,6f\n"
+       "2:     mb\n"
+       ".subsection 2\n"
+       "6:     br      1b\n"
+       ".previous"
+       : "=m" (*lock), "=&r" (regx), "=&r" (success)
+       : "m" (*lock) : "memory");
+
+       return success;
 }
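Both trylock variants make one logical attempt and return nonzero on success, but note the "beq %2,6f" path: a failed stl_c means the processor lost its lock reservation, not that the lock is busy, so the code branches back to 1b and retries instead of reporting failure. Only the blbs/bne test, which falls through to "2: mb" with %2 still 0 from "lda %2,0", reports a busy lock. A strong compare-and-swap expresses the same semantics in C (hypothetical helper, GCC builtins assumed; a sketch):

        /* Hypothetical sketch of the trylock logic, not kernel code. */
        static inline int write_trylock_sketch(int *lock)
        {
                int expected = 0;

                /* a strong (non-weak) CAS retries spurious
                   store-conditional failures internally, mirroring the
                   beq ... br 1b path; it fails only when the word is
                   genuinely nonzero */
                return __atomic_compare_exchange_n(lock, &expected, 1, 0,
                                                   __ATOMIC_ACQUIRE,
                                                   __ATOMIC_RELAXED);
        }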
 
-static inline void _raw_read_unlock(rwlock_t * lock)
+static inline void __raw_read_unlock(raw_rwlock_t * lock)
 {
        long regx;
        __asm__ __volatile__(
@@ -198,4 +161,10 @@ static inline void _raw_read_unlock(rwlock_t * lock)
        : "m" (*lock) : "memory");
 }
 
+static inline void __raw_write_unlock(raw_rwlock_t * lock)
+{
+       mb();
+       lock->lock = 0;
+}
+
 #endif /* _ALPHA_SPINLOCK_H */
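The spin and write unlock paths issue a full memory barrier before a plain store of zero: on Alpha the mb() orders every access inside the critical section before the store that publishes the release, which is all a release store requires. In C (hypothetical helper; a sketch):

        /* Hypothetical sketch of the unlock path, not kernel code. */
        static inline void spin_unlock_sketch(unsigned int *lock)
        {
                /* mb() followed by a plain store amounts to a
                   release store */
                __atomic_store_n(lock, 0, __ATOMIC_RELEASE);
        }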