fedora core 6 1.2949 + vserver 2.2.0

diff --git a/include/asm-parisc/spinlock.h b/include/asm-parisc/spinlock.h
index 679ea1c..f3d2090 100644
@@ -2,41 +2,38 @@
 #define __ASM_SPINLOCK_H
 
 #include <asm/system.h>
+#include <asm/processor.h>
+#include <asm/spinlock_types.h>
 
-/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
- * since it only has load-and-zero. Moreover, at least on some PA processors,
- * the semaphore address has to be 16-byte aligned.
- */
-
-#ifndef CONFIG_DEBUG_SPINLOCK
-
-#define __SPIN_LOCK_UNLOCKED   { { 1, 1, 1, 1 } }
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-static inline int spin_is_locked(spinlock_t *x)
+static inline int __raw_spin_is_locked(raw_spinlock_t *x)
 {
        volatile unsigned int *a = __ldcw_align(x);
        return *a == 0;
 }
 
-#define spin_unlock_wait(x)    do { barrier(); } while(spin_is_locked(x))
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
+#define __raw_spin_unlock_wait(x) \
+               do { cpu_relax(); } while (__raw_spin_is_locked(x))
 
-static inline void _raw_spin_lock(spinlock_t *x)
+static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+                                        unsigned long flags)
 {
        volatile unsigned int *a;
 
        mb();
        a = __ldcw_align(x);
        while (__ldcw(a) == 0)
-               while (*a == 0);
+               while (*a == 0)
+                       if (flags & PSW_SM_I) {
+                               local_irq_enable();
+                               cpu_relax();
+                               local_irq_disable();
+                       } else
+                               cpu_relax();
        mb();
 }
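
The PSW_SM_I test above checks the interrupt-enable (I) bit of the saved PA-RISC processor status word: if the caller had interrupts enabled, they are briefly re-enabled on each spin iteration so that a contended lock does not keep the CPU deaf to interrupts while it busy-waits. The newly included <asm/processor.h> supplies cpu_relax().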
 
-static inline void _raw_spin_unlock(spinlock_t *x)
+static inline void __raw_spin_unlock(raw_spinlock_t *x)
 {
        volatile unsigned int *a;
        mb();
@@ -45,7 +42,7 @@ static inline void _raw_spin_unlock(spinlock_t *x)
        mb();
 }
 
-static inline int _raw_spin_trylock(spinlock_t *x)
+static inline int __raw_spin_trylock(raw_spinlock_t *x)
 {
        volatile unsigned int *a;
        int ret;
@@ -57,202 +54,141 @@ static inline int _raw_spin_trylock(spinlock_t *x)
 
        return ret;
 }
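
For orientation, the ldcw discipline behind these three functions can be sketched in portable C. PA-RISC's only atomic memory operation is "load and clear word", which returns the old value of a word and leaves zero behind, so a lock word holds 1 when free and 0 when held; and because some PA processors require that word to be 16-byte aligned, the lock carries four words and __ldcw_align() selects the aligned one. The following is a userspace sketch only: __ldcw is emulated with a GCC atomic exchange, and every sketch_* name is invented for illustration.

#include <stdio.h>

/* Four words guarantee that one of them sits on a 16-byte boundary,
 * whatever the alignment of the structure itself. */
typedef struct {
        volatile unsigned int lock[4];
} sketch_spinlock_t;

#define SKETCH_SPIN_UNLOCKED { { 1, 1, 1, 1 } }        /* 1 == free */

static volatile unsigned int *sketch_ldcw_align(sketch_spinlock_t *x)
{
        /* round up to the next 16-byte boundary inside lock[] */
        unsigned long a = ((unsigned long)x->lock + 15UL) & ~15UL;
        return (volatile unsigned int *)a;
}

static unsigned int sketch_ldcw(volatile unsigned int *a)
{
        /* ldcw: atomically fetch the old value and leave 0 behind */
        return __atomic_exchange_n(a, 0, __ATOMIC_ACQUIRE);
}

static void sketch_spin_lock(sketch_spinlock_t *x)
{
        volatile unsigned int *a = sketch_ldcw_align(x);
        while (sketch_ldcw(a) == 0)     /* read 0: lock was already held */
                while (*a == 0)         /* spin on plain reads, then retry */
                        ;
}

static void sketch_spin_unlock(sketch_spinlock_t *x)
{
        volatile unsigned int *a = sketch_ldcw_align(x);
        __atomic_store_n(a, 1, __ATOMIC_RELEASE);       /* 1 == free again */
}

int main(void)
{
        sketch_spinlock_t l = SKETCH_SPIN_UNLOCKED;

        sketch_spin_lock(&l);
        printf("held: %s\n", *sketch_ldcw_align(&l) == 0 ? "yes" : "no");
        sketch_spin_unlock(&l);
        return 0;
}

The nested spin on plain reads mirrors the kernel loop above: it avoids hammering the bus with atomic operations while somebody else holds the lock.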
-       
-#define spin_lock_own(LOCK, LOCATION)  ((void)0)
-
-#else /* !(CONFIG_DEBUG_SPINLOCK) */
-
-#define SPINLOCK_MAGIC 0x1D244B3C
-
-#define __SPIN_LOCK_UNLOCKED   { { 1, 1, 1, 1 }, SPINLOCK_MAGIC, 10, __FILE__ , NULL, 0, -1, NULL, NULL }
-#undef SPIN_LOCK_UNLOCKED
-#define SPIN_LOCK_UNLOCKED (spinlock_t) __SPIN_LOCK_UNLOCKED
-
-#define spin_lock_init(x)      do { *(x) = SPIN_LOCK_UNLOCKED; } while(0)
-
-#define CHECK_LOCK(x)                                                  \
-       do {                                                            \
-               if (unlikely((x)->magic != SPINLOCK_MAGIC)) {                   \
-                       printk(KERN_ERR "%s:%d: spin_is_locked"         \
-                       " on uninitialized spinlock %p.\n",             \
-                               __FILE__, __LINE__, (x));               \
-               }                                                       \
-       } while(0)
-
-#define spin_is_locked(x)                                              \
-       ({                                                              \
-               CHECK_LOCK(x);                                          \
-               volatile unsigned int *a = __ldcw_align(x);             \
-               if (unlikely((*a == 0) && (x)->babble)) {                               \
-                       (x)->babble--;                                  \
-                       printk("KERN_WARNING                            \
-                               %s:%d: spin_is_locked(%s/%p) already"   \
-                               " locked by %s:%d in %s at %p(%d)\n",   \
-                               __FILE__,__LINE__, (x)->module, (x),    \
-                               (x)->bfile, (x)->bline, (x)->task->comm,\
-                               (x)->previous, (x)->oncpu);             \
-               }                                                       \
-               *a == 0;                                                \
-       })
-
-#define spin_unlock_wait(x)                                            \
-       do {                                                            \
-               CHECK_LOCK(x);                                          \
-               volatile unsigned int *a = __ldcw_align(x);             \
-               if (unlikely((*a == 0) && (x)->babble)) {                               \
-                       (x)->babble--;                                  \
-                       printk("KERN_WARNING                            \
-                               %s:%d: spin_unlock_wait(%s/%p)"         \
-                               " owned by %s:%d in %s at %p(%d)\n",    \
-                               __FILE__,__LINE__, (x)->module, (x),    \
-                               (x)->bfile, (x)->bline, (x)->task->comm,\
-                               (x)->previous, (x)->oncpu);             \
-               }                                                       \
-               barrier();                                              \
-       } while (*((volatile unsigned char *)(__ldcw_align(x))) == 0)
-
-extern void _dbg_spin_lock(spinlock_t *lock, const char *base_file, int line_no);
-extern void _dbg_spin_unlock(spinlock_t *lock, const char *, int);
-extern int _dbg_spin_trylock(spinlock_t * lock, const char *, int);
-
-#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
-
-#define _raw_spin_unlock(lock) _dbg_spin_unlock(lock, __FILE__, __LINE__)
-#define _raw_spin_lock(lock) _dbg_spin_lock(lock, __FILE__, __LINE__)
-#define _raw_spin_trylock(lock) _dbg_spin_trylock(lock, __FILE__, __LINE__)
-
-/* just in case we need it */
-#define spin_lock_own(LOCK, LOCATION)                                  \
-do {                                                                   \
-       volatile unsigned int *a = __ldcw_align(LOCK);                  \
-       if (!((*a == 0) && ((LOCK)->oncpu == smp_processor_id())))      \
-               printk("KERN_WARNING                                    \
-                       %s: called on %d from %p but lock %s on %d\n",  \
-                       LOCATION, smp_processor_id(),                   \
-                       __builtin_return_address(0),                    \
-                       (*a == 0) ? "taken" : "freed", (LOCK)->on_cpu); \
-} while (0)
-
-#endif /* !(CONFIG_DEBUG_SPINLOCK) */
 
 /*
- * Read-write spinlocks, allowing multiple readers
- * but only one writer.
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ * Linux rwlocks are unfair to writers; they can be starved for an indefinite
+ * time by readers.  With care, they can also be taken in interrupt context.
+ *
+ * In the PA-RISC implementation, we have a spinlock and a counter.
+ * Readers use the lock to serialise their access to the counter (which
+ * records how many readers currently hold the lock).
+ * Writers hold the spinlock, preventing any readers or other writers from
+ * grabbing the rwlock.
  */
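
The raw_rwlock_t handled below comes from the newly added <asm/spinlock_types.h>. Approximately (see that header for the authoritative definition), it pairs the four-word ldcw lock with the reader counter described above:

typedef struct {
        volatile unsigned int lock[4];  /* ldcw word(s), see above */
} raw_spinlock_t;

typedef struct {
        raw_spinlock_t lock;    /* taken briefly by readers, held by writers */
        volatile int counter;   /* >0: reader count, 0: free, -1: write-locked */
} raw_rwlock_t;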
-typedef struct {
-       spinlock_t lock;
-       volatile int counter;
-#ifdef CONFIG_PREEMPT
-       unsigned int break_lock;
-#endif
-} rwlock_t;
-
-#define RW_LOCK_UNLOCKED (rwlock_t) { __SPIN_LOCK_UNLOCKED, 0 }
-
-#define rwlock_init(lp)        do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
-
-#define _raw_read_trylock(lock) generic_raw_read_trylock(lock)
 
-/* read_lock, read_unlock are pretty straightforward.  Of course it somehow
- * sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
-
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_read_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_read_lock(rw) _dbg_read_lock(rw, __FILE__, __LINE__)
-#else
-static  __inline__ void _raw_read_lock(rwlock_t *rw)
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to grab the same read lock */
+static  __inline__ void __raw_read_lock(raw_rwlock_t *rw)
 {
        unsigned long flags;
        local_irq_save(flags);
-       _raw_spin_lock(&rw->lock); 
-
+       __raw_spin_lock_flags(&rw->lock, flags);
        rw->counter++;
-
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-static  __inline__ void _raw_read_unlock(rwlock_t *rw)
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to grab the same read lock */
+static  __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
 {
        unsigned long flags;
        local_irq_save(flags);
-       _raw_spin_lock(&rw->lock); 
-
+       __raw_spin_lock_flags(&rw->lock, flags);
        rw->counter--;
-
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
        local_irq_restore(flags);
 }
 
-/* write_lock is less trivial.  We optimistically grab the lock and check
- * if we surprised any readers.  If so we release the lock and wait till
- * they're all gone before trying again
- *
- * Also note that we don't use the _irqsave / _irqrestore suffixes here.
- * If we're called with interrupts enabled and we've got readers (or other
- * writers) in interrupt handlers someone fucked up and we'd dead-lock
- * sooner or later anyway.   prumpf */
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to grab the same read lock */
+static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
+{
+       unsigned long flags;
+ retry:
+       local_irq_save(flags);
+       if (__raw_spin_trylock(&rw->lock)) {
+               rw->counter++;
+               __raw_spin_unlock(&rw->lock);
+               local_irq_restore(flags);
+               return 1;
+       }
+
+       local_irq_restore(flags);
+       /* If write-locked, we fail to acquire the lock */
+       if (rw->counter < 0)
+               return 0;
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern void _dbg_write_lock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_write_lock(rw) _dbg_write_lock(rw, __FILE__, __LINE__)
-#else
-static  __inline__ void _raw_write_lock(rwlock_t *rw)
+       /* Wait until we have a realistic chance at the lock */
+       while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
+               cpu_relax();
+
+       goto retry;
+}
+
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to read_trylock() this lock */
+static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 {
+       unsigned long flags;
 retry:
-       _raw_spin_lock(&rw->lock);
+       local_irq_save(flags);
+       __raw_spin_lock_flags(&rw->lock, flags);
 
-       if(rw->counter != 0) {
-               /* this basically never happens */
-               _raw_spin_unlock(&rw->lock);
+       if (rw->counter != 0) {
+               __raw_spin_unlock(&rw->lock);
+               local_irq_restore(flags);
 
-               while(rw->counter != 0);
+               while (rw->counter != 0)
+                       cpu_relax();
 
                goto retry;
        }
 
-       /* got it.  now leave without unlocking */
-       rw->counter = -1; /* remember we are locked */
+       rw->counter = -1; /* mark as write-locked */
+       mb();
+       local_irq_restore(flags);
 }
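
Taken together, the read_lock/read_unlock/write_lock paths implement the spinlock-plus-counter protocol from the comment above: readers take the internal lock only long enough to adjust the counter, while a successful writer keeps holding it until write_unlock. A minimal userspace emulation (a sketch, not kernel code: a pthread mutex stands in for the raw spinlock, sched_yield() for cpu_relax(), and all sketch_* names are invented) may make the protocol easier to follow:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

typedef struct {
        pthread_mutex_t lock;   /* plays the role of rw->lock */
        volatile int counter;   /* >0: readers, 0: free, -1: write-locked */
} sketch_rwlock_t;

#define SKETCH_RW_UNLOCKED { PTHREAD_MUTEX_INITIALIZER, 0 }

static void sketch_read_lock(sketch_rwlock_t *rw)
{
        pthread_mutex_lock(&rw->lock);          /* serialise counter access */
        rw->counter++;
        pthread_mutex_unlock(&rw->lock);
}

static void sketch_read_unlock(sketch_rwlock_t *rw)
{
        pthread_mutex_lock(&rw->lock);
        rw->counter--;
        pthread_mutex_unlock(&rw->lock);
}

static int sketch_read_trylock(sketch_rwlock_t *rw)
{
        if (pthread_mutex_trylock(&rw->lock) == 0) {
                rw->counter++;
                pthread_mutex_unlock(&rw->lock);
                return 1;
        }
        return 0;       /* internal lock contended or write-locked */
}

static void sketch_write_lock(sketch_rwlock_t *rw)
{
        for (;;) {
                pthread_mutex_lock(&rw->lock);
                if (rw->counter == 0)
                        break;                  /* no readers: we win */
                /* surprised some readers: back off and wait them out */
                pthread_mutex_unlock(&rw->lock);
                while (rw->counter != 0)
                        sched_yield();          /* stands in for cpu_relax() */
        }
        rw->counter = -1;       /* mark write-locked; keep the internal lock */
}

static void sketch_write_unlock(sketch_rwlock_t *rw)
{
        rw->counter = 0;
        pthread_mutex_unlock(&rw->lock);        /* held since write_lock */
}

int main(void)
{
        sketch_rwlock_t rw = SKETCH_RW_UNLOCKED;

        sketch_read_lock(&rw);
        printf("after read_lock, counter = %d\n", rw.counter);      /* 1 */
        sketch_read_unlock(&rw);

        sketch_write_lock(&rw);
        printf("after write_lock, counter = %d\n", rw.counter);     /* -1 */
        printf("read_trylock now: %d\n", sketch_read_trylock(&rw)); /* 0 */
        sketch_write_unlock(&rw);
        return 0;
}

Note that sketch_read_trylock() is simpler than the kernel version above, which keeps retrying while the internal lock is merely contended by other readers and only gives up once the lock is actually write-locked.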
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-/* write_unlock is absolutely trivial - we don't have to wait for anything */
-
-static  __inline__ void _raw_write_unlock(rwlock_t *rw)
+static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
        rw->counter = 0;
-       _raw_spin_unlock(&rw->lock);
+       __raw_spin_unlock(&rw->lock);
 }
 
-#ifdef CONFIG_DEBUG_RWLOCK
-extern int _dbg_write_trylock(rwlock_t * rw, const char *bfile, int bline);
-#define _raw_write_trylock(rw) _dbg_write_trylock(rw, __FILE__, __LINE__)
-#else
-static  __inline__ int _raw_write_trylock(rwlock_t *rw)
+/* Note that we have to ensure interrupts are disabled in case we're
+ * interrupted by some other code that wants to read_trylock() this lock */
+static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
 {
-       _raw_spin_lock(&rw->lock);
-       if (rw->counter != 0) {
-               /* this basically never happens */
-               _raw_spin_unlock(&rw->lock);
+       unsigned long flags;
+       int result = 0;
 
-               return 0;
+       local_irq_save(flags);
+       if (__raw_spin_trylock(&rw->lock)) {
+               if (rw->counter == 0) {
+                       rw->counter = -1;
+                       result = 1;
+               } else {
+                       /* Read-locked.  Oh well. */
+                       __raw_spin_unlock(&rw->lock);
+               }
        }
+       local_irq_restore(flags);
 
-       /* got it.  now leave without unlocking */
-       rw->counter = -1; /* remember we are locked */
-       return 1;
+       return result;
 }
-#endif /* CONFIG_DEBUG_RWLOCK */
 
-static __inline__ int is_read_locked(rwlock_t *rw)
+/*
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
 {
-       return rw->counter > 0;
+       return rw->counter >= 0;
 }
 
-static __inline__ int is_write_locked(rwlock_t *rw)
+/*
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
 {
-       return rw->counter < 0;
+       return !rw->counter;
 }
 
+#define _raw_spin_relax(lock)  cpu_relax()
+#define _raw_read_relax(lock)  cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
 #endif /* __ASM_SPINLOCK_H */
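
None of the __raw_* primitives above is called directly by kernel code; they are the architecture back-end for the generic <linux/spinlock.h> API, which layers the irq, bh and preemption handling on top. Conventional usage (with hypothetical lock names) looks like:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);           /* hypothetical */
static DEFINE_RWLOCK(example_rwlock);           /* hypothetical */

static void example(void)
{
        unsigned long flags;

        /* reaches __raw_spin_lock_flags() on SMP parisc */
        spin_lock_irqsave(&example_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&example_lock, flags);

        /* reaches __raw_read_lock()/__raw_read_unlock() */
        read_lock(&example_rwlock);
        /* ... multiple readers may hold this concurrently ... */
        read_unlock(&example_rwlock);
}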