This commit was manufactured by cvs2svn to create tag
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index e24f757..4b04759 100644
  */
 
 #ifdef CONFIG_SMP
-#include <asm/spinlock.h>
 #include <asm/cache.h>         /* we use L1_CACHE_BYTES */
 
+typedef spinlock_t atomic_lock_t;
+
 /* Use an array of spinlocks for our atomic_ts.
  * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
 #  define ATOMIC_HASH_SIZE 4
 #  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
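
To illustrate the hashing, here is a minimal user-space sketch; the 16-byte L1_CACHE_BYTES value and the ATOMIC_HASH_IDX/main driver are assumptions for the demo, not part of this header:

#include <stdio.h>

#define L1_CACHE_BYTES   16     /* assumed line size, just for the demo */
#define ATOMIC_HASH_SIZE 4
#define ATOMIC_HASH_IDX(a) \
        ((((unsigned long)(a)) / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1))

int main(void)
{
        int vars[8];

        /* Addresses on different cachelines map to different locks,
         * so unrelated atomic_t's rarely contend for the same one. */
        for (int i = 0; i < 8; i++)
                printf("&vars[%d] -> lock %lu\n", i,
                       (unsigned long)ATOMIC_HASH_IDX(&vars[i]));
        return 0;
}

Two atomic_t's that happen to share a cacheline share a lock, which is harmless: the fallback is simply a little extra contention.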
 
-extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
-
-/* Can't use _raw_spin_lock_irq because of #include problems, so
- * this is the substitute */
-#define _atomic_spin_lock_irqsave(l,f) do {    \
-       spinlock_t *s = ATOMIC_HASH(l);         \
-       local_irq_save(f);                      \
-       _raw_spin_lock(s);                      \
-} while(0)
+extern atomic_lock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
 
-#define _atomic_spin_unlock_irqrestore(l,f) do {       \
-       spinlock_t *s = ATOMIC_HASH(l);                 \
-       _raw_spin_unlock(s);                            \
-       local_irq_restore(f);                           \
-} while(0)
+static inline void atomic_spin_lock(atomic_lock_t *a)
+{
+       /* ldcw atomically loads the word and clears it, so a non-zero
+        * return means the lock was free and is now ours.  On failure,
+        * spin with cheap plain reads until the lock looks free, then
+        * retry the atomic ldcw. */
+       while (__ldcw(a) == 0)
+               while (a->lock[0] == 0);
+}
 
+static inline void atomic_spin_unlock(atomic_lock_t *a)
+{
+       /* Storing 1 marks the lock free again (1 = free, 0 = held). */
+       a->lock[0] = 1;
+}
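
PA-RISC's ldcw ("load and clear word") atomically reads a word and writes zero into it, which is why a free lock holds 1 and a held lock holds 0, the reverse of most other architectures. As a hedged analogue, here is a user-space sketch with GCC's __atomic_exchange_n standing in for ldcw; the demo_* names are inventions for this sketch:

#include <assert.h>

typedef struct { volatile unsigned int lock[4]; } demo_lock_t;

static demo_lock_t dl = { { 1, 1, 1, 1 } };     /* 1 = free, as on PA-RISC */

static inline void demo_spin_lock(demo_lock_t *a)
{
        /* Exchanging in 0 mimics ldcw: reading back 0 means the lock
         * was already held, so spin on plain reads before retrying
         * the expensive atomic operation. */
        while (__atomic_exchange_n(&a->lock[0], 0, __ATOMIC_ACQUIRE) == 0)
                while (a->lock[0] == 0)
                        ;
}

static inline void demo_spin_unlock(demo_lock_t *a)
{
        __atomic_store_n(&a->lock[0], 1, __ATOMIC_RELEASE);     /* free it */
}

int main(void)
{
        demo_spin_lock(&dl);
        assert(dl.lock[0] == 0);        /* held */
        demo_spin_unlock(&dl);
        assert(dl.lock[0] == 1);        /* free again */
        return 0;
}

The inner read-only loop is the classic test-and-test-and-set refinement: waiters spin on their cached copy of the line instead of hammering it with atomic operations.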
 
 #else
-#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
-#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
+#  define ATOMIC_HASH_SIZE 1
+#  define ATOMIC_HASH(a)       (0)
+/* On UP, masking interrupts is sufficient; the lock ops collapse to
+ * no-ops (the (void) cast just silences unused-value warnings). */
+#  define atomic_spin_lock(x) (void)(x)
+#  define atomic_spin_unlock(x) do { } while(0)
 #endif
 
+/* Copied from <linux/spinlock.h> and modified.  Interrupts are
+ * disabled before the lock is taken so that an interrupt handler on
+ * this CPU can never spin on a lock the interrupted code holds. */
+#define atomic_spin_lock_irqsave(lock, flags)  do {    \
+       local_irq_save(flags);                          \
+       atomic_spin_lock(lock);                         \
+} while (0)
+
+#define atomic_spin_unlock_irqrestore(lock, flags) do {        \
+       atomic_spin_unlock(lock);                       \
+       local_irq_restore(flags);                       \
+} while (0)
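
User space has no local_irq_save(), but blocking signals plays a comparable role, so the save-then-lock ordering can be sketched like this; pthread_sigmask is only a stand-in and the demo_* names are hypothetical:

#include <pthread.h>
#include <signal.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirror of atomic_spin_lock_irqsave(): mask "interrupts" first,
 * then take the lock, so a handler interrupting this thread can
 * never spin on a lock the interrupted code already holds. */
#define demo_spin_lock_irqsave(lock, flags) do {        \
        sigset_t all;                                   \
        sigfillset(&all);                               \
        pthread_sigmask(SIG_BLOCK, &all, &(flags));     \
        pthread_mutex_lock(lock);                       \
} while (0)

/* Release in the reverse order: drop the lock, then unmask. */
#define demo_spin_unlock_irqrestore(lock, flags) do {   \
        pthread_mutex_unlock(lock);                     \
        pthread_sigmask(SIG_SETMASK, &(flags), NULL);   \
} while (0)

int main(void)
{
        sigset_t flags;

        demo_spin_lock_irqsave(&demo_lock, flags);
        /* critical section */
        demo_spin_unlock_irqrestore(&demo_lock, flags);
        return 0;
}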
+
 /* Note that we need not lock read accesses - aligned word writes/reads
  * are atomic, so a reader never sees inconsistent values.
  *
  * Cache-line alignment would conflict with, for example, linux/module.h
  */
 
-typedef struct { volatile int counter; } atomic_t;
+typedef struct { volatile long counter; } atomic_t;
 
 
 /* This should get optimized out since it's never called.
@@ -140,22 +150,22 @@ static __inline__ int __atomic_add_return(int i, atomic_t *v)
 {
        int ret;
        unsigned long flags;
-       _atomic_spin_lock_irqsave(v, flags);
+       atomic_spin_lock_irqsave(ATOMIC_HASH(v), flags);
 
        ret = (v->counter += i);
 
-       _atomic_spin_unlock_irqrestore(v, flags);
+       atomic_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
        return ret;
 }
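
Putting the pieces together, here is a hedged user-space re-creation of the whole scheme; pthread_mutex_t stands in for the ldcw spinlock, and every demo_* name is invented for this sketch:

#include <pthread.h>
#include <stdio.h>

#define L1_CACHE_BYTES   16     /* assumed line size for the demo */
#define ATOMIC_HASH_SIZE 4
#define ATOMIC_HASH(a) \
        (&demo_hash[(((unsigned long)(a)) / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1)])

typedef struct { volatile long counter; } demo_atomic_t;

static pthread_mutex_t demo_hash[ATOMIC_HASH_SIZE] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
};

static long demo_add_return(long i, demo_atomic_t *v)
{
        long ret;

        pthread_mutex_lock(ATOMIC_HASH(v));     /* lock chosen by address */
        ret = (v->counter += i);
        pthread_mutex_unlock(ATOMIC_HASH(v));
        return ret;
}

static demo_atomic_t counter;

static void *worker(void *arg)
{
        (void)arg;
        for (int n = 0; n < 100000; n++)
                demo_add_return(1, &counter);
        return NULL;
}

int main(void)
{
        pthread_t t[4];

        for (int i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        printf("counter = %ld (expect 400000)\n", counter.counter);
        return 0;
}

Built with -pthread, the four workers race on one counter yet always finish at 400000, because every read-modify-write goes through the address-hashed lock.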
 
 static __inline__ void atomic_set(atomic_t *v, int i) 
 {
        unsigned long flags;
-       _atomic_spin_lock_irqsave(v, flags);
+       atomic_spin_lock_irqsave(ATOMIC_HASH(v), flags);
 
        v->counter = i;
 
-       _atomic_spin_unlock_irqrestore(v, flags);
+       atomic_spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 }
 
 static __inline__ int atomic_read(const atomic_t *v)