#include <linux/config.h>
#include <asm/system.h>
-
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>. */
#ifdef CONFIG_SMP
+#include <asm/spinlock.h>
+#include <asm/cache.h> /* we use L1_CACHE_BYTES */
+
/* Use an array of spinlocks for our atomic_ts.
-** Hash function to index into a different SPINLOCK.
-** Since "a" is usually an address, ">>8" makes one spinlock per 64-bytes.
-*/
+ * Hash function to index into a different SPINLOCK.
+ * Since "a" is usually an address, use one spinlock per cacheline.
+ */
# define ATOMIC_HASH_SIZE 4
-# define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long) a)>>8)&(ATOMIC_HASH_SIZE-1)])
-
-extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
-/* copied from <asm/spinlock.h> and modified */
-# define SPIN_LOCK(x) \
- do { while(__ldcw(&(x)->lock) == 0); } while(0)
-
-# define SPIN_UNLOCK(x) \
- do { (x)->lock = 1; } while(0)
-#else
-# define ATOMIC_HASH_SIZE 1
-# define ATOMIC_HASH(a) (0)
+# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-/* copied from <linux/spinlock.h> and modified */
-# define SPIN_LOCK(x) (void)(x)
-
-# define SPIN_UNLOCK(x) do { } while(0)
-#endif
+extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+
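/* Illustration (editor's sketch, assuming L1_CACHE_BYTES == 64): atomic_ts
 * in the same cacheline share a lock; nearby cachelines get different ones.
 *
 *	ATOMIC_HASH(0x1000)	-> &__atomic_hash[(0x1000/64) & 3]  == slot 0
 *	ATOMIC_HASH(0x1040)	-> &__atomic_hash[(0x1040/64) & 3]  == slot 1
 *	ATOMIC_HASH(0x1100)	-> &__atomic_hash[(0x1100/64) & 3]  == slot 0
 *
 * Unrelated atomic_ts may collide on a slot; that costs contention, never
 * correctness.
 */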
+/* Can't use _raw_spin_lock_irq because of #include problems, so
+ * this is the substitute */
+#define _atomic_spin_lock_irqsave(l,f) do { \
+ spinlock_t *s = ATOMIC_HASH(l); \
+ local_irq_save(f); \
+ _raw_spin_lock(s); \
+} while(0)
+
+#define _atomic_spin_unlock_irqrestore(l,f) do { \
+ spinlock_t *s = ATOMIC_HASH(l); \
+ _raw_spin_unlock(s); \
+ local_irq_restore(f); \
+} while(0)
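/* Editor's note on the ordering above: interrupts go off before the hashed
 * lock is taken, and back on only after it is released.  Otherwise an
 * interrupt handler on the same CPU could spin on a lock its own CPU already
 * holds and deadlock.  A minimal sketch, with some_counter a hypothetical
 * atomic_t that is also updated from irq context:
 *
 *	unsigned long flags;
 *	_atomic_spin_lock_irqsave(&some_counter, flags);
 *	... critical section: irqs off, hashed lock held ...
 *	_atomic_spin_unlock_irqrestore(&some_counter, flags);
 */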
-/* copied from <linux/spinlock.h> and modified */
-#define SPIN_LOCK_IRQSAVE(lock, flags) do { local_irq_save(flags); SPIN_LOCK(lock); } while (0)
-#define SPIN_UNLOCK_IRQRESTORE(lock, flags) do { SPIN_UNLOCK(lock); local_irq_restore(flags); } while (0)
+
+#else
+# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
+# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
+#endif
/* Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 *
 * Cache-line alignment would conflict with, for example, linux/module.h
*/
-typedef struct { volatile long counter; } atomic_t;
+typedef struct { volatile int counter; } atomic_t;
/* This should get optimized out since it's never called. */
static __inline__ int __atomic_add_return(int i, atomic_t *v)
{
int ret;
unsigned long flags;
- SPIN_LOCK_IRQSAVE(ATOMIC_HASH(v), flags);
+ _atomic_spin_lock_irqsave(v, flags);
ret = (v->counter += i);
- SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(v), flags);
+ _atomic_spin_unlock_irqrestore(v, flags);
return ret;
}
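/* A minimal sketch of how this helper behaves (editor's example, not new
 * interface):
 *
 *	atomic_t cnt = ATOMIC_INIT(0);
 *	int now = __atomic_add_return(1, &cnt);		now == 1 afterwards
 *
 * On SMP the update runs under the hashed lock; on UP it only needs irqs off.
 */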
static __inline__ void atomic_set(atomic_t *v, int i)
{
unsigned long flags;
- SPIN_LOCK_IRQSAVE(ATOMIC_HASH(v), flags);
+ _atomic_spin_lock_irqsave(v, flags);
v->counter = i;
- SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(v), flags);
+ _atomic_spin_unlock_irqrestore(v, flags);
}
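/* Why atomic_set() takes the lock at all (editor's note): a plain store could
 * land between the load and the store of a concurrent __atomic_add_return()
 * on another CPU and be silently overwritten, since the read-modify-write is
 * only atomic with respect to other holders of the hashed lock.
 */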
static __inline__ int atomic_read(const atomic_t *v)
{
	return v->counter;
}

/* exported interface */
#define atomic_add_return(i,v)	(__atomic_add_return( (i),(v)))
#define atomic_sub_return(i,v)	(__atomic_add_return(-(i),(v)))
#define atomic_inc_return(v) (__atomic_add_return( 1,(v)))
#define atomic_dec_return(v) (__atomic_add_return( -1,(v)))
+#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+
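/* Editor's example: with v == 3, atomic_add_negative(-2, &v) leaves v == 1
 * and returns false; atomic_add_negative(-5, &v) would instead leave
 * v == -2 and return true.
 */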
+/*
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+
#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
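/* Typical use (editor's sketch; put_foo() and foo_destroy() are
 * hypothetical):
 *
 *	static void put_foo(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refcount))
 *			foo_destroy(f);		only the last putter frees
 *	}
 */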
#define ATOMIC_INIT(i) { (i) }