* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 1996, 97, 99, 2000, 03 by Ralf Baechle
+ * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
*/
-
-/*
- * As workaround for the ATOMIC_DEC_AND_LOCK / atomic_dec_and_lock mess in
- * <linux/spinlock.h> we have to include <linux/spinlock.h> outside the
- * main big wrapper ...
- */
-#include <linux/config.h>
-#include <linux/spinlock.h>
-
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H
-extern spinlock_t atomic_lock;
+#include <linux/irqflags.h>
+#include <asm/barrier.h>
+#include <asm/cpu-features.h>
+#include <asm/war.h>
typedef struct { volatile int counter; } atomic_t;
/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 */
#define atomic_set(v,i) ((v)->counter = (i))
-#ifdef CONFIG_CPU_HAS_LLSC
-
/*
* atomic_add - add integer to atomic variable
* @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t * v)
{
- unsigned long temp;
-
- __asm__ __volatile__(
- "1: ll %0, %1 # atomic_add \n"
- " addu %0, %2 \n"
- " sc %0, %1 \n"
- " beqz %0, 1b \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: ll %0, %1 # atomic_add \n"
+ " addu %0, %2 \n"
+ " sc %0, %1 \n"
+ " beqzl %0, 1b \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: ll %0, %1 # atomic_add \n"
+ " addu %0, %2 \n"
+ " sc %0, %1 \n"
+ " beqz %0, 1b \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ v->counter += i;
+ local_irq_restore(flags);
+ }
}
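/*
 * Usage sketch, illustrative only (the counter name is made up): callers
 * see a single interface no matter which of the three paths above is
 * taken at run time (branch-likely LL/SC, plain LL/SC, or the
 * interrupt-disable fallback).
 *
 *	static atomic_t pending = ATOMIC_INIT(0);
 *
 *	atomic_add(3, &pending);
 *	atomic_sub(1, &pending);
 */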
/*
 * atomic_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t * v)
{
- unsigned long temp;
-
- __asm__ __volatile__(
- "1: ll %0, %1 # atomic_sub \n"
- " subu %0, %2 \n"
- " sc %0, %1 \n"
- " beqz %0, 1b \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: ll %0, %1 # atomic_sub \n"
+ " subu %0, %2 \n"
+ " sc %0, %1 \n"
+ " beqzl %0, 1b \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: ll %0, %1 # atomic_sub \n"
+ " subu %0, %2 \n"
+ " sc %0, %1 \n"
+ " beqz %0, 1b \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ v->counter -= i;
+ local_irq_restore(flags);
+ }
}
/*
 * Same as above, but return the result value
 */
static __inline__ int atomic_add_return(int i, atomic_t * v)
{
- unsigned long temp, result;
-
- __asm__ __volatile__(
- "1: ll %1, %2 # atomic_add_return \n"
- " addu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " beqz %0, 1b \n"
- " addu %0, %1, %3 \n"
- " sync \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ unsigned long result;
+
+ smp_mb();
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: ll %1, %2 # atomic_add_return \n"
+ " addu %0, %1, %3 \n"
+ " sc %0, %2 \n"
+ " beqzl %0, 1b \n"
+ " addu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: ll %1, %2 # atomic_add_return \n"
+ " addu %0, %1, %3 \n"
+ " sc %0, %2 \n"
+ " beqz %0, 1b \n"
+ " addu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ result = v->counter;
+ result += i;
+ v->counter = result;
+ local_irq_restore(flags);
+ }
+
+ smp_mb();
return result;
}
static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
- unsigned long temp, result;
-
- __asm__ __volatile__(
- "1: ll %1, %2 # atomic_sub_return \n"
- " subu %0, %1, %3 \n"
- " sc %0, %2 \n"
- " beqz %0, 1b \n"
- " subu %0, %1, %3 \n"
- " sync \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ unsigned long result;
+
+ smp_mb();
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: ll %1, %2 # atomic_sub_return \n"
+ " subu %0, %1, %3 \n"
+ " sc %0, %2 \n"
+ " beqzl %0, 1b \n"
+ " subu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: ll %1, %2 # atomic_sub_return \n"
+ " subu %0, %1, %3 \n"
+ " sc %0, %2 \n"
+ " beqz %0, 1b \n"
+ " subu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ result = v->counter;
+ result -= i;
+ v->counter = result;
+ local_irq_restore(flags);
+ }
+
+ smp_mb();
return result;
}
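/*
 * Illustrative sketch only (obj and free_object() are hypothetical): the
 * *_return variants above hand back the updated value and are bracketed by
 * smp_mb(), so a reference-count drop can be written as:
 *
 *	if (atomic_sub_return(1, &obj->refcnt) == 0)
 *		free_object(obj);
 */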
-#else
-
/*
- * The MIPS I implementation is only atomic with respect to
- * interrupts. R3000 based multiprocessor machines are rare anyway ...
- *
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
+ * atomic_sub_if_positive - conditionally subtract integer from atomic variable
+ * @i: integer value to subtract
* @v: pointer of type atomic_t
*
- * Atomically adds @i to @v.
+ * Atomically test @v and subtract @i if @v is greater than or equal to @i.
+ * The function returns the old value of @v minus @i.
*/
-static __inline__ void atomic_add(int i, atomic_t * v)
+static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
- unsigned long flags;
+ int result;
+
+ smp_mb();
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: ll %1, %2 # atomic_sub_if_positive\n"
+ " subu %0, %1, %3 \n"
+ " bltz %0, 1f \n"
+ " sc %0, %2 \n"
+ " .set noreorder \n"
+ " beqzl %0, 1b \n"
+ " subu %0, %1, %3 \n"
+ " .set reorder \n"
+ "1: \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: ll %1, %2 # atomic_sub_if_positive\n"
+ " subu %0, %1, %3 \n"
+ " bltz %0, 1f \n"
+ " sc %0, %2 \n"
+ " .set noreorder \n"
+ " beqz %0, 1b \n"
+ " subu %0, %1, %3 \n"
+ " .set reorder \n"
+ "1: \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ result = v->counter;
+ result -= i;
+ if (result >= 0)
+ v->counter = result;
+ local_irq_restore(flags);
+ }
+
+ smp_mb();
- spin_lock_irqsave(&atomic_lock, flags);
- v->counter += i;
- spin_unlock_irqrestore(&atomic_lock, flags);
+ return result;
}
-/*
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
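/*
 * Illustrative sketch only, not an API of this file: atomic_cmpxchg()
 * supports open-coded read-modify-write loops beyond plain add/sub, e.g. a
 * saturating increment:
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(v);
 *		new = (old == INT_MAX) ? old : old + 1;
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */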
+/**
+ * atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
*
- * Atomically subtracts @i from @v.
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
*/
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&atomic_lock, flags);
- v->counter -= i;
- spin_unlock_irqrestore(&atomic_lock, flags);
-}
-
-static __inline__ int atomic_add_return(int i, atomic_t * v)
-{
- unsigned long flags;
- int temp;
-
- spin_lock_irqsave(&atomic_lock, flags);
- temp = v->counter;
- temp += i;
- v->counter = temp;
- spin_unlock_irqrestore(&atomic_lock, flags);
-
- return temp;
-}
-
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
-{
- unsigned long flags;
- int temp;
-
- spin_lock_irqsave(&atomic_lock, flags);
- temp = v->counter;
- temp -= i;
- v->counter = temp;
- spin_unlock_irqrestore(&atomic_lock, flags);
-
- return temp;
-}
-
-#endif /* CONFIG_CPU_HAS_LLSC */
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
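/*
 * Illustrative sketch only (obj and its refcnt field are hypothetical):
 * atomic_inc_not_zero() is the usual way to take a reference on an object
 * whose count may already have dropped to zero:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 *	return obj;
 */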
#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))
/*
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and returns true if the result is 0,
 * or false for all other cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+/*
+ * atomic_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ */
+#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v)
+
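/*
 * Illustrative sketch only (free_slots is a made-up counter):
 * atomic_dec_if_positive() never drives the value below zero, so it can
 * act as a "try down" on a counting resource:
 *
 *	if (atomic_dec_if_positive(&free_slots) < 0)
 *		return -EBUSY;
 */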
/*
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*/
#define atomic_add_negative(i,v) (atomic_add_return(i, (v)) < 0)
-#ifdef CONFIG_MIPS64
+#ifdef CONFIG_64BIT
-typedef struct { volatile __s64 counter; } atomic64_t;
+typedef struct { volatile long counter; } atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
/*
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 */
#define atomic64_set(v,i) ((v)->counter = (i))
-#ifdef CONFIG_CPU_HAS_LLDSCD
-
/*
* atomic64_add - add integer to atomic variable
* @i: integer value to add
 * @v: pointer of type atomic64_t
 *
* Atomically adds @i to @v.
*/
-static __inline__ void atomic64_add(int i, atomic64_t * v)
+static __inline__ void atomic64_add(long i, atomic64_t * v)
{
- unsigned long temp;
-
- __asm__ __volatile__(
- "1: lld %0, %1 # atomic64_add \n"
- " addu %0, %2 \n"
- " scd %0, %1 \n"
- " beqz %0, 1b \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: lld %0, %1 # atomic64_add \n"
+ " daddu %0, %2 \n"
+ " scd %0, %1 \n"
+ " beqzl %0, 1b \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: lld %0, %1 # atomic64_add \n"
+ " daddu %0, %2 \n"
+ " scd %0, %1 \n"
+ " beqz %0, 1b \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ v->counter += i;
+ local_irq_restore(flags);
+ }
}
/*
 * atomic64_sub - subtract the atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
-static __inline__ void atomic64_sub(int i, atomic64_t * v)
+static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
- unsigned long temp;
-
- __asm__ __volatile__(
- "1: lld %0, %1 # atomic64_sub \n"
- " subu %0, %2 \n"
- " scd %0, %1 \n"
- " beqz %0, 1b \n"
- : "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter));
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: lld %0, %1 # atomic64_sub \n"
+ " dsubu %0, %2 \n"
+ " scd %0, %1 \n"
+ " beqzl %0, 1b \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: lld %0, %1 # atomic64_sub \n"
+ " dsubu %0, %2 \n"
+ " scd %0, %1 \n"
+ " beqz %0, 1b \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter));
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ v->counter -= i;
+ local_irq_restore(flags);
+ }
}
/*
* Same as above, but return the result value
*/
-static __inline__ int atomic64_add_return(int i, atomic64_t * v)
+static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
- unsigned long temp, result;
-
- __asm__ __volatile__(
- "1: lld %1, %2 # atomic64_add_return \n"
- " addu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " beqz %0, 1b \n"
- " addu %0, %1, %3 \n"
- " sync \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ unsigned long result;
+
+ smp_mb();
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: lld %1, %2 # atomic64_add_return \n"
+ " daddu %0, %1, %3 \n"
+ " scd %0, %2 \n"
+ " beqzl %0, 1b \n"
+ " daddu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: lld %1, %2 # atomic64_add_return \n"
+ " daddu %0, %1, %3 \n"
+ " scd %0, %2 \n"
+ " beqz %0, 1b \n"
+ " daddu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ result = v->counter;
+ result += i;
+ v->counter = result;
+ local_irq_restore(flags);
+ }
+
+ smp_mb();
return result;
}
-static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
+static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
- unsigned long temp, result;
-
- __asm__ __volatile__(
- "1: lld %1, %2 # atomic64_sub_return \n"
- " subu %0, %1, %3 \n"
- " scd %0, %2 \n"
- " beqz %0, 1b \n"
- " subu %0, %1, %3 \n"
- " sync \n"
- : "=&r" (result), "=&r" (temp), "=m" (v->counter)
- : "Ir" (i), "m" (v->counter)
- : "memory");
+ unsigned long result;
+
+ smp_mb();
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: lld %1, %2 # atomic64_sub_return \n"
+ " dsubu %0, %1, %3 \n"
+ " scd %0, %2 \n"
+ " beqzl %0, 1b \n"
+ " dsubu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: lld %1, %2 # atomic64_sub_return \n"
+ " dsubu %0, %1, %3 \n"
+ " scd %0, %2 \n"
+ " beqz %0, 1b \n"
+ " dsubu %0, %1, %3 \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ result = v->counter;
+ result -= i;
+ v->counter = result;
+ local_irq_restore(flags);
+ }
+
+ smp_mb();
return result;
}
-#else
-
-/*
- * This implementation is only atomic with respect to interrupts. It can't
- * be used on SMP
- *
- * atomic64_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic64_add(int i, atomic64_t * v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&atomic_lock, flags);
- v->counter += i;
- spin_unlock_irqrestore(&atomic_lock, flags);
-}
-
/*
- * atomic64_sub - subtract the atomic variable
+ * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic64_t
*
- * Atomically subtracts @i from @v.
+ * Atomically test @v and subtract @i if @v is greater than or equal to @i.
+ * The function returns the old value of @v minus @i.
*/
-static __inline__ void atomic64_sub(int i, atomic64_t * v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&atomic_lock, flags);
- v->counter -= i;
- spin_unlock_irqrestore(&atomic_lock, flags);
-}
-
-static __inline__ int atomic64_add_return(int i, atomic64_t * v)
+static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
- unsigned long flags;
- int temp;
+ long result;
+
+ smp_mb();
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: lld %1, %2 # atomic64_sub_if_positive\n"
+ " dsubu %0, %1, %3 \n"
+ " bltz %0, 1f \n"
+ " scd %0, %2 \n"
+ " .set noreorder \n"
+ " beqzl %0, 1b \n"
+ " dsubu %0, %1, %3 \n"
+ " .set reorder \n"
+ "1: \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " .set mips3 \n"
+ "1: lld %1, %2 # atomic64_sub_if_positive\n"
+ " dsubu %0, %1, %3 \n"
+ " bltz %0, 1f \n"
+ " scd %0, %2 \n"
+ " .set noreorder \n"
+ " beqz %0, 1b \n"
+ " dsubu %0, %1, %3 \n"
+ " .set reorder \n"
+ "1: \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+ : "Ir" (i), "m" (v->counter)
+ : "memory");
+ } else {
+ unsigned long flags;
+
+ local_irq_save(flags);
+ result = v->counter;
+ result -= i;
+ if (result >= 0)
+ v->counter = result;
+ local_irq_restore(flags);
+ }
+
+ smp_mb();
- spin_lock_irqsave(&atomic_lock, flags);
- temp = v->counter;
- temp += i;
- v->counter = temp;
- spin_unlock_irqrestore(&atomic_lock, flags);
-
- return temp;
-}
-
-static __inline__ int atomic64_sub_return(int i, atomic64_t * v)
-{
- unsigned long flags;
- int temp;
-
- spin_lock_irqsave(&atomic_lock, flags);
- temp = v->counter;
- temp -= i;
- v->counter = temp;
- spin_unlock_irqrestore(&atomic_lock, flags);
-
- return temp;
+ return result;
}
-#endif /* CONFIG_CPU_HAS_LLDSCD */
-
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))
/*
 * atomic64_dec_and_test - decrement and test
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1 and returns true if the result is 0,
 * or false for all other cases.
 */
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ */
+#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v)
+
/*
* atomic64_inc - increment atomic variable
* @v: pointer of type atomic64_t
*/
#define atomic64_add_negative(i,v) (atomic64_add_return(i, (v)) < 0)
-#endif /* CONFIG_MIPS64 */
+#endif /* CONFIG_64BIT */
/*
 * atomic*_return operations are serializing but not the non-*_return
 * ones, so an explicit memory barrier is required before and after the
 * operation.
 */
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
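/*
 * Illustrative sketch only (ev is hypothetical): callers needing ordering
 * around a non-returning operation wrap it with the helpers above:
 *
 *	smp_mb__before_atomic_inc();
 *	atomic_inc(&ev->seq);
 *	smp_mb__after_atomic_inc();
 */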
+#include <asm-generic/atomic.h>
#endif /* _ASM_ATOMIC_H */