X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Fasm-mips%2Fatomic.h;h=7d89e87bc8c6fd9544ef3b247f57421248741396;hb=6a77f38946aaee1cd85eeec6cf4229b204c15071;hp=c8c6a5a8c5aa7aa42de1f660189b5a629dcbb571;hpb=87fc8d1bb10cd459024a742c6a10961fefcef18f;p=linux-2.6.git

diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index c8c6a5a8c..7d89e87bc 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -23,6 +23,9 @@
 #ifndef _ASM_ATOMIC_H
 #define _ASM_ATOMIC_H
 
+#include <asm/cpu-features.h>
+#include <asm/war.h>
+
 extern spinlock_t atomic_lock;
 
 typedef struct { volatile int counter; } atomic_t;
@@ -46,8 +49,6 @@ typedef struct { volatile int counter; } atomic_t;
  */
 #define atomic_set(v,i) ((v)->counter = (i))
 
-#ifdef CONFIG_CPU_HAS_LLSC
-
 /*
  * atomic_add - add integer to atomic variable
  * @i: integer value to add
@@ -57,15 +58,33 @@ typedef struct { volatile int counter; } atomic_t;
  */
 static __inline__ void atomic_add(int i, atomic_t * v)
 {
-        unsigned long temp;
-
-        __asm__ __volatile__(
-        "1:     ll      %0, %1          # atomic_add            \n"
-        "       addu    %0, %2                                  \n"
-        "       sc      %0, %1                                  \n"
-        "       beqz    %0, 1b                                  \n"
-        : "=&r" (temp), "=m" (v->counter)
-        : "Ir" (i), "m" (v->counter));
+        if (cpu_has_llsc && R10000_LLSC_WAR) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     ll      %0, %1          # atomic_add            \n"
+                "       addu    %0, %2                                  \n"
+                "       sc      %0, %1                                  \n"
+                "       beqzl   %0, 1b                                  \n"
+                : "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter));
+        } else if (cpu_has_llsc) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     ll      %0, %1          # atomic_add            \n"
+                "       addu    %0, %2                                  \n"
+                "       sc      %0, %1                                  \n"
+                "       beqz    %0, 1b                                  \n"
+                : "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter));
+        } else {
+                unsigned long flags;
+
+                spin_lock_irqsave(&atomic_lock, flags);
+                v->counter += i;
+                spin_unlock_irqrestore(&atomic_lock, flags);
+        }
 }
 
 /*
@@ -77,15 +96,33 @@ static __inline__ void atomic_add(int i, atomic_t * v)
  */
 static __inline__ void atomic_sub(int i, atomic_t * v)
 {
-        unsigned long temp;
-
-        __asm__ __volatile__(
-        "1:     ll      %0, %1          # atomic_sub            \n"
-        "       subu    %0, %2                                  \n"
-        "       sc      %0, %1                                  \n"
-        "       beqz    %0, 1b                                  \n"
-        : "=&r" (temp), "=m" (v->counter)
-        : "Ir" (i), "m" (v->counter));
+        if (cpu_has_llsc && R10000_LLSC_WAR) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     ll      %0, %1          # atomic_sub            \n"
+                "       subu    %0, %2                                  \n"
+                "       sc      %0, %1                                  \n"
+                "       beqzl   %0, 1b                                  \n"
+                : "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter));
+        } else if (cpu_has_llsc) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     ll      %0, %1          # atomic_sub            \n"
+                "       subu    %0, %2                                  \n"
+                "       sc      %0, %1                                  \n"
+                "       beqz    %0, 1b                                  \n"
+                : "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter));
+        } else {
+                unsigned long flags;
+
+                spin_lock_irqsave(&atomic_lock, flags);
+                v->counter -= i;
+                spin_unlock_irqrestore(&atomic_lock, flags);
+        }
 }
 
 /*
@@ -93,36 +130,86 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
  */
 static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
-        unsigned long temp, result;
-
-        __asm__ __volatile__(
-        "1:     ll      %1, %2          # atomic_add_return     \n"
-        "       addu    %0, %1, %3                              \n"
-        "       sc      %0, %2                                  \n"
-        "       beqz    %0, 1b                                  \n"
-        "       addu    %0, %1, %3                              \n"
-        "       sync                                            \n"
-        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
-        : "Ir" (i), "m" (v->counter)
-        : "memory");
+        unsigned long result;
+
+        if (cpu_has_llsc && R10000_LLSC_WAR) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     ll      %1, %2          # atomic_add_return     \n"
+                "       addu    %0, %1, %3                              \n"
+                "       sc      %0, %2                                  \n"
+                "       beqzl   %0, 1b                                  \n"
+                "       addu    %0, %1, %3                              \n"
+                "       sync                                            \n"
+                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter)
+                : "memory");
+        } else if (cpu_has_llsc) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     ll      %1, %2          # atomic_add_return     \n"
+                "       addu    %0, %1, %3                              \n"
+                "       sc      %0, %2                                  \n"
+                "       beqz    %0, 1b                                  \n"
+                "       addu    %0, %1, %3                              \n"
+                "       sync                                            \n"
+                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter)
+                : "memory");
+        } else {
+                unsigned long flags;
+
+                spin_lock_irqsave(&atomic_lock, flags);
+                result = v->counter;
+                result += i;
+                v->counter = result;
+                spin_unlock_irqrestore(&atomic_lock, flags);
+        }
 
         return result;
 }
 
 static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
-        unsigned long temp, result;
-
-        __asm__ __volatile__(
-        "1:     ll      %1, %2          # atomic_sub_return     \n"
-        "       subu    %0, %1, %3                              \n"
-        "       sc      %0, %2                                  \n"
-        "       beqz    %0, 1b                                  \n"
-        "       subu    %0, %1, %3                              \n"
-        "       sync                                            \n"
-        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
-        : "Ir" (i), "m" (v->counter)
-        : "memory");
+        unsigned long result;
+
+        if (cpu_has_llsc && R10000_LLSC_WAR) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     ll      %1, %2          # atomic_sub_return     \n"
+                "       subu    %0, %1, %3                              \n"
+                "       sc      %0, %2                                  \n"
+                "       beqzl   %0, 1b                                  \n"
+                "       subu    %0, %1, %3                              \n"
+                "       sync                                            \n"
+                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter)
+                : "memory");
+        } else if (cpu_has_llsc) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     ll      %1, %2          # atomic_sub_return     \n"
+                "       subu    %0, %1, %3                              \n"
+                "       sc      %0, %2                                  \n"
+                "       beqz    %0, 1b                                  \n"
+                "       subu    %0, %1, %3                              \n"
+                "       sync                                            \n"
+                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter)
+                : "memory");
+        } else {
+                unsigned long flags;
+
+                spin_lock_irqsave(&atomic_lock, flags);
+                result = v->counter;
+                result -= i;
+                v->counter = result;
+                spin_unlock_irqrestore(&atomic_lock, flags);
+        }
 
         return result;
 }
@@ -136,112 +223,50 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
  */
 static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
-        unsigned long temp, result;
-
-        __asm__ __volatile__(
-        "1:     ll      %1, %2          # atomic_sub_if_positive\n"
-        "       subu    %0, %1, %3                              \n"
-        "       bltz    %0, 1f                                  \n"
-        "       sc      %0, %2                                  \n"
-        "       beqz    %0, 1b                                  \n"
-        "       sync                                            \n"
-        "1:                                                     \n"
-        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
-        : "Ir" (i), "m" (v->counter)
-        : "memory");
+        unsigned long result;
+
+        if (cpu_has_llsc && R10000_LLSC_WAR) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     ll      %1, %2          # atomic_sub_if_positive\n"
+                "       subu    %0, %1, %3                              \n"
+                "       bltz    %0, 1f                                  \n"
+                "       sc      %0, %2                                  \n"
+                "       beqzl   %0, 1b                                  \n"
+                "       sync                                            \n"
+                "1:                                                     \n"
+                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter)
+                : "memory");
+        } else if (cpu_has_llsc) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     ll      %1, %2          # atomic_sub_if_positive\n"
+                "       subu    %0, %1, %3                              \n"
+                "       bltz    %0, 1f                                  \n"
+                "       sc      %0, %2                                  \n"
+                "       beqz    %0, 1b                                  \n"
+                "       sync                                            \n"
+                "1:                                                     \n"
+                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter)
+                : "memory");
+        } else {
+                unsigned long flags;
+
+                spin_lock_irqsave(&atomic_lock, flags);
+                result = v->counter;
+                result -= i;
+                if (result >= 0)
+                        v->counter = result;
+                spin_unlock_irqrestore(&atomic_lock, flags);
+        }
 
         return result;
 }
 
-#else
-
-/*
- * The MIPS I implementation is only atomic with respect to
- * interrupts. R3000 based multiprocessor machines are rare anyway ...
- *
- * atomic_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&atomic_lock, flags);
-        v->counter += i;
-        spin_unlock_irqrestore(&atomic_lock, flags);
-}
-
-/*
- * atomic_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&atomic_lock, flags);
-        v->counter -= i;
-        spin_unlock_irqrestore(&atomic_lock, flags);
-}
-
-static __inline__ int atomic_add_return(int i, atomic_t * v)
-{
-        unsigned long flags;
-        int temp;
-
-        spin_lock_irqsave(&atomic_lock, flags);
-        temp = v->counter;
-        temp += i;
-        v->counter = temp;
-        spin_unlock_irqrestore(&atomic_lock, flags);
-
-        return temp;
-}
-
-static __inline__ int atomic_sub_return(int i, atomic_t * v)
-{
-        unsigned long flags;
-        int temp;
-
-        spin_lock_irqsave(&atomic_lock, flags);
-        temp = v->counter;
-        temp -= i;
-        v->counter = temp;
-        spin_unlock_irqrestore(&atomic_lock, flags);
-
-        return temp;
-}
-
-/*
- * atomic_sub_if_positive - add integer to atomic variable
- * @v: pointer of type atomic_t
- *
- * Atomically test @v and decrement if it is greater than 0.
- * The function returns the old value of @v minus 1.
- */
-static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
-{
-        unsigned long flags;
-        int temp;
-
-        spin_lock_irqsave(&atomic_lock, flags);
-        temp = v->counter;
-        temp -= i;
-        if (temp >= 0)
-                v->counter = temp;
-        spin_unlock_irqrestore(&atomic_lock, flags);
-
-        return temp;
-}
-
-#endif /* CONFIG_CPU_HAS_LLSC */
-
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))
@@ -329,8 +354,6 @@ typedef struct { volatile __s64 counter; } atomic64_t;
  */
 #define atomic64_set(v,i) ((v)->counter = (i))
 
-#ifdef CONFIG_CPU_HAS_LLDSCD
-
 /*
  * atomic64_add - add integer to atomic variable
  * @i: integer value to add
@@ -340,15 +363,33 @@ typedef struct { volatile __s64 counter; } atomic64_t;
  */
 static __inline__ void atomic64_add(long i, atomic64_t * v)
 {
-        unsigned long temp;
-
-        __asm__ __volatile__(
-        "1:     lld     %0, %1          # atomic64_add          \n"
-        "       addu    %0, %2                                  \n"
-        "       scd     %0, %1                                  \n"
-        "       beqz    %0, 1b                                  \n"
-        : "=&r" (temp), "=m" (v->counter)
-        : "Ir" (i), "m" (v->counter));
+        if (cpu_has_llsc && R10000_LLSC_WAR) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     lld     %0, %1          # atomic64_add          \n"
+                "       addu    %0, %2                                  \n"
+                "       scd     %0, %1                                  \n"
+                "       beqzl   %0, 1b                                  \n"
+                : "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter));
+        } else if (cpu_has_llsc) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     lld     %0, %1          # atomic64_add          \n"
+                "       addu    %0, %2                                  \n"
+                "       scd     %0, %1                                  \n"
+                "       beqz    %0, 1b                                  \n"
+                : "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter));
+        } else {
+                unsigned long flags;
+
+                spin_lock_irqsave(&atomic_lock, flags);
+                v->counter += i;
+                spin_unlock_irqrestore(&atomic_lock, flags);
+        }
 }
 
 /*
@@ -360,15 +401,33 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
  */
 static __inline__ void atomic64_sub(long i, atomic64_t * v)
 {
-        unsigned long temp;
-
-        __asm__ __volatile__(
-        "1:     lld     %0, %1          # atomic64_sub          \n"
-        "       subu    %0, %2                                  \n"
-        "       scd     %0, %1                                  \n"
-        "       beqz    %0, 1b                                  \n"
-        : "=&r" (temp), "=m" (v->counter)
-        : "Ir" (i), "m" (v->counter));
+        if (cpu_has_llsc && R10000_LLSC_WAR) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     lld     %0, %1          # atomic64_sub          \n"
+                "       subu    %0, %2                                  \n"
+                "       scd     %0, %1                                  \n"
+                "       beqzl   %0, 1b                                  \n"
+                : "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter));
+        } else if (cpu_has_llsc) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     lld     %0, %1          # atomic64_sub          \n"
+                "       subu    %0, %2                                  \n"
+                "       scd     %0, %1                                  \n"
+                "       beqz    %0, 1b                                  \n"
+                : "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter));
+        } else {
+                unsigned long flags;
+
+                spin_lock_irqsave(&atomic_lock, flags);
+                v->counter -= i;
+                spin_unlock_irqrestore(&atomic_lock, flags);
+        }
 }
 
 /*
@@ -376,36 +435,86 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
  */
 static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
-        unsigned long temp, result;
-
-        __asm__ __volatile__(
-        "1:     lld     %1, %2          # atomic64_add_return   \n"
-        "       addu    %0, %1, %3                              \n"
-        "       scd     %0, %2                                  \n"
-        "       beqz    %0, 1b                                  \n"
-        "       addu    %0, %1, %3                              \n"
-        "       sync                                            \n"
-        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
-        : "Ir" (i), "m" (v->counter)
-        : "memory");
+        unsigned long result;
+
+        if (cpu_has_llsc && R10000_LLSC_WAR) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     lld     %1, %2          # atomic64_add_return   \n"
+                "       addu    %0, %1, %3                              \n"
+                "       scd     %0, %2                                  \n"
+                "       beqzl   %0, 1b                                  \n"
+                "       addu    %0, %1, %3                              \n"
+                "       sync                                            \n"
+                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter)
+                : "memory");
+        } else if (cpu_has_llsc) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     lld     %1, %2          # atomic64_add_return   \n"
+                "       addu    %0, %1, %3                              \n"
+                "       scd     %0, %2                                  \n"
+                "       beqz    %0, 1b                                  \n"
+                "       addu    %0, %1, %3                              \n"
+                "       sync                                            \n"
+                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter)
+                : "memory");
+        } else {
+                unsigned long flags;
+
+                spin_lock_irqsave(&atomic_lock, flags);
+                result = v->counter;
+                result += i;
+                v->counter = result;
+                spin_unlock_irqrestore(&atomic_lock, flags);
+        }
 
         return result;
 }
 
 static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
-        unsigned long temp, result;
-
-        __asm__ __volatile__(
-        "1:     lld     %1, %2          # atomic64_sub_return   \n"
-        "       subu    %0, %1, %3                              \n"
-        "       scd     %0, %2                                  \n"
-        "       beqz    %0, 1b                                  \n"
-        "       subu    %0, %1, %3                              \n"
-        "       sync                                            \n"
-        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
-        : "Ir" (i), "m" (v->counter)
-        : "memory");
+        unsigned long result;
+
+        if (cpu_has_llsc && R10000_LLSC_WAR) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     lld     %1, %2          # atomic64_sub_return   \n"
+                "       subu    %0, %1, %3                              \n"
+                "       scd     %0, %2                                  \n"
+                "       beqzl   %0, 1b                                  \n"
+                "       subu    %0, %1, %3                              \n"
+                "       sync                                            \n"
+                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter)
+                : "memory");
+        } else if (cpu_has_llsc) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     lld     %1, %2          # atomic64_sub_return   \n"
+                "       subu    %0, %1, %3                              \n"
+                "       scd     %0, %2                                  \n"
+                "       beqz    %0, 1b                                  \n"
+                "       subu    %0, %1, %3                              \n"
+                "       sync                                            \n"
+                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter)
+                : "memory");
+        } else {
+                unsigned long flags;
+
+                spin_lock_irqsave(&atomic_lock, flags);
+                result = v->counter;
+                result -= i;
+                v->counter = result;
+                spin_unlock_irqrestore(&atomic_lock, flags);
+        }
 
         return result;
 }
@@ -419,112 +528,50 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  */
 static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
-        unsigned long temp, result;
-
-        __asm__ __volatile__(
-        "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
-        "       dsubu   %0, %1, %3                              \n"
-        "       bltz    %0, 1f                                  \n"
-        "       scd     %0, %2                                  \n"
-        "       beqz    %0, 1b                                  \n"
-        "       sync                                            \n"
-        "1:                                                     \n"
-        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
-        : "Ir" (i), "m" (v->counter)
-        : "memory");
+        unsigned long result;
+
+        if (cpu_has_llsc && R10000_LLSC_WAR) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
+                "       dsubu   %0, %1, %3                              \n"
+                "       bltz    %0, 1f                                  \n"
+                "       scd     %0, %2                                  \n"
+                "       beqzl   %0, 1b                                  \n"
+                "       sync                                            \n"
+                "1:                                                     \n"
+                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter)
+                : "memory");
+        } else if (cpu_has_llsc) {
+                unsigned long temp;
+
+                __asm__ __volatile__(
+                "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
+                "       dsubu   %0, %1, %3                              \n"
+                "       bltz    %0, 1f                                  \n"
+                "       scd     %0, %2                                  \n"
+                "       beqz    %0, 1b                                  \n"
+                "       sync                                            \n"
+                "1:                                                     \n"
+                : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+                : "Ir" (i), "m" (v->counter)
+                : "memory");
+        } else {
+                unsigned long flags;
+
+                spin_lock_irqsave(&atomic_lock, flags);
+                result = v->counter;
+                result -= i;
+                if (result >= 0)
+                        v->counter = result;
+                spin_unlock_irqrestore(&atomic_lock, flags);
+        }
 
         return result;
 }
 
-#else
-
-/*
- * This implementation is only atomic with respect to interrupts. It can't
- * be used on SMP
- *
- * atomic64_add - add integer to atomic variable
- * @i: integer value to add
- * @v: pointer of type atomic64_t
- *
- * Atomically adds @i to @v.
- */
-static __inline__ void atomic64_add(long i, atomic64_t * v)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&atomic_lock, flags);
-        v->counter += i;
-        spin_unlock_irqrestore(&atomic_lock, flags);
-}
-
-/*
- * atomic64_sub - subtract the atomic variable
- * @i: integer value to subtract
- * @v: pointer of type atomic64_t
- *
- * Atomically subtracts @i from @v.
- */
-static __inline__ void atomic64_sub(long i, atomic64_t * v)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(&atomic_lock, flags);
-        v->counter -= i;
-        spin_unlock_irqrestore(&atomic_lock, flags);
-}
-
-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
-{
-        unsigned long flags;
-        long temp;
-
-        spin_lock_irqsave(&atomic_lock, flags);
-        temp = v->counter;
-        temp += i;
-        v->counter = temp;
-        spin_unlock_irqrestore(&atomic_lock, flags);
-
-        return temp;
-}
-
-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
-{
-        unsigned long flags;
-        long temp;
-
-        spin_lock_irqsave(&atomic_lock, flags);
-        temp = v->counter;
-        temp -= i;
-        v->counter = temp;
-        spin_unlock_irqrestore(&atomic_lock, flags);
-
-        return temp;
-}
-
-/*
- * atomic64_sub_if_positive - add integer to atomic variable
- * @v: pointer of type atomic64_t
- *
- * Atomically test @v and decrement if it is greater than 0.
- * The function returns the old value of @v minus 1.
- */
-static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
-{
-        unsigned long flags;
-        long temp;
-
-        spin_lock_irqsave(&atomic_lock, flags);
-        temp = v->counter;
-        temp -= i;
-        if (temp >= 0)
-                v->counter = temp;
-        spin_unlock_irqrestore(&atomic_lock, flags);
-
-        return temp;
-}
-
-#endif /* CONFIG_CPU_HAS_LLDSCD */
-
 #define atomic64_dec_return(v) atomic64_sub_return(1,(v))
 #define atomic64_inc_return(v) atomic64_add_return(1,(v))
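
The change above replaces the old compile-time selection (#ifdef CONFIG_CPU_HAS_LLSC / CONFIG_CPU_HAS_LLDSCD) with a run-time one: each operation tries an ll/sc (or lld/scd) retry loop, uses the branch-likely beqzl form of the same loop when the R10000_LLSC_WAR workaround is enabled, and otherwise falls back to the interrupt-safe path under atomic_lock. The same dispatch pattern can be sketched in portable user-space C. This is an illustration only, not kernel code: have_llsc, my_atomic_t and the pthread mutex are hypothetical stand-ins for cpu_has_llsc, atomic_t and atomic_lock, and a weak compare-and-swap loop stands in for ll/sc, which may likewise fail spuriously.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for cpu_has_llsc: probed once at startup,
 * then tested on every call, mirroring the diff's if/else chain. */
static int have_llsc = 1;

/* Stand-in for atomic_t plus the global atomic_lock fallback. */
typedef struct {
        volatile int counter;
        pthread_mutex_t lock;
} my_atomic_t;

static int my_atomic_add_return(int i, my_atomic_t *v)
{
        int result;

        if (have_llsc) {
                /* Load / modify / conditional-store retry loop.  The weak
                 * CAS may fail spuriously, as MIPS sc can, so loop until
                 * the store sticks; old is refreshed on each failure. */
                int old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
                do {
                        result = old + i;
                } while (!__atomic_compare_exchange_n(&v->counter, &old,
                                result, 1 /* weak, like sc */,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
        } else {
                /* Mirror of the spin_lock_irqsave() fallback the diff
                 * adds for CPUs without ll/sc. */
                pthread_mutex_lock(&v->lock);
                result = v->counter + i;
                v->counter = result;
                pthread_mutex_unlock(&v->lock);
        }
        return result;
}

int main(void)
{
        static my_atomic_t v = { 0, PTHREAD_MUTEX_INITIALIZER };

        printf("%d\n", my_atomic_add_return(5, &v));    /* 5 */
        printf("%d\n", my_atomic_add_return(-2, &v));   /* 3 */
        return 0;
}

Build with gcc -pthread. The R10000_LLSC_WAR branch has no portable analog, since beqzl is just a MIPS branch-likely encoding of the same retry loop, so the sketch collapses the first two kernel cases into one.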
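atomic_sub_if_positive() follows the same dispatch but commits the store only when the subtraction does not go negative: the LL/SC versions branch out of the ll/sc window with bltz before the conditional store, and the locked fallback uses a plain if. A matching sketch, under the same hypothetical names as above:

/* Subtract i from v only if the result stays non-negative; return the
 * subtraction result either way, as the kernel version does. */
static int my_atomic_sub_if_positive(int i, my_atomic_t *v)
{
        int result;

        if (have_llsc) {
                int old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
                do {
                        result = old - i;
                        if (result < 0)         /* like the bltz exit from */
                                break;          /* the ll/sc window        */
                } while (!__atomic_compare_exchange_n(&v->counter, &old,
                                result, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
        } else {
                pthread_mutex_lock(&v->lock);
                result = v->counter - i;
                if (result >= 0)
                        v->counter = result;
                pthread_mutex_unlock(&v->lock);
        }
        return result;
}

Note that a negative result is returned without being stored, which is exactly what lets callers such as semaphore code test "did the decrement succeed" from the return value alone.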